diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index ff2061e09..cec6a3234 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -4,32 +4,40 @@ variables: REVIEWDOG_VERSION: 0.9.11 REVIEWDOG_GITLAB_API_TOKEN: $REVIEWDOG_TOKEN CODECOV_TOKEN: $CODECOV_TOKEN + UNITTESTTAGS: linux sqlite_omit_load_extension before_script: # Setup dependency management tool -# - curl -L -s https://github.com/golang/dep/releases/download/v0.4.1/dep-linux-amd64 -o $GOPATH/bin/dep -# - chmod +x $GOPATH/bin/dep -# - go get github.com/mattn/goveralls -# - go get github.com/haya14busa/goverage -# - go get github.com/golang/lint/golint -# - go get github.com/haya14busa/reviewdog/cmd/reviewdog -# - go get github.com/wadey/gocovmerge + # - curl -L -s https://github.com/golang/dep/releases/download/v0.4.1/dep-linux-amd64 -o $GOPATH/bin/dep + # - chmod +x $GOPATH/bin/dep + # - go get github.com/mattn/goveralls + # - go get github.com/haya14busa/goverage + # - go get github.com/golang/lint/golint + # - go get github.com/haya14busa/reviewdog/cmd/reviewdog + # - go get github.com/wadey/gocovmerge - mkdir -p $GOPATH/src/github.com/CovenantSQL - cp -r /builds/thunderdb/CovenantSQL $GOPATH/src/github.com/CovenantSQL/ - cd $GOPATH/src/github.com/CovenantSQL/CovenantSQL -# - dep ensure + # - dep ensure - mkdir -p ~/bin/ && export PATH="~/bin/:$PATH" - ulimit -n 8192 # - curl -fSL https://github.com/haya14busa/reviewdog/releases/download/$REVIEWDOG_VERSION/reviewdog_linux_amd64 -o ~/bin/reviewdog && chmod +x ~/bin/reviewdog test-my-project: + stage: test + script: + - ./alltest.sh + +compatibility-testnet: stage: test script: + - set -o errexit + - set -o pipefail + - commit=$(git rev-parse --short HEAD) + - branch=$(git branch -rv |grep $commit | awk '{print $1}') + - if [[ $branch =~ "/beta_" ]]; then exit 0; fi - make clean - - make use_all_cores - - go test -v -race -failfast -parallel 16 -cpu 16 $(go list ./... 
| grep -v "/vendor/") -coverprofile cover.out - - cd rpc && go test -test.bench ^BenchmarkPersistentCaller_Call$ -test.run ^$ && cd - - - bash cleanupDB.sh || true - - cd cmd/cql-minerd && go test -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ && cd - - - gocovmerge cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f cover.out - - bash <(curl -s https://codecov.io/bash) + - make -j8 client + - go test -tags "$UNITTESTTAGS" -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ + - set -x + - ./test/testnet_client/run.sh diff --git a/CHANGELOG.md b/CHANGELOG.md index 562baa25a..95bff0e5d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,29 @@ # Changelog +## [v0.2.0](https://github.com/CovenantSQL/CovenantSQL/tree/v0.2.0) (2019-01-05) + +[Full Changelog](https://github.com/CovenantSQL/CovenantSQL/compare/v0.1.0...v0.2.0) + +**Merged pull requests:** + +- Update GNTE config [\#193](https://github.com/CovenantSQL/CovenantSQL/pull/193) ([laodouya](https://github.com/laodouya)) +- Fix matchProvidersWithUser inconsistent [\#188](https://github.com/CovenantSQL/CovenantSQL/pull/188) ([auxten](https://github.com/auxten)) +- Speed up BPs at genesis startup [\#186](https://github.com/CovenantSQL/CovenantSQL/pull/186) ([leventeliu](https://github.com/leventeliu)) +- Wait for database creation fix [\#185](https://github.com/CovenantSQL/CovenantSQL/pull/185) ([xq262144](https://github.com/xq262144)) +- Simplify cql and cql-utils log [\#184](https://github.com/CovenantSQL/CovenantSQL/pull/184) ([auxten](https://github.com/auxten)) +- Fix Makefile PHONY, add push\_testnet [\#183](https://github.com/CovenantSQL/CovenantSQL/pull/183) ([auxten](https://github.com/auxten)) +- Fix issue: duplicate branches [\#182](https://github.com/CovenantSQL/CovenantSQL/pull/182) ([leventeliu](https://github.com/leventeliu)) +- Update testnet conf [\#181](https://github.com/CovenantSQL/CovenantSQL/pull/181) 
([auxten](https://github.com/auxten)) +- Remove base58 wallet address [\#179](https://github.com/CovenantSQL/CovenantSQL/pull/179) ([auxten](https://github.com/auxten)) +- Fix GNTE test config missing miner wallet init coin [\#178](https://github.com/CovenantSQL/CovenantSQL/pull/178) ([laodouya](https://github.com/laodouya)) +- Upgrade transaction structure: add Timestamp field [\#177](https://github.com/CovenantSQL/CovenantSQL/pull/177) ([ggicci](https://github.com/ggicci)) +- Block main cycle when BP network is unreachable [\#176](https://github.com/CovenantSQL/CovenantSQL/pull/176) ([leventeliu](https://github.com/leventeliu)) +- Remove useless hash in base58 encoded private key [\#175](https://github.com/CovenantSQL/CovenantSQL/pull/175) ([auxten](https://github.com/auxten)) +- Prune unused codes [\#174](https://github.com/CovenantSQL/CovenantSQL/pull/174) ([leventeliu](https://github.com/leventeliu)) +- Fix docker entry point [\#173](https://github.com/CovenantSQL/CovenantSQL/pull/173) ([leventeliu](https://github.com/leventeliu)) +- Add permission granting/revoking [\#172](https://github.com/CovenantSQL/CovenantSQL/pull/172) ([leventeliu](https://github.com/leventeliu)) +- Extract observer to an independent docker image [\#163](https://github.com/CovenantSQL/CovenantSQL/pull/163) ([laodouya](https://github.com/laodouya)) + ## [v0.1.0](https://github.com/CovenantSQL/CovenantSQL/tree/v0.1.0) (2018-12-29) [Full Changelog](https://github.com/CovenantSQL/CovenantSQL/compare/v0.0.6...v0.1.0) diff --git a/Gopkg.lock b/Gopkg.lock index e70594996..7f27fcd6c 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -183,6 +183,14 @@ revision = "9a23578d06a26ec1b47bfc8965bf5e7011df8bd6" version = "v1.3.0" +[[projects]] + digest = "1:670d1f29fa2aa15ea777cc5bcf95881f379bf8a71dbbe145be0774da97fede72" + name = "github.com/go-gorp/gorp" + packages = ["."] + pruneopts = "UT" + revision = "4df78490a9aa9a78b9b02b0c913df8dc1954faee" + version = "2.1" + [[projects]] branch = "master" digest = 
"1:48c0fa64e80c089a88d30ab5b826c106af79eb3c65d48e2280f22aa4d61d7a84" @@ -239,6 +247,25 @@ revision = "e3702bed27f0d39777b0b37b664b6280e8ef8fbf" version = "v1.6.2" +[[projects]] + digest = "1:7b5c6e2eeaa9ae5907c391a91c132abfd5c9e8a784a341b5625e750c67e6825d" + name = "github.com/gorilla/websocket" + packages = ["."] + pruneopts = "UT" + revision = "66b9c49e59c6c48f0ffce28c2d8b8a5678502c6d" + version = "v1.4.0" + +[[projects]] + digest = "1:8ec8d88c248041a6df5f6574b87bc00e7e0b493881dad2e7ef47b11dc69093b5" + name = "github.com/hashicorp/golang-lru" + packages = [ + ".", + "simplelru", + ] + pruneopts = "UT" + revision = "20f1fb78b0740ba8c3cb143a61e86ba5c8669768" + version = "v0.5.0" + [[projects]] branch = "master" digest = "1:438016f7d4af8e5a7010b6d0705b267a7607ddc0decad051e83a9458c6b9a523" @@ -487,6 +514,17 @@ revision = "9e8dc3f972df6c8fcc0375ef492c24d0bb204857" version = "1.6.3" +[[projects]] + branch = "master" + digest = "1:782488353b5fc6316c46793ef6d5cf6d0af4eb82492dba1920833b2ffb34881f" + name = "github.com/sourcegraph/jsonrpc2" + packages = [ + ".", + "websocket", + ] + pruneopts = "UT" + revision = "549eb959f029d014d623104d40ab966d159a92de" + [[projects]] branch = "master" digest = "1:685fdfea42d825ebd39ee0994354b46c374cf2c2b2d97a41a8dee1807c6a9b62" @@ -582,6 +620,14 @@ revision = "94e385923345495c4add5c23df1efc6d66964479" version = "v1.0" +[[projects]] + branch = "master" + digest = "1:95be927b2ec224dfd5357bdd9a8f588299779812525d9aeb0740a3713a5a2560" + name = "github.com/zserge/metric" + packages = ["."] + pruneopts = "UT" + revision = "5a5d84c90520d706b4805b4a5cca4c57998868e0" + [[projects]] branch = "master" digest = "1:cfd661f1a52594117f2a753bb640a86d4dbf3e0d778c2641bfbc750e6a1c8be7" @@ -649,8 +695,11 @@ "github.com/davecgh/go-spew/spew", "github.com/dyatlov/go-opengraph/opengraph", "github.com/fortytw2/leaktest", + "github.com/go-gorp/gorp", "github.com/gorilla/handlers", "github.com/gorilla/mux", + "github.com/gorilla/websocket", + 
"github.com/hashicorp/golang-lru", "github.com/jmoiron/jsonq", "github.com/jordwest/mock-conn", "github.com/lufia/iostat", @@ -660,6 +709,7 @@ "github.com/pkg/errors", "github.com/prometheus/client_golang/prometheus", "github.com/prometheus/client_model/go", + "github.com/prometheus/common/expfmt", "github.com/prometheus/common/version", "github.com/prometheus/procfs", "github.com/rcrowley/go-metrics", @@ -668,6 +718,8 @@ "github.com/siddontang/go-mysql/server", "github.com/sirupsen/logrus", "github.com/smartystreets/goconvey/convey", + "github.com/sourcegraph/jsonrpc2", + "github.com/sourcegraph/jsonrpc2/websocket", "github.com/syndtr/goleveldb/leveldb", "github.com/syndtr/goleveldb/leveldb/iterator", "github.com/syndtr/goleveldb/leveldb/opt", @@ -681,6 +733,7 @@ "github.com/xo/usql/rline", "github.com/xo/usql/text", "github.com/xtaci/smux", + "github.com/zserge/metric", "golang.org/x/crypto/ed25519", "golang.org/x/crypto/ssh/terminal", "golang.org/x/sys/unix", diff --git a/Gopkg.toml b/Gopkg.toml index 9df96a495..0f7048f56 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -61,6 +61,10 @@ name = "github.com/CovenantSQL/xurls" branch = "master" +[[override]] + name = "github.com/zserge/metric" + branch = "master" + [[override]] name = "github.com/xtaci/smux" branch = "master" diff --git a/Makefile b/Makefile index e85983717..e1bf36bce 100644 --- a/Makefile +++ b/Makefile @@ -94,6 +94,19 @@ push_testnet: docker tag $(IMAGE):$(VERSION) $(IMAGE):testnet docker push $(IMAGE):testnet +push_bench: + docker tag $(OB_IMAGE):$(VERSION) $(OB_IMAGE):bench + docker push $(OB_IMAGE):bench + docker tag $(IMAGE):$(VERSION) $(IMAGE):bench + docker push $(IMAGE):bench + +push_staging: + docker tag $(OB_IMAGE):$(VERSION) $(OB_IMAGE):staging + docker push $(OB_IMAGE):staging + docker tag $(IMAGE):$(VERSION) $(IMAGE):staging + docker push $(IMAGE):staging + + push: docker push $(OB_IMAGE):$(VERSION) docker push $(OB_IMAGE):latest @@ -107,14 +120,14 @@ builddate := $(shell date 
+%Y%m%d%H%M%S) unamestr := $(shell uname) -ifeq ($(unamestr),"Linux") -platform := "linux" +ifeq ($(unamestr),Linux) +platform := linux endif version := $(branch)-$(GIT_COMMIT)-$(builddate) tags := $(platform) sqlite_omit_load_extension -testtags := $(platform) sqlite_omit_load_extension testbinary +testtags := $(tags) testbinary test_flags := -coverpkg github.com/CovenantSQL/CovenantSQL/... -cover -race -c ldflags_role_bp := -X main.version=$(version) -X github.com/CovenantSQL/CovenantSQL/conf.RoleTag=B $$GOLDFLAGS @@ -215,7 +228,9 @@ all: bp miner observer client clean: rm -rf bin/cql* + rm -f *.cover.out + rm -f coverage.txt .PHONY: status start stop logs push push_testnet clean \ - bin/cqld.test bin/cqld bin/cql-minerd.test bin/cql-minerd bin/cql-utils \ + bin/cqld.test bin/cqld bin/cql-minerd.test bin/cql-minerd bin/cql-utils bin/cql-observer bin/cql-observer.test \ bin/cql bin/cql-fuse bin/cql-adapter bin/cql-mysql-adapter bin/cql-faucet bin/cql-explorer diff --git a/README-zh.md b/README-zh.md index 6d7b08c2d..e2e7ea681 100644 --- a/README-zh.md +++ b/README-zh.md @@ -32,7 +32,7 @@ CovenantSQL 是应用区块链技术构建的去中心化 SQL 云数据库。 CovenantSQL 具备以下特点: -- **SQL接口**: 支持 SQL-92 标准,传统 App 几乎0修改即可数据上链 +- **SQL接口**: 支持 SQL-92 标准,传统 App 几乎 0 修改即可数据上链 - **去中心化**: 基于独有的高效拜占庭容错共识算法 Kayak 实现的去中心化结构 - **不可篡改**: CovenantSQL 中的 Query 历史记录是可追溯的 - **隐私**: 如果 Bitcoin 是用户的钱包,那么 CovenantSQL 就是是用户的去中心化数据库 @@ -110,7 +110,7 @@ sql.Open("CovenantSQL", dbURI) #### 接口 -CovenantSQL仍在建设中,测试网已经发布,[尝试一下](https://testnet.covenantsql.io/). +CovenantSQL仍在建设中,测试网已经发布,[尝试一下](https://developers.covenantsql.io/docs/quickstart). 
- [Golang](client/) @@ -123,8 +123,7 @@ CovenantSQL仍在建设中,测试网已经发布,[尝试一下](https://test ## 测试网 -- [快捷入口](https://testnet.covenantsql.io/quickstart) -- [测试网水龙头](https://testnet.covenantsql.io/) +- [快捷入口](https://developers.covenantsql.io) ## 联系我们 diff --git a/README.md b/README.md index a47354134..b322ab190 100644 --- a/README.md +++ b/README.md @@ -93,7 +93,7 @@ that inspired us: #### Connector -CovenantSQL is still under construction and Testnet is already released, [have a try](https://testnet.covenantsql.io/). +CovenantSQL is still under construction and Testnet is already released, [have a try](https://developers.covenantsql.io/docs/quickstart). - [Golang](client/) @@ -106,8 +106,7 @@ Watch us or [![follow on Twitter](https://img.shields.io/twitter/url/https/twitt ## TestNet -- [Quick Start](https://testnet.covenantsql.io/quickstart) -- [TestNet faucet](https://testnet.covenantsql.io/) +- [Quick Start](https://developers.covenantsql.io) ## Contact diff --git a/alltest.sh b/alltest.sh new file mode 100755 index 000000000..b762c25d6 --- /dev/null +++ b/alltest.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +set -o errexit +set -o pipefail +set -o nounset + +main() { + make clean + make -j6 bp miner observer + + go test -tags "$UNITTESTTAGS" -race -failfast -parallel 16 -cpu 16 -coverprofile main.cover.out $(go list ./... | grep -v CovenantSQL/api) + go test -tags "$UNITTESTTAGS" -race -failfast -parallel 16 -cpu 16 -coverpkg ./api/...,./rpc/jsonrpc -coverprofile api.cover.out ./api/... 
+ + set -x + gocovmerge main.cover.out api.cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f *.cover.out + bash <(curl -s https://codecov.io/bash) + + # some benchmarks + go test -tags "$UNITTESTTAGS" -bench=^BenchmarkPersistentCaller_Call$ -run ^$ ./rpc/ + bash cleanupDB.sh || true + go test -tags "$UNITTESTTAGS" -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ + bash cleanupDB.sh || true +} + +main "$@" + diff --git a/api/blocks.go b/api/blocks.go new file mode 100644 index 000000000..040e6be7d --- /dev/null +++ b/api/blocks.go @@ -0,0 +1,74 @@ +package api + +import ( + "context" + "errors" + + "github.com/CovenantSQL/CovenantSQL/api/models" + "github.com/sourcegraph/jsonrpc2" +) + +func init() { + rpc.RegisterMethod("bp_getBlockList", bpGetBlockList, bpGetBlockListParams{}) + rpc.RegisterMethod("bp_getBlockByHeight", bpGetBlockByHeight, bpGetBlockByHeightParams{}) + rpc.RegisterMethod("bp_getBlockByHash", bpGetBlockByHash, bpGetBlockByHashParams{}) +} + +type bpGetBlockListParams struct { + Since int `json:"since"` + Page int `json:"page"` + Size int `json:"size"` +} + +func (params *bpGetBlockListParams) Validate() error { + if params.Size > 1000 { + return errors.New("max size is 1000") + } + return nil +} + +// BPGetBlockListResponse is the response for method bp_getBlockList. 
+type BPGetBlockListResponse struct { + Blocks []*models.Block `json:"blocks"` + Pagination *models.Pagination `json:"pagination"` +} + +func bpGetBlockList(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) ( + result interface{}, err error, +) { + params := ctx.Value("_params").(*bpGetBlockListParams) + model := models.BlocksModel{} + blocks, pagination, err := model.GetBlockList(params.Since, params.Page, params.Size) + if err != nil { + return nil, err + } + result = &BPGetBlockListResponse{ + Blocks: blocks, + Pagination: pagination, + } + return result, nil +} + +type bpGetBlockByHeightParams struct { + Height int `json:"height"` +} + +func bpGetBlockByHeight(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) ( + result interface{}, err error, +) { + params := ctx.Value("_params").(*bpGetBlockByHeightParams) + model := models.BlocksModel{} + return model.GetBlockByHeight(params.Height) +} + +type bpGetBlockByHashParams struct { + Hash string `json:"hash"` +} + +func bpGetBlockByHash(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) ( + result interface{}, err error, +) { + params := ctx.Value("_params").(*bpGetBlockByHashParams) + model := models.BlocksModel{} + return model.GetBlockByHash(params.Hash) +} diff --git a/api/models/blocks.go b/api/models/blocks.go new file mode 100644 index 000000000..d0f3ecc34 --- /dev/null +++ b/api/models/blocks.go @@ -0,0 +1,101 @@ +package models + +import ( + "database/sql" + "time" + + "github.com/go-gorp/gorp" +) + +// BlocksModel groups operations on Blocks. +type BlocksModel struct{} + +// Block is a block. 
+type Block struct { + Height int `db:"height" json:"height"` // pk + Hash string `db:"hash" json:"hash"` + Timestamp int64 `db:"timestamp" json:"timestamp"` + TimestampHuman time.Time `db:"-" json:"timestamp_human"` + Version int32 `db:"version" json:"version"` + Producer string `db:"producer" json:"producer"` + MerkleRoot string `db:"merkle_root" json:"merkle_root"` + Parent string `db:"parent" json:"parent"` + TxCount int `db:"tx_count" json:"tx_count"` +} + +// PostGet is the hook after SELECT query. +func (b *Block) PostGet(s gorp.SqlExecutor) error { + b.TimestampHuman = time.Unix(0, b.Timestamp) + return nil +} + +// GetBlockList gets a paginated list of blocks, optionally restricted to heights below since. +func (m *BlocksModel) GetBlockList(since, page, size int) (blocks []*Block, pagination *Pagination, err error) { + var ( + querySQL = ` + SELECT + height, + hash, + timestamp, + version, + producer, + merkle_root, + parent, + tx_count + FROM + indexed_blocks + ` + countSQL = buildCountSQL(querySQL) + conds []string + args []interface{} + ) + + pagination = NewPagination(page, size) + if since > 0 { + conds = append(conds, "height < ?") + args = append(args, since) + } + + querySQL, countSQL = buildSQLWithConds(querySQL, countSQL, conds) + + count, err := chaindb.SelectInt(countSQL, args...) + if err != nil { + return nil, pagination, err + } + pagination.SetTotal(int(count)) + blocks = make([]*Block, 0) + if pagination.Offset() > pagination.Total { + return blocks, pagination, nil + } + + querySQL += " ORDER BY height DESC" + querySQL += " LIMIT ? OFFSET ?" + args = append(args, pagination.Limit(), pagination.Offset()) + + _, err = chaindb.Select(&blocks, querySQL, args...) + return blocks, pagination, err +} + +// GetBlockByHeight gets a block by its height. 
+func (m *BlocksModel) GetBlockByHeight(height int) (block *Block, err error) { + block = &Block{} + query := `SELECT height, hash, timestamp, version, producer, merkle_root, parent, tx_count + FROM indexed_blocks WHERE height = ?` + err = chaindb.SelectOne(block, query, height) + if err == sql.ErrNoRows { + return nil, nil + } + return block, err +} + +// GetBlockByHash gets a block by its hash. +func (m *BlocksModel) GetBlockByHash(hash string) (block *Block, err error) { + block = &Block{} + query := `SELECT height, hash, timestamp, version, producer, merkle_root, parent, tx_count + FROM indexed_blocks WHERE hash = ?` + err = chaindb.SelectOne(block, query, hash) + if err == sql.ErrNoRows { + return nil, nil + } + return block, err +} diff --git a/api/models/models.go b/api/models/models.go new file mode 100644 index 000000000..cf4e295d5 --- /dev/null +++ b/api/models/models.go @@ -0,0 +1,53 @@ +package models + +import ( + "database/sql" + "fmt" + + _ "github.com/CovenantSQL/go-sqlite3-encrypt" // sqlite3 driver + "github.com/go-gorp/gorp" + "github.com/pkg/errors" +) + +var ( + chaindb *gorp.DbMap +) + +// InitModels sets up the models package. +func InitModels(dbFile string) error { + return initChainDBConnection(dbFile) +} + +// OpenSQLiteDBAsGorp opens a sqlite database and wraps it in gorp.DbMap. 
+func OpenSQLiteDBAsGorp(dbFile, mode string, maxOpen, maxIdle int) (db *gorp.DbMap, err error) { + dsn := fmt.Sprintf("%s?_journal=WAL&mode=%s", dbFile, mode) + underdb, err := sql.Open("sqlite3", dsn) + if err != nil { + return nil, errors.Wrapf(err, "unable to open database %q", dsn) + } + underdb.SetMaxOpenConns(maxOpen) + underdb.SetMaxIdleConns(maxIdle) + + if err := underdb.Ping(); err != nil { + return nil, errors.Wrapf(err, "ping to database %q failed", dsn) + } + + db = &gorp.DbMap{ + Db: underdb, + Dialect: gorp.SqliteDialect{}, + } + return db, nil +} + +func initChainDBConnection(dbFile string) (err error) { + chaindb, err = OpenSQLiteDBAsGorp(dbFile, "ro", 100, 30) + if err != nil { + return err + } + + // register tables + chaindb.AddTableWithName(Block{}, "indexed_blocks").SetKeys(false, "Height") + chaindb.AddTableWithName(Transaction{}, "indexed_transactions").SetKeys(false, "BlockHeight", "TxIndex") + + return nil +} diff --git a/api/models/pagination.go b/api/models/pagination.go new file mode 100644 index 000000000..a96f1b733 --- /dev/null +++ b/api/models/pagination.go @@ -0,0 +1,91 @@ +package models + +import "math" + +// Pagination holds paging info for list like API. +type Pagination struct { + Page int `json:"page"` + Size int `json:"size"` + Total int `json:"total"` + Pages int `json:"pages"` + + defaultSize int +} + +// PaginationOpt represents extra pagination options to apply. +type PaginationOpt func(*Pagination) + +// WithDefaultSize set pagination default size. +func WithDefaultSize(size int) PaginationOpt { + return func(p *Pagination) { + if size < 0 { + p.defaultSize = 10 + return + } + p.defaultSize = size + } +} + +// NewPagination creates a new Pagination. 
+func NewPagination(page, size int, opts ...PaginationOpt) *Pagination { + p := &Pagination{ + Page: page, + Size: size, + defaultSize: 10, + } + + for _, opt := range opts { + if opt != nil { + opt(p) + } + } + + p.normalize() + return p +} + +func (p *Pagination) normalize() { + if p.Page <= 0 { + p.Page = 1 + } + if p.Size <= 0 { + p.Size = p.defaultSize + } + if p.Total <= 0 { + p.Total = 0 + } + + p.Pages = int(math.Ceil(float64(p.Total) / float64(p.Size))) +} + +// SetPage updates current page index. +func (p *Pagination) SetPage(page int) { + p.Page = page + p.normalize() +} + +// SetSize updates page size. +func (p *Pagination) SetSize(size int) { + p.Size = size + p.normalize() +} + +// SetTotal updates the total records. +func (p *Pagination) SetTotal(total int) { + p.Total = total + p.normalize() +} + +// Limit returns the page size. +// Intended to be used in SQL statements. +func (p *Pagination) Limit() int { + p.normalize() + return p.Size +} + +// Offset returns the size of skipped items of current page. +// Intended to be used in SQL statements. +func (p *Pagination) Offset() int { + p.normalize() + return (p.Page - 1) * p.Size +} diff --git a/api/models/transactions.go b/api/models/transactions.go new file mode 100644 index 000000000..130b9ca51 --- /dev/null +++ b/api/models/transactions.go @@ -0,0 +1,151 @@ +package models + +import ( + "database/sql" + "encoding/json" + "time" + + "github.com/go-gorp/gorp" +) + +// TransactionsModel groups operations on Transactions. +type TransactionsModel struct{} + +// Transaction is a transaction. 
+type Transaction struct { + BlockHeight int `db:"block_height" json:"block_height"` // pk1 + TxIndex int `db:"tx_index" json:"index"` // pk2 + Hash string `db:"hash" json:"hash"` + BlockHash string `db:"block_hash" json:"block_hash"` + Timestamp int64 `db:"timestamp" json:"timestamp"` + TimestampHuman time.Time `db:"-" json:"timestamp_human"` + TxType int `db:"tx_type" json:"type"` + Address string `db:"address" json:"address"` + Raw string `db:"raw" json:"raw"` + Tx interface{} `db:"-" json:"tx"` +} + +// PostGet is the hook after SELECT query. +func (tx *Transaction) PostGet(s gorp.SqlExecutor) error { + tx.TimestampHuman = time.Unix(0, tx.Timestamp) + return json.Unmarshal([]byte(tx.Raw), &tx.Tx) +} + +// GetTransactionByHash get a transaction by its hash. +func (m *TransactionsModel) GetTransactionByHash(hash string) (tx *Transaction, err error) { + tx = &Transaction{} + query := `SELECT block_height, tx_index, hash, block_hash, timestamp, tx_type, + address, raw + FROM indexed_transactions WHERE hash = ?` + err = chaindb.SelectOne(tx, query, hash) + if err == sql.ErrNoRows { + return nil, nil + } + return tx, err +} + +// GetTransactionListOfBlock get a transaction list of block. +func (m *TransactionsModel) GetTransactionListOfBlock(ofBlockHeight int, page, size int) ( + txs []*Transaction, pagination *Pagination, err error, +) { + var ( + querySQL = ` + SELECT + block_height, + tx_index, + hash, + block_hash, + timestamp, + tx_type, + address, + raw + FROM + indexed_transactions + ` + countSQL = buildCountSQL(querySQL) + conds []string + args []interface{} + ) + + pagination = NewPagination(page, size) + conds = append(conds, "block_height = ?") + args = append(args, ofBlockHeight) + + querySQL, countSQL = buildSQLWithConds(querySQL, countSQL, conds) + count, err := chaindb.SelectInt(countSQL, args...) 
+ if err != nil { + return nil, pagination, err + } + pagination.SetTotal(int(count)) + if pagination.Offset() > pagination.Total { + return txs, pagination, nil + } + + querySQL += " ORDER BY tx_index DESC" + querySQL += " LIMIT ? OFFSET ?" + args = append(args, pagination.Limit(), pagination.Offset()) + + _, err = chaindb.Select(&txs, querySQL, args...) + return txs, pagination, err +} + +// GetTransactionList get a transaction list by hash marker. +func (m *TransactionsModel) GetTransactionList(since string, page, size int) ( + txs []*Transaction, pagination *Pagination, err error, +) { + var ( + sinceBlockHeight = 0 + sinceTxIndex = 0 + ) + + if since != "" { + tx, err := m.GetTransactionByHash(since) + if tx == nil { + return txs, pagination, err + } + sinceBlockHeight = tx.BlockHeight + sinceTxIndex = tx.TxIndex + } + + var ( + querySQL = ` + SELECT + block_height, + tx_index, + hash, + block_hash, + timestamp, + tx_type, + address, + raw + FROM + indexed_transactions + ` + countSQL = buildCountSQL(querySQL) + conds []string + args []interface{} + ) + + pagination = NewPagination(page, size) + if sinceBlockHeight > 0 { + conds = append(conds, "(block_height < ? or (block_height = ? and tx_index < ?))") + args = append(args, sinceBlockHeight, sinceBlockHeight, sinceTxIndex) + } + + querySQL, countSQL = buildSQLWithConds(querySQL, countSQL, conds) + count, err := chaindb.SelectInt(countSQL, args...) + if err != nil { + return nil, pagination, err + } + pagination.SetTotal(int(count)) + if pagination.Offset() > pagination.Total { + return txs, pagination, nil + } + + querySQL += " ORDER BY block_height DESC, tx_index DESC" + querySQL += " LIMIT ? OFFSET ?" + args = append(args, pagination.Limit(), pagination.Offset()) + + _, err = chaindb.Select(&txs, querySQL, args...) 
+ return txs, pagination, err +} diff --git a/api/models/utils.go b/api/models/utils.go new file mode 100644 index 000000000..b8c274a75 --- /dev/null +++ b/api/models/utils.go @@ -0,0 +1,22 @@ +package models + +import ( + "regexp" + "strings" +) + +var ( + selectFromRegexp = regexp.MustCompile("(?is)select\\s+.+?\\s+from") +) + +func buildCountSQL(querySQL string) string { + return selectFromRegexp.ReplaceAllString(querySQL, "SELECT count(*) FROM") +} + +func buildSQLWithConds(querySQL, countSQL string, conds []string) (newQuerySQL, newCountSQL string) { + whereSQL := "" + if len(conds) > 0 { + whereSQL = " WHERE " + strings.Join(conds, " AND ") + } + return querySQL + whereSQL, countSQL + whereSQL +} diff --git a/api/service.go b/api/service.go new file mode 100644 index 000000000..510a20462 --- /dev/null +++ b/api/service.go @@ -0,0 +1,40 @@ +package api + +import ( + "net/http" + "time" + + "github.com/CovenantSQL/CovenantSQL/api/models" + "github.com/CovenantSQL/CovenantSQL/rpc/jsonrpc" + "github.com/pkg/errors" +) + +var ( + rpc = jsonrpc.NewHandler() + server *jsonrpc.WebsocketServer +) + +func init() { + server = &jsonrpc.WebsocketServer{ + Server: http.Server{ + ReadTimeout: 30 * time.Second, + WriteTimeout: 60 * time.Second, + }, + } +} + +// Serve runs an API server on the specified address and database file. +func Serve(addr, dbFile string) error { + // setup database + if err := models.InitModels(dbFile); err != nil { + return errors.WithMessage(err, "api: init models failed") + } + server.Addr = addr + server.RPCHandler = rpc + return server.Serve() +} + +// StopService stops the API server. 
+func StopService() { + server.Stop() +} diff --git a/api/service_test.go b/api/service_test.go new file mode 100644 index 000000000..8f22495af --- /dev/null +++ b/api/service_test.go @@ -0,0 +1,560 @@ +package api_test + +import ( + "context" + "fmt" + "os" + "path/filepath" + "testing" + "time" + + "github.com/CovenantSQL/CovenantSQL/api" + "github.com/CovenantSQL/CovenantSQL/api/models" + "github.com/pkg/errors" + + "github.com/gorilla/websocket" + . "github.com/smartystreets/goconvey/convey" + "github.com/sourcegraph/jsonrpc2" + wsstream "github.com/sourcegraph/jsonrpc2/websocket" +) + +const ( + bpA = "9jt00yI91HQ4bCdFfkXWeg" + bpB = "3ToG8OstmKcWCzLXRy2K0w" + addrA = "9JvxiUpBFpkUCCiYf84OCw" + addrB = "I4TezPRXrdBZM9Mp7cr3Gw" +) + +var ( + testdb, _ = filepath.Abs("./testdb.db3") + + ddls = []string{ + `CREATE TABLE IF NOT EXISTS "indexed_blocks" ( + "height" INTEGER PRIMARY KEY, + "hash" TEXT, + "timestamp" INTEGER, + "version" INTEGER, + "producer" TEXT, + "merkle_root" TEXT, + "parent" TEXT, + "tx_count" INTEGER + );`, + + `CREATE INDEX IF NOT EXISTS "idx__indexed_blocks__hash" ON "indexed_blocks" ("hash");`, + `CREATE INDEX IF NOT EXISTS "idx__indexed_blocks__timestamp" ON "indexed_blocks" ("timestamp" DESC);`, + + `CREATE TABLE IF NOT EXISTS "indexed_transactions" ( + "block_height" INTEGER, + "tx_index" INTEGER, + "hash" TEXT, + "block_hash" TEXT, + "timestamp" INTEGER, + "tx_type" INTEGER, + "address" TEXT, + "raw" TEXT, + PRIMARY KEY ("block_height", "tx_index") + );`, + + `CREATE INDEX IF NOT EXISTS "idx__indexed_transactions__hash" ON "indexed_transactions" ("hash");`, + `CREATE INDEX IF NOT EXISTS "idx__indexed_transactions__block_hash" ON "indexed_transactions" ("block_hash");`, + `CREATE INDEX IF NOT EXISTS "idx__indexed_transactions__timestamp" ON "indexed_transactions" ("timestamp" DESC);`, + `CREATE INDEX IF NOT EXISTS "idx__indexed_transactions__tx_type__timestamp" ON "indexed_transactions" ("tx_type", "timestamp" DESC);`, + `CREATE INDEX IF 
NOT EXISTS "idx__indexed_transactions__address__timestamp" ON "indexed_transactions" ("address", "timestamp" DESC);`, + } + + blocksMockData = [][]interface{}{ + {1, "HGGcDJqO7tuZWwJyFxRl9g", 1546589042828174631, 1, bpA, "apple", "0000000000000000000000", 0}, + {2, "pfp8ZcSwhg15W2YSaooX8g", 1546589042482919184, 1, bpA, "apple", "HGGcDJqO7tuZWwJyFxRl9g", 1}, + {3, "NP5Ze1z8hfdG5_G8StXYLw", 1546589042010844731, 1, bpA, "apple", "pfp8ZcSwhg15W2YSaooX8g", 0}, + {4, "gZpo0Y_Wh9u6TxAnFWmiMQ", 1546589042185749429, 1, bpA, "apple", "NP5Ze1z8hfdG5_G8StXYLw", 0}, + {5, "mXMsSXd0OY5MocYl3b5r4Q", 1546589042858585920, 1, bpA, "apple", "gZpo0Y_Wh9u6TxAnFWmiMQ", 0}, + {6, "K7aFl5KIW_xKrUmfpJt6Zg", 1546590006812948193, 1, bpB, "google", "mXMsSXd0OY5MocYl3b5r4Q", 0}, + {7, "iTbk_EvsiprSwLLpC9LOgg", 1546590006885392010, 1, bpB, "google", "K7aFl5KIW_xKrUmfpJt6Zg", 5}, + {8, "RjbeqFM8weHtCSoL_pKurQ", 1546590006585839201, 1, bpB, "google", "iTbk_EvsiprSwLLpC9LOgg", 0}, + {9, "IPS7_Ttp7vdcice8EAWx0g", 1546590006919858504, 1, bpB, "google", "RjbeqFM8weHtCSoL_pKurQ", 0}, + {10, "er05e7FvAZOP3gP5_w_RKw", 1546590006857575843, 1, bpB, "google", "IPS7_Ttp7vdcice8EAWx0g", 3}, + {11, "f0_Dk_vFItabbmcnxNxrTA", 1546590200951918474, 1, bpB, "google", "er05e7FvAZOP3gP5_w_RKw", 0}, + {12, "1pkuZ0pk1d4lzItxrA73KQ", 1546590208582918459, 1, bpB, "google", "f0_Dk_vFItabbmcnxNxrTA", 0}, + {13, "WbhKd7fPzX2Mr8JFyVOljw", 1546590200101838483, 1, bpB, "google", "1pkuZ0pk1d4lzItxrA73KQ", 0}, + {14, "niLUTZpEpOWpPx011bZGlg", 1546590200058583818, 1, bpB, "google", "WbhKd7fPzX2Mr8JFyVOljw", 0}, + } + + transactionsMockData = [][]interface{}{ + {2, 0, "o362ksNHl8gIL4cbXjkMEQ", "pfp8ZcSwhg15W2YSaooX8g", 1546591119847974875, 1, addrA, `{}`}, + {7, 0, "CKI1kAfqOxWpmUug23OxTQ", "iTbk_EvsiprSwLLpC9LOgg", 1546591304102924848, 1, addrA, `{}`}, + {7, 1, "nLwnh4a9oiOG9n4FtgboRw", "iTbk_EvsiprSwLLpC9LOgg", 1546591304284859585, 4, addrB, `{}`}, + {7, 2, "mrsmkMHz1mcXwsOJDakLxA", "iTbk_EvsiprSwLLpC9LOgg", 
1546591304583827173, 2, addrB, `{}`}, + {7, 3, "YrJ64M2odTb96B4VHIWCMw", "iTbk_EvsiprSwLLpC9LOgg", 1546591304847472713, 2, addrA, `{}`}, + {7, 4, "7iCSm4vy4FvAapGCT2p9MA", "iTbk_EvsiprSwLLpC9LOgg", 1546591304901837474, 1, addrB, `{}`}, + {10, 0, "U1s0IRuyLd3iw8PdlAKv4A", "er05e7FvAZOP3gP5_w_RKw", 1546591421847471717, 1, addrA, `{}`}, + {10, 1, "5MX357EQDlMUxZVPjjXeFQ", "er05e7FvAZOP3gP5_w_RKw", 1546591421791893744, 4, addrB, `{}`}, + {10, 2, "lXTWT_P7NRxMHukZCEUfng", "er05e7FvAZOP3gP5_w_RKw", 1546591421909181774, 2, addrB, `{}`}, + } +) + +func mockData(t *testing.T) { + db, err := models.OpenSQLiteDBAsGorp(testdb, "rw", 5, 2) + if err != nil { + t.Errorf("open testdb failed") + return + } + defer db.Db.Close() + + // create tables + for _, ddlSQL := range ddls { + if i, err := db.Exec(ddlSQL); err != nil { + t.Errorf("execute ddl #%d failed: %v", i, err) + } + } + + var insertRows = func(writeSQL string, data [][]interface{}) error { + for i, row := range data { + if _, err := db.Exec(writeSQL, row...); err != nil { + return errors.Wrapf(err, "write row #%d failed", i) + } + } + return nil + } + + if err := insertRows( + "insert into indexed_blocks values (?,?,?,?,?,?,?,?)", + blocksMockData, + ); err != nil { + t.Errorf("mock data for indexed_blocks failed: %v", err) + } + + if err := insertRows( + "insert into indexed_transactions values (?,?,?,?,?,?,?,?)", + transactionsMockData, + ); err != nil { + t.Errorf("mock data for indexed_transactions failed: %v", err) + } +} + +func setupWebsocketClient(addr string) (client *jsonrpc2.Conn, err error) { + var dial = func(ctx context.Context, addr string) (client *jsonrpc2.Conn, err error) { + conn, _, err := websocket.DefaultDialer.DialContext( + context.Background(), + addr, + nil, + ) + if err != nil { + return nil, err + } + + var connOpts []jsonrpc2.ConnOpt + return jsonrpc2.NewConn( + context.Background(), + wsstream.NewObjectStream(conn), + nil, + connOpts..., + ), nil + } + + for i := 0; i < 3; i++ { + ctx, 
cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + client, err = dial(ctx, addr) + if err == nil { + break + } + } + + return client, err +} + +type bpGetBlockListTestCase struct { + Since int + Page int + Size int + ExpectedResults [][]interface{} + ExpectedPagination *models.Pagination +} + +func (c *bpGetBlockListTestCase) String() string { + return fmt.Sprintf("fetch %d blocks at page %d since %d", c.Size, c.Page, c.Since) +} + +func (c *bpGetBlockListTestCase) Params() interface{} { + return []interface{}{c.Since, c.Page, c.Size} +} + +type bpGetBlockTestCase struct { + Height int + Hash string + ExpectedResult []interface{} +} + +func (c *bpGetBlockTestCase) String() string { + return fmt.Sprintf("fetch block of height %d hashed %q", c.Height, c.Hash) +} + +type bpGetTransactionListTestCase struct { + Since string + Page int + Size int + ExpectedResults [][]interface{} + ExpectedPagination *models.Pagination +} + +func (c *bpGetTransactionListTestCase) Params() interface{} { + return []interface{}{c.Since, c.Page, c.Size} +} + +func (c *bpGetTransactionListTestCase) String() string { + return fmt.Sprintf("fetch %d transactions at page %d since %s", c.Size, c.Page, c.Since) +} + +type bpGetTransactionListOfBlockTestCase struct { + BlockHeight int + Page int + Size int + ExpectedResults [][]interface{} + ExpectedPagination *models.Pagination +} + +func (c *bpGetTransactionListOfBlockTestCase) Params() interface{} { + return []interface{}{c.BlockHeight, c.Page, c.Size} +} + +func (c *bpGetTransactionListOfBlockTestCase) String() string { + return fmt.Sprintf("fetch %d transactions at page %d of block %d", c.Size, c.Page, c.BlockHeight) +} + +type bpGetTransactionByHashTestCase struct { + Hash string + ExpectedResult []interface{} +} + +func (c *bpGetTransactionByHashTestCase) String() string { + return fmt.Sprintf("fetch transaction hashed %q", c.Hash) +} + +func TestJSONRPCService(t *testing.T) { + t.Logf("testdb: %s", testdb) 
+ mockData(t) + defer os.Remove(testdb + "-shm") + defer os.Remove(testdb + "-wal") + defer os.Remove(testdb) + + // log.SetLevel(log.DebugLevel) + go api.Serve(":8546", testdb) + defer api.StopService() + + var ( + addr = "ws://localhost:8546" + conveyBlock = func(convey C, item *models.Block, cp []interface{}) { + if cp == nil { + convey.So(item, ShouldBeNil) + return + } + convey.So(item.Height, ShouldEqual, cp[0].(int)) + convey.So(item.Hash, ShouldEqual, cp[1].(string)) + convey.So(item.Timestamp, ShouldEqual, cp[2].(int)) + convey.So(item.TimestampHuman.UnixNano(), ShouldEqual, item.Timestamp) + convey.So(item.Version, ShouldEqual, cp[3].(int)) + convey.So(item.Producer, ShouldEqual, cp[4].(string)) + convey.So(item.MerkleRoot, ShouldEqual, cp[5].(string)) + convey.So(item.Parent, ShouldEqual, cp[6].(string)) + convey.So(item.TxCount, ShouldEqual, cp[7].(int)) + } + + conveyTransaction = func(convey C, item *models.Transaction, cp []interface{}) { + if cp == nil { + convey.So(item, ShouldBeNil) + return + } + + convey.So(item.BlockHeight, ShouldEqual, cp[0].(int)) + convey.So(item.TxIndex, ShouldEqual, cp[1].(int)) + convey.So(item.Hash, ShouldEqual, cp[2].(string)) + convey.So(item.BlockHash, ShouldEqual, cp[3].(string)) + convey.So(item.Timestamp, ShouldEqual, cp[4].(int)) + convey.So(item.TimestampHuman.UnixNano(), ShouldEqual, item.Timestamp) + convey.So(item.TxType, ShouldEqual, cp[5].(int)) + convey.So(item.Address, ShouldEqual, cp[6].(string)) + convey.So(item.Raw, ShouldEqual, cp[7].(string)) + } + ) + + Convey("API not found", t, func() { + rpc, err := setupWebsocketClient(addr) + if err != nil { + t.Errorf("failed to connect to wsapi server: %v", err) + return + } + + Convey("call method should fail if method not found", func() { + var result interface{} + err := rpc.Call(context.Background(), "method_NotFound", nil, &result) + So(err, ShouldNotBeNil) + }) + + Reset(func() { + rpc.Close() + }) + }) + + Convey("blocks API", t, func() { + rpc, err := 
setupWebsocketClient(addr) + if err != nil { + t.Errorf("failed to connect to wsapi server: %v", err) + return + } + + Convey("bp_getBlockList should fail on invalid parameters", func() { + var ( + result []*models.Block + testCases = map[string][]int{ + "page over 1000": {0, 1, 10001}, + "invalid number of parameters": {0}, + "nil parameters": nil, + } + ) + + for name, testCase := range testCases { + Convey(name, func() { + err := rpc.Call(context.Background(), "bp_getBlockList", testCase, &result) + So(err, ShouldNotBeNil) + }) + } + + }) + + Convey("bp_getBlockList should success on fetching valid number of blocks", func() { + var ( + result = new(api.BPGetBlockListResponse) + testCases = []*bpGetBlockListTestCase{ + {0, 1, 10, blocksMockData[4:14], &models.Pagination{Page: 1, Size: 10, Total: 14, Pages: 2}}, + {14, 1, 5, blocksMockData[8:13], &models.Pagination{Page: 1, Size: 5, Total: 13, Pages: 3}}, + {14, 2, 5, blocksMockData[3:8], &models.Pagination{Page: 2, Size: 5, Total: 13, Pages: 3}}, + {14, 3, 5, blocksMockData[0:3], &models.Pagination{Page: 3, Size: 5, Total: 13, Pages: 3}}, + } + ) + + for i, testCase := range testCases { + Convey(fmt.Sprintf("case#%d: %s", i, testCase.String()), func(c C) { + err := rpc.Call(context.Background(), "bp_getBlockList", testCase.Params(), &result) + So(err, ShouldBeNil) + So(len(result.Blocks), ShouldEqual, len(testCase.ExpectedResults)) + So(result.Pagination, ShouldResemble, testCase.ExpectedPagination) + for i, block := range result.Blocks { + conveyBlock(c, block, testCase.ExpectedResults[len(result.Blocks)-i-1]) + } + }) + } + }) + + Convey("bp_getBlockByHash should fetch blocks on existed hash and nothing for an non-existed hash", func(c C) { + var ( + result = new(models.Block) + + testCases = []*bpGetBlockTestCase{ + {0, "o362ksNHl8gIL4cbXjkMEQ", nil}, + {0, "HGGcDJqO7tuZWwJyFxRl9g", blocksMockData[0]}, + } + ) + + for i, testCase := range testCases { + Convey(fmt.Sprintf("case#%d: %s", i, testCase.String()), 
func() { + err := rpc.Call( + context.Background(), + "bp_getBlockByHash", + []interface{}{testCase.Hash}, + &result, + ) + So(err, ShouldBeNil) + conveyBlock(c, result, testCase.ExpectedResult) + }) + } + }) + + Convey("bp_getBlockByHeight should fetch blocks on existed height and nothing for an non-existed height", func(c C) { + var ( + result = new(models.Block) + + testCases = []*bpGetBlockTestCase{ + {192124141, "", nil}, + {1, "", blocksMockData[0]}, + } + ) + + for i, testCase := range testCases { + Convey(fmt.Sprintf("case#%d: %s", i, testCase.String()), func() { + err := rpc.Call( + context.Background(), + "bp_getBlockByHeight", + []interface{}{testCase.Height}, + &result, + ) + So(err, ShouldBeNil) + conveyBlock(c, result, testCase.ExpectedResult) + }) + } + }) + + Reset(func() { + // teardown + rpc.Close() + }) + }) + + Convey("transactions API", t, func() { + rpc, err := setupWebsocketClient(addr) + if err != nil { + t.Errorf("failed to connect to wsapi server: %v", err) + return + } + + Convey("bp_getTransactionList should fail on invalid parameters", func() { + var ( + result []*models.Transaction + invalidParameterCases = map[string][]interface{}{ + "size over 1000": {"nLwnh4a9oiOG9n4FtgboRw", 1, 1001}, + } + ) + + for name, testCase := range invalidParameterCases { + Convey(name, func() { + err := rpc.Call( + context.Background(), + "bp_getTransactionList", + testCase, + &result, + ) + So(err, ShouldNotBeNil) + }) + } + }) + + Convey("bp_getTransactionList should success on fetching valid number of transactions", func(c C) { + var ( + result = new(api.BPGetTransactionListResponse) + testCases = []bpGetTransactionListTestCase{ + { + "5MX357EQDlMUxZVPjjXeFQ", 1, 5, transactionsMockData[2:7], + &models.Pagination{Page: 1, Size: 5, Total: 7, Pages: 2}, + }, + { + "5MX357EQDlMUxZVPjjXeFQ", 2, 5, transactionsMockData[0:2], + &models.Pagination{Page: 2, Size: 5, Total: 7, Pages: 2}, + }, + { + "CKI1kAfqOxWpmUug23OxTQ", 1, 3, transactionsMockData[0:1], + 
&models.Pagination{Page: 1, Size: 3, Total: 1, Pages: 1}, + }, + { + "CKI1kAfqOxWpmUug23OxTQ", 2, 3, nil, + &models.Pagination{Page: 2, Size: 3, Total: 1, Pages: 1}, + }, + } + ) + + for i, testCase := range testCases { + Convey(fmt.Sprintf("case#%d: %s", i, testCase.String()), func() { + err := rpc.Call( + context.Background(), + "bp_getTransactionList", + testCase.Params(), + &result, + ) + So(err, ShouldBeNil) + So(len(result.Transactions), ShouldEqual, len(testCase.ExpectedResults)) + So(result.Pagination, ShouldResemble, testCase.ExpectedPagination) + for i, item := range result.Transactions { + cp := testCase.ExpectedResults[len(result.Transactions)-i-1] + conveyTransaction(c, item, cp) + } + }) + } + }) + + Convey("bp_getTransactionListOfBlock should fail on invalid parameters", func(c C) { + var ( + result = new(api.BPGetTransactionListResponse) + testCases = map[string][]interface{}{ + "invalid block height": {0, 1, 10}, + "page size over 1000": {10, 1, 1001}, + } + ) + + for name, testCase := range testCases { + Convey(name, func() { + err := rpc.Call( + context.Background(), + "bp_getTransactionListOfBlock", + testCase, + &result, + ) + So(err, ShouldNotBeNil) + }) + } + }) + + Convey("bp_getTransactionListOfBlock should success on fetching valid number of transactions", func(c C) { + var ( + result = new(api.BPGetTransactionListResponse) + testCases = []bpGetTransactionListOfBlockTestCase{ + { + 7, 1, 3, transactionsMockData[3:6], + &models.Pagination{Page: 1, Size: 3, Total: 5, Pages: 2}, + }, + { + 7, 2, 3, transactionsMockData[1:3], + &models.Pagination{Page: 2, Size: 3, Total: 5, Pages: 2}, + }, + { + 1, 1, 10, nil, + &models.Pagination{Page: 1, Size: 10, Total: 0, Pages: 0}, + }, + } + ) + + for i, testCase := range testCases { + Convey(fmt.Sprintf("case#%d: %s", i, testCase.String()), func() { + err := rpc.Call( + context.Background(), + "bp_getTransactionListOfBlock", + testCase.Params(), + &result, + ) + So(err, ShouldBeNil) + 
So(len(result.Transactions), ShouldEqual, len(testCase.ExpectedResults)) + So(result.Pagination, ShouldResemble, testCase.ExpectedPagination) + for i, item := range result.Transactions { + cp := testCase.ExpectedResults[len(result.Transactions)-i-1] + conveyTransaction(c, item, cp) + } + }) + } + }) + + Convey("bp_getTransactionByHash should fetch transactions on existed hash and nothing for an non-existed hash", func(c C) { + var ( + result = new(models.Transaction) + + testCases = []*bpGetTransactionByHashTestCase{ + {"o362ksNHl8gIL4cbXjkMEQ", transactionsMockData[0]}, + {"HGGcDJqO7tuZWwJyFxRl9g", nil}, + } + ) + + for i, testCase := range testCases { + Convey(fmt.Sprintf("case#%d: %s", i, testCase.String()), func() { + err := rpc.Call( + context.Background(), + "bp_getTransactionByHash", + []interface{}{testCase.Hash}, + &result, + ) + So(err, ShouldBeNil) + conveyTransaction(c, result, testCase.ExpectedResult) + }) + } + }) + + Reset(func() { + rpc.Close() + }) + }) +} diff --git a/api/transactions.go b/api/transactions.go new file mode 100644 index 000000000..cc88ecaae --- /dev/null +++ b/api/transactions.go @@ -0,0 +1,95 @@ +package api + +import ( + "context" + "errors" + "fmt" + + "github.com/CovenantSQL/CovenantSQL/api/models" + "github.com/sourcegraph/jsonrpc2" +) + +func init() { + rpc.RegisterMethod("bp_getTransactionList", bpGetTransactionList, bpGetTransactionListParams{}) + rpc.RegisterMethod("bp_getTransactionByHash", bpGetTransactionByHash, bpGetTransactionByHashParams{}) + rpc.RegisterMethod("bp_getTransactionListOfBlock", bpGetTransactionListOfBlock, bpGetTransactionListOfBlockParams{}) +} + +type bpGetTransactionListParams struct { + Since string `json:"since"` + Page int `json:"page"` + Size int `json:"size"` +} + +func (params *bpGetTransactionListParams) Validate() error { + if params.Size > 1000 { + return errors.New("max size is 1000") + } + return nil +} + +// BPGetTransactionListResponse is the response for method bp_getTransactionList. 
+type BPGetTransactionListResponse struct { + Transactions []*models.Transaction `json:"transactions"` + Pagination *models.Pagination `json:"pagination"` +} + +func bpGetTransactionList(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) ( + result interface{}, err error, +) { + params := ctx.Value("_params").(*bpGetTransactionListParams) + model := models.TransactionsModel{} + transactions, pagination, err := model.GetTransactionList(params.Since, params.Page, params.Size) + if err != nil { + return nil, err + } + result = &BPGetTransactionListResponse{ + Transactions: transactions, + Pagination: pagination, + } + return result, nil +} + +type bpGetTransactionListOfBlockParams struct { + BlockHeight int `json:"height"` + Page int `json:"page"` + Size int `json:"size"` +} + +func (params *bpGetTransactionListOfBlockParams) Validate() error { + if params.BlockHeight < 1 { + return fmt.Errorf("invalid block height %d", params.BlockHeight) + } + if params.Size > 1000 { + return errors.New("max size is 1000") + } + return nil +} + +func bpGetTransactionListOfBlock(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) ( + result interface{}, err error, +) { + params := ctx.Value("_params").(*bpGetTransactionListOfBlockParams) + model := models.TransactionsModel{} + transactions, pagination, err := model.GetTransactionListOfBlock(params.BlockHeight, params.Page, params.Size) + if err != nil { + return nil, err + } + result = &BPGetTransactionListResponse{ + Transactions: transactions, + Pagination: pagination, + } + return result, nil +} + +type bpGetTransactionByHashParams struct { + Hash string `json:"hash"` +} + +func bpGetTransactionByHash(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) ( + result interface{}, err error, +) { + params := ctx.Value("_params").(*bpGetTransactionByHashParams) + model := models.TransactionsModel{} + return model.GetTransactionByHash(params.Hash) +} diff --git a/bin/docker-entry.sh 
b/bin/docker-entry.sh index 6c3e5835b..cb7619ed8 100755 --- a/bin/docker-entry.sh +++ b/bin/docker-entry.sh @@ -1,13 +1,15 @@ #!/bin/sh -echo nameserver 1.1.1.1 > /etc/resolv.conf +echo nameserver 114.114.114.114 > /etc/resolv.conf + +[ -s "${COVENANT_ALERT}" ] && [ -x "${COVENANT_ALERT}" ] && (eval "${COVENANT_ALERT}") case "${COVENANT_ROLE}" in miner) - exec /app/cql-minerd -config "${COVENANT_CONF}" "${@}" + exec /app/cql-minerd -config "${COVENANT_CONF}" -metric-web "${METRIC_WEB_ADDR}" "${@}" ;; blockproducer) - exec /app/cqld -config "${COVENANT_CONF}" "${@}" + exec /app/cqld -config "${COVENANT_CONF}" -metric-web "${METRIC_WEB_ADDR}" "${@}" ;; observer) MAGIC_DOLLAR='$' envsubst < /etc/nginx/conf.d/servers/explorer.conf.template > /etc/nginx/conf.d/default.conf diff --git a/blockproducer/blocknode.go b/blockproducer/blocknode.go index 8a0aaacc8..317ee655f 100644 --- a/blockproducer/blocknode.go +++ b/blockproducer/blocknode.go @@ -43,7 +43,7 @@ func newBlockNode(h uint32, b *types.BPBlock, p *blockNode) *blockNode { }(), height: h, - hash: b.SignedHeader.BlockHash, + hash: b.SignedHeader.DataHash, block: b, } } diff --git a/blockproducer/blocknode_test.go b/blockproducer/blocknode_test.go index dcebfadb3..09299abdc 100644 --- a/blockproducer/blocknode_test.go +++ b/blockproducer/blocknode_test.go @@ -20,6 +20,7 @@ import ( "testing" "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" "github.com/CovenantSQL/CovenantSQL/types" . 
"github.com/smartystreets/goconvey/convey" ) @@ -29,39 +30,49 @@ func TestBlockNode(t *testing.T) { var ( b0 = &types.BPBlock{ SignedHeader: types.BPSignedHeader{ - BlockHash: hash.Hash{0x1}, + DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ + DataHash: hash.Hash{0x1}, + }, }, } b1 = &types.BPBlock{ SignedHeader: types.BPSignedHeader{ BPHeader: types.BPHeader{ - ParentHash: b0.SignedHeader.BlockHash, + ParentHash: b0.SignedHeader.DataHash, + }, + DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ + DataHash: hash.Hash{0x2}, }, - BlockHash: hash.Hash{0x2}, }, } b2 = &types.BPBlock{ SignedHeader: types.BPSignedHeader{ BPHeader: types.BPHeader{ - ParentHash: b1.SignedHeader.BlockHash, + ParentHash: b1.SignedHeader.DataHash, + }, + DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ + DataHash: hash.Hash{0x3}, }, - BlockHash: hash.Hash{0x3}, }, } b3 = &types.BPBlock{ SignedHeader: types.BPSignedHeader{ BPHeader: types.BPHeader{ - ParentHash: b2.SignedHeader.BlockHash, + ParentHash: b2.SignedHeader.DataHash, + }, + DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ + DataHash: hash.Hash{0x4}, }, - BlockHash: hash.Hash{0x4}, }, } b4 = &types.BPBlock{ SignedHeader: types.BPSignedHeader{ BPHeader: types.BPHeader{ - ParentHash: b3.SignedHeader.BlockHash, + ParentHash: b3.SignedHeader.DataHash, + }, + DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ + DataHash: hash.Hash{0x5}, }, - BlockHash: hash.Hash{0x5}, }, } n0 = newBlockNode(0, b0, nil) @@ -73,17 +84,21 @@ func TestBlockNode(t *testing.T) { b3p = &types.BPBlock{ SignedHeader: types.BPSignedHeader{ BPHeader: types.BPHeader{ - ParentHash: b2.SignedHeader.BlockHash, + ParentHash: b2.SignedHeader.DataHash, + }, + DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ + DataHash: hash.Hash{0x6}, }, - BlockHash: hash.Hash{0x6}, }, } b4p = &types.BPBlock{ SignedHeader: types.BPSignedHeader{ BPHeader: types.BPHeader{ - ParentHash: 
b3p.SignedHeader.BlockHash, + ParentHash: b3p.SignedHeader.DataHash, + }, + DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ + DataHash: hash.Hash{0x7}, }, - BlockHash: hash.Hash{0x7}, }, } n3p = newBlockNode(3, b3p, n2) diff --git a/blockproducer/bpinfo.go b/blockproducer/bpinfo.go new file mode 100644 index 000000000..c95a7ec52 --- /dev/null +++ b/blockproducer/bpinfo.go @@ -0,0 +1,80 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package blockproducer + +import ( + "fmt" + + "github.com/CovenantSQL/CovenantSQL/proto" +) + +type blockProducerInfo struct { + rank uint32 + total uint32 + role string + nodeID proto.NodeID +} + +// String implements fmt.Stringer. 
+func (i *blockProducerInfo) String() string { + return fmt.Sprintf("[%d/%d|%s] %s", i.rank+1, i.total, i.role, i.nodeID) +} + +func buildBlockProducerInfos( + localNodeID proto.NodeID, peers *proto.Peers, isAPINode bool, +) ( + localBPInfo *blockProducerInfo, bpInfos []*blockProducerInfo, err error, +) { + var ( + total = len(peers.PeersHeader.Servers) + index int32 + found bool + ) + + bpInfos = make([]*blockProducerInfo, total) + for i, v := range peers.PeersHeader.Servers { + var role = "F" + if v == peers.Leader { + role = "L" + } + bpInfos[i] = &blockProducerInfo{ + rank: uint32(i), + total: uint32(total), + role: role, + nodeID: v, + } + } + + if isAPINode { + localBPInfo = &blockProducerInfo{ + rank: 0, + total: uint32(total), + role: "A", + nodeID: localNodeID, + } + return localBPInfo, bpInfos, nil + } + + if index, found = peers.Find(localNodeID); !found { + err = ErrLocalNodeNotFound + return + } + + localBPInfo = bpInfos[index] + + return +} diff --git a/blockproducer/branch.go b/blockproducer/branch.go index 8c511ccef..fcba6e5dc 100644 --- a/blockproducer/branch.go +++ b/blockproducer/branch.go @@ -22,6 +22,7 @@ import ( "time" pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" + "github.com/CovenantSQL/CovenantSQL/conf" ca "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/proto" @@ -56,6 +57,10 @@ func newBranch( } // Apply new blocks to view and pool for _, bn := range list { + if len(bn.block.Transactions) > conf.MaxTransactionsPerBlock { + return nil, ErrTooManyTransactionsInBlock + } + for _, v := range bn.block.Transactions { var k = v.Hash() // Check in tx pool @@ -126,6 +131,11 @@ func (b *branch) applyBlock(n *blockNode) (br *branch, err error) { return } var cpy = b.makeArena() + + if len(n.block.Transactions) > conf.MaxTransactionsPerBlock { + return nil, ErrTooManyTransactionsInBlock + } + for _, v := range n.block.Transactions { var k = 
v.Hash() // Check in tx pool @@ -172,11 +182,17 @@ func (b *branch) produceBlock( br *branch, bl *types.BPBlock, err error, ) { var ( - cpy = b.makeArena() - txs = cpy.sortUnpackedTxs() - out = make([]pi.Transaction, 0, len(txs)) - ierr error + cpy = b.makeArena() + txs = cpy.sortUnpackedTxs() + ierr error + packCount = conf.MaxTransactionsPerBlock ) + + if len(txs) < packCount { + packCount = len(txs) + } + + out := make([]pi.Transaction, 0, packCount) for _, v := range txs { var k = v.Hash() if ierr = cpy.preview.apply(v); ierr != nil { @@ -185,7 +201,11 @@ func (b *branch) produceBlock( delete(cpy.unpacked, k) cpy.packed[k] = v out = append(out, v) + if len(out) == packCount { + break + } } + // Create new block and update head var block = &types.BPBlock{ SignedHeader: types.BPSignedHeader{ @@ -214,6 +234,24 @@ func (b *branch) clearPackedTxs(txs []pi.Transaction) { } } +func (b *branch) clearUnpackedTxs(txs []pi.Transaction) { + for _, v := range txs { + delete(b.unpacked, v.Hash()) + } +} + +func (b *branch) queryTxState(hash hash.Hash) (state pi.TransactionState, ok bool) { + if _, ok = b.unpacked[hash]; ok { + state = pi.TransactionStatePending + return + } + if _, ok = b.packed[hash]; ok { + state = pi.TransactionStatePacked + return + } + return +} + func (b *branch) sprint(from uint32) (buff string) { var nodes = b.head.fetchNodeList(from) for i, v := range nodes { diff --git a/blockproducer/chain.go b/blockproducer/chain.go index cea195eda..0e05ea9d8 100644 --- a/blockproducer/chain.go +++ b/blockproducer/chain.go @@ -18,13 +18,15 @@ package blockproducer import ( "context" + "expvar" "fmt" "math" "os" "sync" - "sync/atomic" "time" + mw "github.com/zserge/metric" + pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/chainbus" "github.com/CovenantSQL/CovenantSQL/conf" @@ -32,7 +34,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" 
"github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/merkle" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" @@ -50,13 +51,13 @@ type Chain struct { wg *sync.WaitGroup // RPC components server *rpc.Server - cl *rpc.Caller + caller *rpc.Caller // Other components - st xi.Storage - bs chainbus.Bus + storage xi.Storage + chainBus chainbus.Bus // Channels for incoming blocks and transactions - pendingBlocks chan *types.BPBlock - pendingTxs chan pi.Transaction + pendingBlocks chan *types.BPBlock + pendingAddTxReqs chan *types.AddTxReq // The following fields are read-only in runtime address proto.AccountAddress genesisTime time.Time @@ -64,11 +65,10 @@ type Chain struct { tick time.Duration sync.RWMutex // protects following fields - peers *proto.Peers - nodeID proto.NodeID + bpInfos []*blockProducerInfo + localBPInfo *blockProducerInfo + localNodeID proto.NodeID confirms uint32 - serversNum uint32 - locSvIndex uint32 nextHeight uint32 offset time.Duration lastIrre *blockNode @@ -77,10 +77,16 @@ type Chain struct { headBranch *branch branches []*branch txPool map[hash.Hash]pi.Transaction + mode RunMode } // NewChain creates a new blockchain. func NewChain(cfg *Config) (c *Chain, err error) { + // Normally, NewChain() should only be called once in app. 
+ // So, we just check expvar without a lock + if expvar.Get("height") == nil { + expvar.Publish("height", mw.NewGauge("5m1s")) + } return NewChainWithContext(context.Background(), cfg) } @@ -88,7 +94,6 @@ func NewChain(cfg *Config) (c *Chain, err error) { func NewChainWithContext(ctx context.Context, cfg *Config) (c *Chain, err error) { var ( existed bool - ok bool ierr error cld context.Context @@ -107,26 +112,40 @@ func NewChainWithContext(ctx context.Context, cfg *Config) (c *Chain, err error) br, head *branch headIndex int - pubKey *asymmetric.PublicKey - addr proto.AccountAddress - locSvIndex int32 + pubKey *asymmetric.PublicKey + addr proto.AccountAddress + bpInfos []*blockProducerInfo + localBPInfo *blockProducerInfo bus = chainbus.New() ) - if fi, err := os.Stat(cfg.DataFile); err == nil && fi.Mode().IsRegular() { - existed = true + // Verify genesis block in config + if cfg.Genesis == nil { + err = ErrNilGenesis + return + } + if ierr = cfg.Genesis.VerifyHash(); ierr != nil { + err = errors.Wrap(ierr, "failed to verify genesis block hash") + return } // Open storage + if fi, err := os.Stat(cfg.DataFile); err == nil && fi.Mode().IsRegular() { + existed = true + } if st, ierr = openStorage(fmt.Sprintf("file:%s", cfg.DataFile)); ierr != nil { err = errors.Wrap(ierr, "failed to open storage") return } + defer func() { + if err != nil { + st.Close() + } + }() - // Storage genesis + // Create initial state from genesis block and store if !existed { - // TODO(leventeliu): reuse chain.replaceAndSwitchToBranch to construct initial state. 
var init = newMetaState() for _, v := range cfg.Genesis.Transactions { if ierr = init.apply(v); ierr != nil { @@ -134,30 +153,9 @@ func NewChainWithContext(ctx context.Context, cfg *Config) (c *Chain, err error) return } } - var sps []storageProcedure + var sps = init.compileChanges(nil) sps = append(sps, addBlock(0, cfg.Genesis)) - for k, v := range init.dirty.accounts { - if v != nil { - sps = append(sps, updateAccount(v)) - } else { - sps = append(sps, deleteAccount(k)) - } - } - for k, v := range init.dirty.databases { - if v != nil { - sps = append(sps, updateShardChain(v)) - } else { - sps = append(sps, deleteShardChain(k)) - } - } - for k, v := range init.dirty.provider { - if v != nil { - sps = append(sps, updateProvider(v)) - } else { - sps = append(sps, deleteProvider(k)) - } - } - sps = append(sps, updateIrreversible(cfg.Genesis.SignedHeader.BlockHash)) + sps = append(sps, updateIrreversible(cfg.Genesis.SignedHeader.DataHash)) if ierr = store(st, sps, nil); ierr != nil { err = errors.Wrap(ierr, "failed to initialize storage") return @@ -169,6 +167,11 @@ func NewChainWithContext(ctx context.Context, cfg *Config) (c *Chain, err error) err = errors.Wrap(ierr, "failed to load data from storage") return } + if persistedGenesis := irre.ancestorByCount(0); persistedGenesis == nil || + !persistedGenesis.hash.IsEqual(cfg.Genesis.BlockHash()) { + err = ErrGenesisHashNotMatch + return + } for _, v := range heads { log.WithFields(log.Fields{ "irre_hash": irre.hash.Short(4), @@ -206,12 +209,11 @@ func NewChainWithContext(ctx context.Context, cfg *Config) (c *Chain, err error) } // Setup peer list - if locSvIndex, ok = cfg.Peers.Find(cfg.NodeID); !ok { - err = ErrLocalNodeNotFound + if localBPInfo, bpInfos, err = buildBlockProducerInfos(cfg.NodeID, cfg.Peers, cfg.Mode == APINodeMode); err != nil { return } if t = cfg.ConfirmThreshold; t <= 0.0 { - t = float64(2) / 3.0 + t = conf.DefaultConfirmThreshold } if m = uint32(math.Ceil(float64(l)*t + 1)); m > l { m = l @@ 
-225,26 +227,25 @@ func NewChainWithContext(ctx context.Context, cfg *Config) (c *Chain, err error) wg: &sync.WaitGroup{}, server: cfg.Server, - cl: rpc.NewCaller(), + caller: rpc.NewCaller(), - st: st, - bs: bus, + storage: st, + chainBus: bus, - pendingBlocks: make(chan *types.BPBlock), - pendingTxs: make(chan pi.Transaction), + pendingBlocks: make(chan *types.BPBlock), + pendingAddTxReqs: make(chan *types.AddTxReq), address: addr, genesisTime: cfg.Genesis.SignedHeader.Timestamp, period: cfg.Period, tick: cfg.Tick, - peers: cfg.Peers, - nodeID: cfg.NodeID, - confirms: m, - serversNum: l, - locSvIndex: uint32(locSvIndex), - nextHeight: head.head.height + 1, - offset: time.Duration(0), // TODO(leventeliu): initialize offset + bpInfos: bpInfos, + localBPInfo: localBPInfo, + localNodeID: cfg.NodeID, + confirms: m, + nextHeight: head.head.height + 1, + offset: time.Duration(0), // TODO(leventeliu): initialize offset lastIrre: irre, immutable: immutable, @@ -252,13 +253,13 @@ func NewChainWithContext(ctx context.Context, cfg *Config) (c *Chain, err error) headBranch: head, branches: branches, txPool: txPool, + mode: cfg.Mode, } log.WithFields(log.Fields{ - "index": c.locSvIndex, - "bp_number": c.serversNum, - "period": c.period.String(), - "tick": c.tick.String(), - "height": c.head().height, + "local": c.getLocalBPInfo(), + "period": c.period, + "tick": c.tick, + "height": c.head().height, }).Debug("current chain state") return } @@ -283,11 +284,14 @@ func (c *Chain) Start() { // Stop stops the main process of the sql-chain. 
func (c *Chain) Stop() (err error) { // Stop main process - log.WithFields(log.Fields{"peer": c.peerInfo()}).Debug("stopping chain") + var le = log.WithFields(log.Fields{ + "local": c.getLocalBPInfo(), + }) + le.Debug("stopping chain") c.stop() - log.WithFields(log.Fields{"peer": c.peerInfo()}).Debug("chain service stopped") - c.st.Close() - log.WithFields(log.Fields{"peer": c.peerInfo()}).Debug("chain database closed") + le.Debug("chain service stopped") + c.storage.Close() + le.Debug("chain database closed") // FIXME(leventeliu): RPC server should provide an `unregister` method to detach chain service // instance. Add it to Chain.stop(), then working channels can be closed safely. @@ -301,28 +305,9 @@ func (c *Chain) Stop() (err error) { return } -// checkBlock has following steps: 1. check parent block 2. checkTx 2. merkle tree 3. Hash 4. Signature. -func (c *Chain) checkBlock(b *types.BPBlock) (err error) { - rootHash := merkle.NewMerkle(b.GetTxHashes()).GetRoot() - if !b.SignedHeader.MerkleRoot.IsEqual(rootHash) { - return ErrInvalidMerkleTreeRoot - } - - enc, err := b.SignedHeader.BPHeader.MarshalHash() - if err != nil { - return err - } - h := hash.THashH(enc) - if !b.BlockHash().IsEqual(&h) { - return ErrInvalidHash - } - - return nil -} - func (c *Chain) pushBlock(b *types.BPBlock) (err error) { var ierr error - if ierr = c.checkBlock(b); ierr != nil { + if ierr = b.Verify(); ierr != nil { err = errors.Wrap(ierr, "failed to check block") return } @@ -345,35 +330,16 @@ func (c *Chain) produceBlock(now time.Time) (err error) { if b, err = c.produceAndStoreBlock(now, priv); err != nil { return } - log.WithField("block", b).Debug("produced new block") - - for _, s := range c.getPeers().Servers { - if !s.IsEqual(&c.nodeID) { - func(id proto.NodeID) { - c.goFuncWithTimeout(func(ctx context.Context) { - var ( - req = &types.AdviseNewBlockReq{ - Envelope: proto.Envelope{ - // TODO(lambda): Add fields. 
- }, - Block: b, - } - resp = &types.AdviseNewBlockResp{} - err = c.cl.CallNodeWithContext( - ctx, id, route.MCCAdviseNewBlock.String(), req, resp) - ) - log.WithFields(log.Fields{ - "local": c.peerInfo(), - "remote": id, - "block_time": b.Timestamp(), - "block_hash": b.BlockHash().Short(4), - "parent_hash": b.ParentHash().Short(4), - }).WithError(err).Debug("broadcasting new block to other peers") - }, c.period) - }(s) - } - } - return err + + log.WithFields(log.Fields{ + "block_time": b.Timestamp(), + "block_hash": b.BlockHash().Short(4), + "parent_hash": b.ParentHash().Short(4), + }).Debug("produced new block") + + // Broadcast to other block producers + c.nonblockingBroadcastBlock(b) + return } // advanceNextHeight does the check and runs block producing if its my turn. @@ -381,8 +347,7 @@ func (c *Chain) advanceNextHeight(now time.Time, d time.Duration) { var elapsed = -d log.WithFields(log.Fields{ - "bp_number": c.serversNum, - "node_index": c.locSvIndex, + "local": c.getLocalBPInfo(), "enclosing_height": c.getNextHeight() - 1, "using_timestamp": now.Format(time.RFC3339Nano), "elapsed_seconds": elapsed.Seconds(), @@ -390,7 +355,7 @@ func (c *Chain) advanceNextHeight(now time.Time, d time.Duration) { defer c.increaseNextHeight() // Skip if it's not my turn - if !c.isMyTurn() { + if c.mode == APINodeMode || !c.isMyTurn() { return } // Normally, a block producing should start right after the new period, but more time may also @@ -403,6 +368,7 @@ func (c *Chain) advanceNextHeight(now time.Time, d time.Duration) { }).Warn("too much time elapsed in the new period, skip this block") return } + expvar.Get("height").(mw.Metric).Add(float64(c.getNextHeight())) log.WithField("height", c.getNextHeight()).Info("producing a new block") if err := c.produceBlock(now); err != nil { log.WithField("now", now.Format(time.RFC3339Nano)).WithError(err).Errorln( @@ -412,19 +378,29 @@ func (c *Chain) advanceNextHeight(now time.Time, d time.Duration) { func (c *Chain) syncHeads() { 
for { - var h = c.heightOfTime(c.now()) - if c.getNextHeight() > h { + var ( + now = c.now() + nowHeight uint32 + ) + if now.Before(c.genesisTime) { + log.WithFields(log.Fields{ + "local": c.getLocalBPInfo(), + }).Info("now time is before genesis time, waiting for genesis") + break + } + if nowHeight = c.heightOfTime(c.now()); c.getNextHeight() > nowHeight { break } - for c.getNextHeight() <= h { + for c.getNextHeight() <= nowHeight { // TODO(leventeliu): use the test mode flag to bypass the long-running synchronizing // on startup by now, need better solution here. if conf.GConf.StartupSyncHoles { log.WithFields(log.Fields{ + "local": c.getLocalBPInfo(), "next_height": c.getNextHeight(), - "height": h, + "now_height": nowHeight, }).Debug("synchronizing head blocks") - c.syncCurrentHead(c.ctx) + c.blockingSyncCurrentHead(c.ctx, conf.BPStartupRequiredReachableCount) } c.increaseNextHeight() } @@ -450,66 +426,88 @@ func (c *Chain) processBlocks(ctx context.Context) { } } -func (c *Chain) addTx(tx pi.Transaction) { +func (c *Chain) addTx(req *types.AddTxReq) { select { - case c.pendingTxs <- tx: + case c.pendingAddTxReqs <- req: case <-c.ctx.Done(): log.WithError(c.ctx.Err()).Warn("add transaction aborted") } } -func (c *Chain) processTx(tx pi.Transaction) { - if err := tx.Verify(); err != nil { - log.WithError(err).Errorf("failed to verify transaction with hash: %s, address: %s, tx type: %s", - tx.Hash(), tx.GetAccountAddress(), tx.GetTransactionType().String()) +func (c *Chain) processAddTxReq(addTxReq *types.AddTxReq) { + // Nil check + if addTxReq == nil || addTxReq.Tx == nil { + log.Warn("empty add tx request") return } + + var ( + ttl = addTxReq.TTL + tx = addTxReq.Tx + + txhash = tx.Hash() + addr = tx.GetAccountAddress() + nonce = tx.GetAccountNonce() + + le = log.WithFields(log.Fields{ + "hash": txhash.Short(4), + "address": addr, + "nonce": nonce, + "type": tx.GetTransactionType(), + }) + + base pi.AccountNonce + err error + ) + + // Existense check if ok := 
func() (ok bool) { c.RLock() defer c.RUnlock() - _, ok = c.txPool[tx.Hash()] + _, ok = c.txPool[txhash] return }(); ok { - log.WithFields(log.Fields{ - "tx_hash": tx.Hash().Short(4), - }).Debug("tx already exists, abort processing") + le.Debug("tx already exists, abort processing") return } - for _, s := range c.getPeers().Servers { - if !s.IsEqual(&c.nodeID) { - func(id proto.NodeID) { - c.goFuncWithTimeout(func(ctx context.Context) { - var ( - req = &types.AddTxReq{ - Envelope: proto.Envelope{ - // TODO(lambda): Add fields. - }, - Tx: tx, - } - resp = &types.AddTxResp{} - err = c.cl.CallNodeWithContext( - ctx, id, route.MCCAddTx.String(), req, resp) - ) - log.WithFields(log.Fields{ - "local": c.peerInfo(), - "remote": id, - "tx_hash": tx.Hash().Short(4), - "tx_type": tx.GetTransactionType(), - }).WithError(err).Debug("broadcasting transaction to other peers") - }, c.tick) - }(s) - } + + // Verify transaction + if err = tx.Verify(); err != nil { + le.WithError(err).Warn("failed to verify transaction") + return + } + if base, err = c.immutableNextNonce(addr); err != nil { + le.WithError(err).Warn("failed to load base nonce of transaction account") + return + } + if nonce < base || nonce >= base+conf.MaxPendingTxsPerAccount { + // TODO(leventeliu): should persist to somewhere for tx query? 
+ le.WithFields(log.Fields{ + "base_nonce": base, + "pending_limit": conf.MaxPendingTxsPerAccount, + }).Warn("invalid transaction nonce") + return } - if err := c.storeTx(tx); err != nil { - log.WithError(err).Error("failed to add transaction") + + // Broadcast to other block producers + if ttl > conf.MaxTxBroadcastTTL { + ttl = conf.MaxTxBroadcastTTL + } + if ttl > 0 { + c.nonblockingBroadcastTx(ttl-1, tx) + } + + // Add to tx pool + if err = c.storeTx(tx); err != nil { + le.WithError(err).Error("failed to add transaction") } } func (c *Chain) processTxs(ctx context.Context) { for { select { - case tx := <-c.pendingTxs: - c.processTx(tx) + case addTxReq := <-c.pendingAddTxReqs: + c.processAddTxReq(addTxReq) case <-ctx.Done(): log.WithError(c.ctx.Err()).Info("abort transaction processing") return @@ -528,7 +526,7 @@ func (c *Chain) mainCycle(ctx context.Context) { select { case <-timer.C: // Try to fetch block at height `nextHeight-1` until enough peers are reachable - if err := c.blockingSyncCurrentHead(ctx); err != nil { + if err := c.blockingSyncCurrentHead(ctx, c.getRequiredConfirms()); err != nil { log.WithError(err).Info("abort main cycle") timer.Reset(0) return @@ -540,7 +538,7 @@ func (c *Chain) mainCycle(ctx context.Context) { c.advanceNextHeight(t, d) } else { log.WithFields(log.Fields{ - "peer": c.peerInfo(), + "peer": c.getLocalBPInfo(), "next_height": c.getNextHeight(), "head_height": c.head().height, "head_block": c.head().hash.Short(4), @@ -556,7 +554,7 @@ func (c *Chain) mainCycle(ctx context.Context) { } } -func (c *Chain) blockingSyncCurrentHead(ctx context.Context) (err error) { +func (c *Chain) blockingSyncCurrentHead(ctx context.Context, requiredReachable uint32) (err error) { var ( ticker *time.Ticker interval = 1 * time.Second @@ -567,11 +565,11 @@ func (c *Chain) blockingSyncCurrentHead(ctx context.Context) (err error) { ticker = time.NewTicker(interval) defer ticker.Stop() for { + if c.syncCurrentHead(ctx, requiredReachable) { + return + } 
select { case <-ticker.C: - if c.syncCurrentHead(ctx) { - return - } case <-ctx.Done(): err = ctx.Err() return @@ -580,84 +578,28 @@ func (c *Chain) blockingSyncCurrentHead(ctx context.Context) (err error) { } // syncCurrentHead synchronizes a block at the current height of the local peer from the known -// remote peers. The return value `ok` indicates that there're at less `c.confirms-1` replies -// from these gossip calls. -func (c *Chain) syncCurrentHead(ctx context.Context) (ok bool) { - var h = c.getNextHeight() - 1 - if c.head().height >= h { +// remote peers. The return value `ok` indicates that there're at least `requiredReachable-1` +// replies from these gossip calls. +func (c *Chain) syncCurrentHead(ctx context.Context, requiredReachable uint32) (ok bool) { + var currentHeight = c.getNextHeight() - 1 + if c.head().height >= currentHeight { ok = true return } + // Initiate blocking gossip calls to fetch block of the current height, // with timeout of one tick. var ( - wg = &sync.WaitGroup{} - cld, ccl = context.WithTimeout(ctx, c.tick) - unreachable uint32 + unreachable = c.blockingFetchBlock(ctx, currentHeight) + serversNum = c.getLocalBPInfo().total ) - defer func() { - wg.Wait() - ccl() - var needConfirms, serversNum = func() (cf, sn uint32) { - c.RLock() - defer c.RUnlock() - cf, sn = c.confirms, c.serversNum - return - }() - if unreachable+needConfirms > serversNum { - log.WithFields(log.Fields{ - "peer": c.peerInfo(), - "sync_head_height": h, - "unreachable_count": unreachable, - }).Warn("one or more block producers are currently unreachable") - ok = false - } else { - ok = true - } - }() - for _, s := range c.getPeers().Servers { - if !s.IsEqual(&c.nodeID) { - wg.Add(1) - go func(id proto.NodeID) { - defer wg.Done() - var ( - err error - req = &types.FetchBlockReq{ - Envelope: proto.Envelope{ - // TODO(lambda): Add fields. 
- }, - Height: h, - } - resp = &types.FetchBlockResp{} - ) - var le = log.WithFields(log.Fields{ - "local": c.peerInfo(), - "remote": id, - "height": h, - }) - if err = c.cl.CallNodeWithContext( - cld, id, route.MCCFetchBlock.String(), req, resp, - ); err != nil { - le.WithError(err).Warn("failed to fetch block") - atomic.AddUint32(&unreachable, 1) - return - } - if resp.Block == nil { - le.Debug("fetch block request reply: no such block") - return - } - // Push new block from other peers - le.WithFields(log.Fields{ - "parent": resp.Block.ParentHash().Short(4), - "hash": resp.Block.BlockHash().Short(4), - }).Debug("fetch block request reply: found block") - select { - case c.pendingBlocks <- resp.Block: - case <-cld.Done(): - log.WithError(cld.Err()).Warn("add pending block aborted") - } - }(s) - } + + if ok = unreachable+requiredReachable <= serversNum; !ok { + log.WithFields(log.Fields{ + "peer": c.getLocalBPInfo(), + "sync_head_height": currentHeight, + "unreachable_count": unreachable, + }).Warn("one or more block producers are currently unreachable") } return } @@ -671,7 +613,7 @@ func (c *Chain) storeTx(tx pi.Transaction) (err error) { return } - return store(c.st, []storageProcedure{addTx(tx)}, func() { + return store(c.storage, []storageProcedure{addTx(tx)}, func() { c.txPool[k] = tx for _, v := range c.branches { v.addTx(tx) @@ -688,6 +630,9 @@ func (c *Chain) replaceAndSwitchToBranch( sps []storageProcedure up storageCallback height = c.heightOfTime(newBlock.Timestamp()) + + resultTxPool = make(map[hash.Hash]pi.Transaction) + expiredTxs []pi.Transaction ) // Find new irreversible blocks @@ -699,42 +644,46 @@ func (c *Chain) replaceAndSwitchToBranch( newIrres = lastIrre.fetchNodeList(c.lastIrre.count) // Apply irreversible blocks to create dirty map on immutable cache - // - // TODO(leventeliu): use old metaState for now, better use separated dirty cache. 
+ for k, v := range c.txPool { + resultTxPool[k] = v + } for _, b := range newIrres { for _, tx := range b.block.Transactions { if err := c.immutable.apply(tx); err != nil { log.WithError(err).Fatal("failed to apply block to immutable database") } + delete(resultTxPool, tx.Hash()) // Remove confirmed transaction } } - // Prepare storage procedures to update immutable database - sps = append(sps, addBlock(height, newBlock)) - for k, v := range c.immutable.dirty.accounts { - if v != nil { - sps = append(sps, updateAccount(v)) - } else { - sps = append(sps, deleteAccount(k)) - } - } - for k, v := range c.immutable.dirty.databases { - if v != nil { - sps = append(sps, updateShardChain(v)) - } else { - sps = append(sps, deleteShardChain(k)) - } - } - for k, v := range c.immutable.dirty.provider { - if v != nil { - sps = append(sps, updateProvider(v)) - } else { - sps = append(sps, deleteProvider(k)) + // Check tx expiration + for k, v := range resultTxPool { + if base, err := c.immutable.nextNonce( + v.GetAccountAddress(), + ); err != nil || v.GetAccountNonce() < base { + log.WithFields(log.Fields{ + "hash": k.Short(4), + "type": v.GetTransactionType(), + "account": v.GetAccountAddress(), + "nonce": v.GetAccountNonce(), + + "immutable_base_nonce": base, + }).Debug("transaction expired") + expiredTxs = append(expiredTxs, v) + delete(resultTxPool, k) // Remove expired transaction } } + + // Prepare storage procedures to update immutable database + sps = c.immutable.compileChanges(sps) + sps = append(sps, addBlock(height, newBlock)) + sps = append(sps, buildBlockIndex(height, newBlock)) for _, n := range newIrres { sps = append(sps, deleteTxs(n.block.Transactions)) } + if len(expiredTxs) > 0 { + sps = append(sps, deleteTxs(expiredTxs)) + } sps = append(sps, updateIrreversible(lastIrre.hash)) // Prepare callback to update cache @@ -773,19 +722,21 @@ func (c *Chain) replaceAndSwitchToBranch( c.headBranch = newBranch c.headIndex = idx c.branches = brs - // Clear packed 
transactions + // Clear transactions in each branch for _, b := range newIrres { for _, br := range c.branches { br.clearPackedTxs(b.block.Transactions) } - for _, tx := range b.block.Transactions { - delete(c.txPool, tx.Hash()) - } } + for _, br := range c.branches { + br.clearUnpackedTxs(expiredTxs) + } + // Update txPool to result txPool (packed and expired transactions cleared!) + c.txPool = resultTxPool } // Write to immutable database and update cache - if err = store(c.st, sps, up); err != nil { + if err = store(c.storage, sps, up); err != nil { c.immutable.clean() } // TODO(leventeliu): trigger ChainBus.Publish. @@ -834,7 +785,7 @@ func (c *Chain) applyBlock(bl *types.BPBlock) (err error) { } // Grow a branch while the current branch is not changed if br.head.count <= c.headBranch.head.count { - return store(c.st, + return store(c.storage, []storageProcedure{addBlock(height, bl)}, func() { br.preview.commit() @@ -864,7 +815,7 @@ func (c *Chain) applyBlock(bl *types.BPBlock) (err error) { err = errors.Wrapf(ierr, "failed to fork from %s", parent.hash.Short(4)) return } - return store(c.st, + return store(c.storage, []storageProcedure{addBlock(height, bl)}, func() { c.branches = append(c.branches, br) }, ) @@ -938,7 +889,7 @@ func (c *Chain) nextTick() (t time.Time, d time.Duration) { func (c *Chain) isMyTurn() bool { c.RLock() defer c.RUnlock() - return c.nextHeight%c.serversNum == c.locSvIndex + return c.nextHeight%c.localBPInfo.total == c.localBPInfo.rank } // increaseNextHeight prepares the chain state for the next turn. @@ -948,31 +899,45 @@ func (c *Chain) increaseNextHeight() { c.nextHeight++ } -func (c *Chain) peerInfo() string { - var index, bpNum, nodeID = func() (uint32, uint32, proto.NodeID) { - c.RLock() - defer c.RUnlock() - return c.locSvIndex, c.serversNum, c.nodeID - }() - return fmt.Sprintf("[%d/%d] %s", index, bpNum, nodeID) -} - // heightOfTime calculates the heightOfTime with this sql-chain config of a given time reading. 
func (c *Chain) heightOfTime(t time.Time) uint32 { return uint32(t.Sub(c.genesisTime) / c.period) } +func (c *Chain) getRequiredConfirms() uint32 { + c.RLock() + defer c.RUnlock() + return c.confirms +} + func (c *Chain) getNextHeight() uint32 { c.RLock() defer c.RUnlock() return c.nextHeight } -func (c *Chain) getPeers() *proto.Peers { +func (c *Chain) getLocalBPInfo() *blockProducerInfo { c.RLock() defer c.RUnlock() - var peers = c.peers.Clone() - return &peers + return c.localBPInfo +} + +// getRemoteBPInfos remove this node from the peer list +func (c *Chain) getRemoteBPInfos() (remoteBPInfos []*blockProducerInfo) { + var localBPInfo, bpInfos = func() (*blockProducerInfo, []*blockProducerInfo) { + c.RLock() + defer c.RUnlock() + return c.localBPInfo, c.bpInfos + }() + + for _, info := range bpInfos { + if info.nodeID.IsEqual(&localBPInfo.nodeID) { + continue + } + remoteBPInfos = append(remoteBPInfos, info) + } + + return remoteBPInfos } func (c *Chain) lastIrreversibleBlock() *blockNode { diff --git a/blockproducer/chain_gossip.go b/blockproducer/chain_gossip.go new file mode 100644 index 000000000..4080933c2 --- /dev/null +++ b/blockproducer/chain_gossip.go @@ -0,0 +1,136 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package blockproducer + +import ( + "context" + "sync" + "sync/atomic" + + pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/route" + "github.com/CovenantSQL/CovenantSQL/types" + "github.com/CovenantSQL/CovenantSQL/utils/log" +) + +func (c *Chain) nonblockingBroadcastBlock(block *types.BPBlock) { + for _, info := range c.getRemoteBPInfos() { + func(remote *blockProducerInfo) { + c.goFuncWithTimeout(func(ctx context.Context) { + var ( + req = &types.AdviseNewBlockReq{ + Envelope: proto.Envelope{ + // TODO(lambda): Add fields. + }, + Block: block, + } + err = c.caller.CallNodeWithContext( + ctx, remote.nodeID, route.MCCAdviseNewBlock.String(), req, nil) + ) + log.WithFields(log.Fields{ + "local": c.getLocalBPInfo(), + "remote": remote, + "block_time": block.Timestamp(), + "block_hash": block.BlockHash().Short(4), + "parent_hash": block.ParentHash().Short(4), + }).WithError(err).Debug("broadcast new block to other peers") + }, c.period) + }(info) + } +} + +func (c *Chain) nonblockingBroadcastTx(ttl uint32, tx pi.Transaction) { + for _, info := range c.getRemoteBPInfos() { + func(remote *blockProducerInfo) { + c.goFuncWithTimeout(func(ctx context.Context) { + var ( + req = &types.AddTxReq{ + Envelope: proto.Envelope{ + // TODO(lambda): Add fields. 
+ }, + TTL: ttl, + Tx: tx, + } + err = c.caller.CallNodeWithContext( + ctx, remote.nodeID, route.MCCAddTx.String(), req, nil) + ) + log.WithFields(log.Fields{ + "local": c.getLocalBPInfo(), + "remote": remote, + "hash": tx.Hash().Short(4), + "address": tx.GetAccountAddress(), + "type": tx.GetTransactionType(), + }).WithError(err).Debug("broadcast transaction to other peers") + }, c.tick) + }(info) + } +} + +func (c *Chain) blockingFetchBlock(ctx context.Context, h uint32) (unreachable uint32) { + var ( + cld, ccl = context.WithTimeout(ctx, c.tick) + wg = &sync.WaitGroup{} + ) + defer func() { + wg.Wait() + ccl() + }() + for _, info := range c.getRemoteBPInfos() { + wg.Add(1) + go func(remote *blockProducerInfo) { + defer wg.Done() + var ( + err error + req = &types.FetchBlockReq{ + Envelope: proto.Envelope{ + // TODO(lambda): Add fields. + }, + Height: h, + } + resp = &types.FetchBlockResp{} + ) + var le = log.WithFields(log.Fields{ + "local": c.getLocalBPInfo(), + "remote": remote, + "height": h, + }) + if err = c.caller.CallNodeWithContext( + cld, remote.nodeID, route.MCCFetchBlock.String(), req, resp, + ); err != nil { + le.WithError(err).Warn("failed to fetch block") + atomic.AddUint32(&unreachable, 1) + return + } + if resp.Block == nil { + le.Debug("fetch block request reply: no such block") + return + } + // Push new block from other peers + le.WithFields(log.Fields{ + "parent": resp.Block.ParentHash().Short(4), + "hash": resp.Block.BlockHash().Short(4), + }).Debug("fetch block request reply: found block") + select { + case c.pendingBlocks <- resp.Block: + case <-cld.Done(): + log.WithError(cld.Err()).Warn("add pending block aborted") + } + }(info) + } + return +} diff --git a/blockproducer/chain_service.go b/blockproducer/chain_io.go similarity index 82% rename from blockproducer/chain_service.go rename to blockproducer/chain_io.go index ea1187693..7e0755dc2 100644 --- a/blockproducer/chain_service.go +++ b/blockproducer/chain_io.go @@ -33,7 +33,7 @@ func 
(c *Chain) loadBlock(h hash.Hash) (b *types.BPBlock, err error) { enc []byte out = &types.BPBlock{} ) - if err = c.st.Reader().QueryRow( + if err = c.storage.Reader().QueryRow( `SELECT "encoded" FROM "blocks" WHERE "hash"=?`, h.String(), ).Scan(&enc); err != nil { return @@ -134,3 +134,33 @@ func (c *Chain) loadSQLChainProfiles(addr proto.AccountAddress) []*types.SQLChai defer c.RUnlock() return c.immutable.loadROSQLChains(addr) } + +func (c *Chain) queryTxState(hash hash.Hash) (state pi.TransactionState, err error) { + c.RLock() + defer c.RUnlock() + var ok bool + + if state, ok = c.headBranch.queryTxState(hash); ok { + return + } + + var ( + count int + querySQL = `SELECT COUNT(*) FROM "indexed_transactions" WHERE "hash" = ?` + ) + if err = c.storage.Reader().QueryRow(querySQL, hash.String()).Scan(&count); err != nil { + return pi.TransactionStateNotFound, err + } + + if count > 0 { + return pi.TransactionStateConfirmed, nil + } + + return pi.TransactionStateNotFound, nil +} + +func (c *Chain) immutableNextNonce(addr proto.AccountAddress) (n pi.AccountNonce, err error) { + c.RLock() + defer c.RUnlock() + return c.immutable.nextNonce(addr) +} diff --git a/blockproducer/chain_test.go b/blockproducer/chain_test.go index 1e4afe108..cc6c662a9 100644 --- a/blockproducer/chain_test.go +++ b/blockproducer/chain_test.go @@ -31,6 +31,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/types" + "github.com/pkg/errors" . 
"github.com/smartystreets/goconvey/convey" ) @@ -121,7 +122,7 @@ func TestChain(t *testing.T) { }), }, } - err = genesis.PackAndSignBlock(testingPrivateKey) + err = genesis.SetHash() So(err, ShouldBeNil) begin = genesis.Timestamp() @@ -145,6 +146,32 @@ func TestChain(t *testing.T) { Tick: time.Duration(300 * time.Millisecond), } + Convey("A new chain running before genesis time should be waiting for genesis", func() { + config.Genesis.SignedHeader.Timestamp = time.Now().Add(24 * time.Hour) + err = genesis.SetHash() + So(err, ShouldBeNil) + chain, err = NewChain(config) + So(err, ShouldBeNil) + + var sv = rpc.NewServer() + err = sv.InitRPCServer("localhost:0", testingPrivateKeyFile, []byte{}) + So(err, ShouldBeNil) + defer sv.Stop() + chain.server = sv + chain.confirms = 1 + chain.Start() + defer func() { + err = chain.Stop() + So(err, ShouldBeNil) + chain = nil + }() + time.Sleep(5 * chain.period) + var _, count, height, err = chain.fetchLastIrreversibleBlock() + So(err, ShouldBeNil) + So(count, ShouldEqual, 0) + So(height, ShouldEqual, 0) + }) + chain, err = NewChain(config) So(err, ShouldBeNil) So(chain, ShouldNotBeNil) @@ -286,6 +313,47 @@ func TestChain(t *testing.T) { chain.stat() }) + Convey("The chain should report error if genesis in config is cleared", func() { + err = chain.Stop() + So(err, ShouldBeNil) + config.Genesis = nil + chain, err = NewChain(config) + So(err, ShouldEqual, ErrNilGenesis) + So(chain, ShouldBeNil) + }) + + Convey("The chain should report error if config is changed", func() { + err = chain.Stop() + So(err, ShouldBeNil) + config.Genesis.Transactions = append( + config.Genesis.Transactions, + types.NewBaseAccount(&types.Account{ + Address: addr2, + TokenBalance: [5]uint64{1000, 1000, 1000, 1000, 1000}, + }), + ) + chain, err = NewChain(config) + So(errors.Cause(err), ShouldEqual, types.ErrMerkleRootVerification) + So(chain, ShouldBeNil) + }) + + Convey("The chain should report error if config is changed and rehashed", func() { + err = 
chain.Stop() + So(err, ShouldBeNil) + config.Genesis.Transactions = append( + config.Genesis.Transactions, + types.NewBaseAccount(&types.Account{ + Address: addr2, + TokenBalance: [5]uint64{1000, 1000, 1000, 1000, 1000}, + }), + ) + err = config.Genesis.SetHash() + So(err, ShouldBeNil) + chain, err = NewChain(config) + So(err, ShouldEqual, ErrGenesisHashNotMatch) + So(chain, ShouldBeNil) + }) + Convey("The chain APIs should return expected results", func() { var ( bl *types.BPBlock @@ -345,13 +413,14 @@ func TestChain(t *testing.T) { chain.confirms = 1 chain.Start() defer func() { - chain.Stop() + err = chain.Stop() + So(err, ShouldBeNil) chain = nil }() - chain.addTx(t1) - chain.addTx(t2) - chain.addTx(t3) - chain.addTx(t4) + chain.addTx(&types.AddTxReq{TTL: 1, Tx: t1}) + chain.addTx(&types.AddTxReq{TTL: 1, Tx: t2}) + chain.addTx(&types.AddTxReq{TTL: 1, Tx: t3}) + chain.addTx(&types.AddTxReq{TTL: 1, Tx: t4}) time.Sleep(15 * chain.period) }) }) diff --git a/blockproducer/config.go b/blockproducer/config.go index ff56602e9..d6f5befb7 100644 --- a/blockproducer/config.go +++ b/blockproducer/config.go @@ -24,12 +24,21 @@ import ( "github.com/CovenantSQL/CovenantSQL/types" ) +// RunMode defines modes that a bp can run as. +type RunMode int + const ( - blockVersion int32 = 0x01 + // BPMode is the default and normal mode. + BPMode RunMode = iota + + // APINodeMode makes the bp behaviour like an API gateway. It becomes an API + // node, who syncs data from the bp network and exposes JSON-RPC API to users. + APINodeMode ) // Config is the main chain configuration. 
type Config struct { + Mode RunMode Genesis *types.BPBlock DataFile string @@ -49,6 +58,7 @@ func NewConfig(genesis *types.BPBlock, dataFile string, server *rpc.Server, peers *proto.Peers, nodeID proto.NodeID, period time.Duration, tick time.Duration) *Config { config := Config{ + Mode: BPMode, Genesis: genesis, DataFile: dataFile, Server: server, diff --git a/blockproducer/errors.go b/blockproducer/errors.go index 837b7e7ff..2a6cc74cb 100644 --- a/blockproducer/errors.go +++ b/blockproducer/errors.go @@ -27,11 +27,10 @@ var ( ErrInvalidHash = errors.New("Hash is invalid") // ErrExistedTx defines existed tx error. ErrExistedTx = errors.New("Tx existed") - // ErrInvalidMerkleTreeRoot defines invalid merkle tree root error. - ErrInvalidMerkleTreeRoot = errors.New("Block merkle tree root does not match the tx hashes") // ErrParentNotMatch defines invalid parent hash. ErrParentNotMatch = errors.New("Block's parent hash cannot match best block") - + // ErrTooManyTransactionsInBlock defines error of too many transactions in a block. + ErrTooManyTransactionsInBlock = errors.New("too many transactions in block") // ErrBalanceOverflow indicates that there will be an overflow after balance manipulation. ErrBalanceOverflow = errors.New("balance overflow") // ErrInsufficientBalance indicates that an account has insufficient balance for spending. @@ -61,16 +60,21 @@ var ( ErrNoEnoughMiner = errors.New("can not get enough miners") // ErrAccountPermissionDeny indicates that the sender does not own admin permission to the sqlchain. ErrAccountPermissionDeny = errors.New("account permission deny") - // ErrNoAdminLeft indicates there is no admin user in sqlchain. - ErrNoAdminLeft = errors.New("no admin user left") + // ErrNoSuperUserLeft indicates there is no super user in sqlchain. + ErrNoSuperUserLeft = errors.New("no super user left") // ErrInvalidPermission indicates that the permission is invalid. 
ErrInvalidPermission = errors.New("invalid permission") // ErrMinerUserNotMatch indicates that the miner and user do not match. ErrMinerUserNotMatch = errors.New("miner and user do not match") // ErrInsufficientAdvancePayment indicates that the advance payment is insufficient. ErrInsufficientAdvancePayment = errors.New("insufficient advance payment") + // ErrNilGenesis indicates that the genesis block is nil in config. + ErrNilGenesis = errors.New("nil genesis block") // ErrMultipleGenesis indicates that there're multiple genesis blocks while loading. ErrMultipleGenesis = errors.New("multiple genesis blocks") + // ErrGenesisHashNotMatch indicates that the genesis block hash in config doesn't match + // the persisted one. + ErrGenesisHashNotMatch = errors.New("persisted genesis block hash not match") // ErrInvalidGasPrice indicates that the gas price is invalid. ErrInvalidGasPrice = errors.New("gas price is invalid") // ErrInvalidMinerCount indicates that the miner node count is invalid. 
diff --git a/blockproducer/interfaces/mixins_gen.go b/blockproducer/interfaces/mixins_gen.go index aa7b10d14..257ca5a1f 100644 --- a/blockproducer/interfaces/mixins_gen.go +++ b/blockproducer/interfaces/mixins_gen.go @@ -11,19 +11,18 @@ func (z *TransactionTypeMixin) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) + o = hsp.AppendTime(o, z.Timestamp) if oTemp, err := z.TxType.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - o = hsp.AppendTime(o, z.Timestamp) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *TransactionTypeMixin) Msgsize() (s int) { - s = 1 + 7 + z.TxType.Msgsize() + 10 + hsp.TimeSize + s = 1 + 10 + hsp.TimeSize + 7 + z.TxType.Msgsize() return } diff --git a/blockproducer/interfaces/transaction.go b/blockproducer/interfaces/transaction.go index 35595d5c2..f23e11a56 100644 --- a/blockproducer/interfaces/transaction.go +++ b/blockproducer/interfaces/transaction.go @@ -30,7 +30,7 @@ import ( // AccountNonce defines the an account nonce. type AccountNonce uint32 -// TransactionType defines an transaction type. +// TransactionType defines a transaction type. type TransactionType uint32 // Bytes encodes a TransactionType to a byte slice. @@ -109,6 +109,42 @@ func (t TransactionType) String() string { } } +// TransactionState defines a transaction state. 
+type TransactionState uint32 + +// Transaction state transition: +// [o] ---[ Add ]--> Pending ---[ Produce Block ]--> Packed ---[ Irreversible ]--> Confirmed +// | | x +// | x +------[ Prune ]--> Not Found +// x | +// | +------------------------------------[ Expire ]--> Expired +// | +// +----------------------------------------------------------------------> Not Found +const ( + TransactionStatePending TransactionState = iota + TransactionStatePacked + TransactionStateConfirmed + TransactionStateExpired + TransactionStateNotFound +) + +func (s TransactionState) String() string { + switch s { + case TransactionStatePending: + return "Pending" + case TransactionStatePacked: + return "Packed" + case TransactionStateConfirmed: + return "Confirmed" + case TransactionStateExpired: + return "Expired" + case TransactionStateNotFound: + return "Not Found" + default: + return "Unknown" + } +} + // Transaction is the interface implemented by an object that can be verified and processed by // block producers. 
type Transaction interface { diff --git a/blockproducer/interfaces/transaction_gen.go b/blockproducer/interfaces/transaction_gen.go index c61b6bc1d..7a1c151e9 100644 --- a/blockproducer/interfaces/transaction_gen.go +++ b/blockproducer/interfaces/transaction_gen.go @@ -20,6 +20,20 @@ func (z AccountNonce) Msgsize() (s int) { return } +// MarshalHash marshals for hash +func (z TransactionState) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + o = hsp.AppendUint32(o, uint32(z)) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z TransactionState) Msgsize() (s int) { + s = hsp.Uint32Size + return +} + // MarshalHash marshals for hash func (z TransactionType) MarshalHash() (o []byte, err error) { var b []byte diff --git a/blockproducer/metastate.go b/blockproducer/metastate.go index 1dfaf3891..79e19abe9 100644 --- a/blockproducer/metastate.go +++ b/blockproducer/metastate.go @@ -104,10 +104,10 @@ func (s *metaState) loadAccountTokenBalance(addr proto.AccountAddress, var o *types.Account defer func() { log.WithFields(log.Fields{ - "account": addr.String(), - "balance": b, - "tokenType": tokenType.String(), - "loaded": loaded, + "account": addr, + "balance": b, + "token_type": tokenType, + "loaded": loaded, }).Debug("queried token account") }() @@ -124,7 +124,7 @@ func (s *metaState) loadAccountTokenBalance(addr proto.AccountAddress, func (s *metaState) storeBaseAccount(k proto.AccountAddress, v *types.Account) (err error) { log.WithFields(log.Fields{ - "addr": k.String(), + "addr": k, "account": v, }).Debug("store account") // Since a transfer tx may create an empty receiver account, this method should try to cover @@ -308,7 +308,7 @@ func (s *metaState) transferAccountToken(transfer *types.Transfer) (err error) { } if realSender != transfer.Sender { err = errors.Wrapf(ErrInvalidSender, - "applyTx failed: real sender %s, sender %s", realSender.String(), 
transfer.Sender.String()) + "applyTx failed: real sender %s, sender %s", realSender, transfer.Sender) log.WithError(err).Warning("public key not match sender in applyTransaction") return } @@ -458,7 +458,7 @@ func (s *metaState) createSQLChain(addr proto.AccountAddress, id proto.DatabaseI Users: []*types.SQLChainUser{ { Address: addr, - Permission: types.Admin, + Permission: types.UserPermissionFromRole(types.Admin), }, }, } @@ -466,7 +466,7 @@ func (s *metaState) createSQLChain(addr proto.AccountAddress, id proto.DatabaseI } func (s *metaState) addSQLChainUser( - k proto.DatabaseID, addr proto.AccountAddress, perm types.UserPermission) (_ error, + k proto.DatabaseID, addr proto.AccountAddress, perm *types.UserPermission) (_ error, ) { var ( src, dst *types.SQLChainProfile @@ -515,8 +515,7 @@ func (s *metaState) deleteSQLChainUser(k proto.DatabaseID, addr proto.AccountAdd } func (s *metaState) alterSQLChainUser( - k proto.DatabaseID, addr proto.AccountAddress, perm types.UserPermission) (_ error, -) { + k proto.DatabaseID, addr proto.AccountAddress, perm *types.UserPermission) (_ error) { var ( src, dst *types.SQLChainProfile ok bool @@ -545,7 +544,7 @@ func (s *metaState) nextNonce(addr proto.AccountAddress) (nonce pi.AccountNonce, if o, loaded = s.readonly.accounts[addr]; !loaded { err = ErrAccountNotFound log.WithFields(log.Fields{ - "addr": addr.String(), + "addr": addr, }).WithError(err).Error("unexpected error") return } @@ -622,7 +621,7 @@ func (s *metaState) matchProvidersWithUser(tx *types.CreateDatabase) (err error) } if sender != tx.Owner { err = errors.Wrapf(ErrInvalidSender, "match failed with real sender: %s, sender: %s", - sender.String(), tx.Owner.String()) + sender, tx.Owner) return } @@ -650,8 +649,8 @@ func (s *metaState) matchProvidersWithUser(tx *types.CreateDatabase) (err error) for _, m := range tx.ResourceMeta.TargetMiners { if po, loaded := s.loadProviderObject(m); !loaded { log.WithFields(log.Fields{ - "miner_addr": m.String(), - 
"user_addr": sender.String(), + "miner_addr": m, + "user_addr": sender, }).Error(err) err = ErrNoSuchMiner continue @@ -703,7 +702,7 @@ func (s *metaState) matchProvidersWithUser(tx *types.CreateDatabase) (err error) users := make([]*types.SQLChainUser, 1) users[0] = &types.SQLChainUser{ Address: sender, - Permission: types.Admin, + Permission: types.UserPermissionFromRole(types.Admin), Status: types.Normal, Deposit: minAdvancePayment, AdvancePayment: tx.AdvancePayment, @@ -729,15 +728,17 @@ func (s *metaState) matchProvidersWithUser(tx *types.CreateDatabase) (err error) // create sqlchain sp := &types.SQLChainProfile{ - ID: dbID, - Address: dbAddr, - Period: sqlchainPeriod, - GasPrice: tx.GasPrice, - TokenType: types.Particle, - Owner: sender, - Users: users, - EncodedGenesis: enc.Bytes(), - Miners: miners, + ID: dbID, + Address: dbAddr, + Period: sqlchainPeriod, + GasPrice: tx.GasPrice, + LastUpdatedHeight: 0, + TokenType: types.Particle, + Owner: sender, + Miners: miners, + Users: users, + EncodedGenesis: enc.Bytes(), + Meta: tx.ResourceMeta, } if _, loaded := s.loadSQLChainObject(dbID); loaded { @@ -856,7 +857,7 @@ func isProviderReqMatch(po *types.ProviderProfile, req *types.CreateDatabase) (m if po.TokenType != req.TokenType { err = errors.New("token type mismatch") log.WithError(err).Debugf("miner's token type: %s, user's token type: %s", - po.TokenType.String(), req.TokenType.String()) + po.TokenType, req.TokenType) return } @@ -864,10 +865,16 @@ func isProviderReqMatch(po *types.ProviderProfile, req *types.CreateDatabase) (m } func (s *metaState) updatePermission(tx *types.UpdatePermission) (err error) { + log.WithFields(log.Fields{ + "tx_hash": tx.Hash(), + "sender": tx.GetAccountAddress(), + "db_id": tx.TargetSQLChain, + "target_user": tx.TargetUser, + }).Debug("in updatePermission") sender, err := crypto.PubKeyHash(tx.Signee) if err != nil { log.WithFields(log.Fields{ - "tx": tx.Hash().String(), + "tx": tx.Hash(), }).WithError(err).Error("unexpected 
err") return } @@ -878,7 +885,7 @@ func (s *metaState) updatePermission(tx *types.UpdatePermission) (err error) { }).WithError(ErrDatabaseNotFound).Error("unexpected error in updatePermission") return ErrDatabaseNotFound } - if tx.Permission >= types.NumberOfUserPermission { + if !tx.Permission.IsValid() { log.WithFields(log.Fields{ "permission": tx.Permission, "dbID": tx.TargetSQLChain.DatabaseID(), @@ -886,35 +893,32 @@ func (s *metaState) updatePermission(tx *types.UpdatePermission) (err error) { return ErrInvalidPermission } - // check whether sender is admin and find targetUser - isAdmin := false - numOfAdmin := 0 + // check whether sender has super privilege and find targetUser + numOfSuperUsers := 0 targetUserIndex := -1 for i, u := range so.Users { - isAdmin = isAdmin || (sender == u.Address && u.Permission == types.Admin) - if u.Permission == types.Admin { - numOfAdmin++ + if sender == u.Address && !u.Permission.HasSuperPermission() { + log.WithFields(log.Fields{ + "sender": sender, + "dbID": tx.TargetSQLChain, + }).WithError(ErrAccountPermissionDeny).Error("unexpected error in updatePermission") + return ErrAccountPermissionDeny + } + if u.Permission.HasSuperPermission() { + numOfSuperUsers++ } if tx.TargetUser == u.Address { targetUserIndex = i } } - if !isAdmin { - log.WithFields(log.Fields{ - "sender": sender, - "dbID": tx.TargetSQLChain, - }).WithError(ErrAccountPermissionDeny).Error("unexpected error in updatePermission") - return ErrAccountPermissionDeny - } - // return error if number of Admin <= 1 and Admin want to revoke permission of itself - if numOfAdmin <= 1 && tx.TargetUser == sender && tx.Permission != types.Admin { - err = ErrNoAdminLeft + if numOfSuperUsers <= 1 && tx.TargetUser == sender && !tx.Permission.HasSuperPermission() { + err = ErrNoSuperUserLeft log.WithFields(log.Fields{ - "sender": sender.String(), - "dbID": tx.TargetSQLChain.String(), - "targetUser": tx.TargetUser.String(), + "sender": sender, + "dbID": tx.TargetSQLChain, + 
"targetUser": tx.TargetUser, }).WithError(err).Warning("in updatePermission") return } @@ -945,20 +949,19 @@ func (s *metaState) updateKeys(tx *types.IssueKeys) (err error) { } // check sender's permission - isAdmin := false for _, user := range so.Users { - if sender == user.Address && user.Permission == types.Admin { - isAdmin = true + if sender == user.Address { + if !user.Permission.HasSuperPermission() { + log.WithFields(log.Fields{ + "sender": sender, + "dbID": tx.TargetSQLChain, + }).WithError(ErrAccountPermissionDeny).Error("unexpected error in updateKeys") + return ErrAccountPermissionDeny + } + break } } - if !isAdmin { - log.WithFields(log.Fields{ - "sender": sender, - "dbID": tx.TargetSQLChain, - }).WithError(ErrAccountPermissionDeny).Error("unexpected error in updateKeys") - return ErrAccountPermissionDeny - } // update miner's key keyMap := make(map[proto.AccountAddress]string) @@ -1006,7 +1009,7 @@ func (s *metaState) updateBilling(tx *types.UpdateBilling) (err error) { } for _, userCost := range tx.Users { - log.Debugf("update billing user cost: %s, cost: %d", userCost.User.String(), userCost.Cost) + log.Debugf("update billing user cost: %s, cost: %d", userCost.User, userCost.Cost) costMap[userCost.User] = userCost.Cost if _, ok := userMap[userCost.User]; !ok { userMap[userCost.User] = make(map[proto.AccountAddress]uint64) @@ -1082,7 +1085,7 @@ func (s *metaState) transferSQLChainTokenBalance(transfer *types.Transfer) (err if realSender != transfer.Sender { err = errors.Wrapf(ErrInvalidSender, - "applyTx failed: real sender %s, sender %s", realSender.String(), transfer.Sender.String()) + "applyTx failed: real sender %s, sender %s", realSender, transfer.Sender) log.WithError(err).Warning("public key not match sender in applyTransaction") return } @@ -1097,7 +1100,7 @@ func (s *metaState) transferSQLChainTokenBalance(transfer *types.Transfer) (err err = ErrDatabaseNotFound log.WithFields(log.Fields{ "dbid": transfer.Receiver.DatabaseID(), - "sender": 
transfer.Sender.String(), + "sender": transfer.Sender, }).WithError(err).Warning("database not exist in transferSQLChainTokenBalance") return } @@ -1105,7 +1108,7 @@ func (s *metaState) transferSQLChainTokenBalance(transfer *types.Transfer) (err err = ErrWrongTokenType log.WithFields(log.Fields{ "dbid": transfer.Receiver.DatabaseID(), - "sender": transfer.Sender.String(), + "sender": transfer.Sender, }).WithError(err).Warning("error token type in transferSQLChainTokenBalance") return } @@ -1113,10 +1116,10 @@ func (s *metaState) transferSQLChainTokenBalance(transfer *types.Transfer) (err if account.TokenBalance[transfer.TokenType] < transfer.Amount { err = ErrInsufficientBalance log.WithFields(log.Fields{ - "addr": account.Address.String(), + "addr": account.Address, "amount": account.TokenBalance[transfer.TokenType], "transfer_amount": transfer.Amount, - "token_type": transfer.TokenType.String(), + "token_type": transfer.TokenType, }).WithError(err).Warning("in transferSQLChainTokenBalance") return } @@ -1245,7 +1248,7 @@ func (s *metaState) generateGenesisBlock(dbID proto.DatabaseID, resourceMeta typ } func (s *metaState) apply(t pi.Transaction) (err error) { - log.Infof("get tx: %s", t.GetTransactionType().String()) + log.Infof("get tx: %s", t.GetTransactionType()) // NOTE(leventeliu): bypass pool in this method. var ( addr = t.GetAccountAddress() @@ -1286,6 +1289,35 @@ func (s *metaState) makeCopy() *metaState { } } +// compileChanges compiles storage procedures for changes in dirty map. 
+func (s *metaState) compileChanges( + dst []storageProcedure) (results []storageProcedure, +) { + results = dst + for k, v := range s.dirty.accounts { + if v != nil { + results = append(results, updateAccount(v)) + } else { + results = append(results, deleteAccount(k)) + } + } + for k, v := range s.dirty.databases { + if v != nil { + results = append(results, updateShardChain(v)) + } else { + results = append(results, deleteShardChain(k)) + } + } + for k, v := range s.dirty.provider { + if v != nil { + results = append(results, updateProvider(v)) + } else { + results = append(results, deleteProvider(k)) + } + } + return +} + func minDeposit(gasPrice uint64, minerNumber uint64) uint64 { return gasPrice * uint64(conf.GConf.QPS) * conf.GConf.BillingBlockCount * minerNumber diff --git a/blockproducer/metastate_test.go b/blockproducer/metastate_test.go index f6bca9ac2..21fe2ae02 100644 --- a/blockproducer/metastate_test.go +++ b/blockproducer/metastate_test.go @@ -106,11 +106,11 @@ func TestMetaState(t *testing.T) { Convey("The metaState should failed to operate SQLChain for unknown user", func() { err = ms.createSQLChain(addr1, dbID1) So(err, ShouldEqual, ErrAccountNotFound) - err = ms.addSQLChainUser(dbID1, addr1, types.Admin) + err = ms.addSQLChainUser(dbID1, addr1, types.UserPermissionFromRole(types.Admin)) So(err, ShouldEqual, ErrDatabaseNotFound) err = ms.deleteSQLChainUser(dbID1, addr1) So(err, ShouldEqual, ErrDatabaseNotFound) - err = ms.alterSQLChainUser(dbID1, addr1, types.Write) + err = ms.alterSQLChainUser(dbID1, addr1, types.UserPermissionFromRole(types.Write)) So(err, ShouldEqual, ErrDatabaseNotFound) }) Convey("When new account and database objects are stored", func() { @@ -170,9 +170,9 @@ func TestMetaState(t *testing.T) { So(err, ShouldEqual, ErrDatabaseExists) }) Convey("When new SQLChain users are added", func() { - err = ms.addSQLChainUser(dbID3, addr2, types.Write) + err = ms.addSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Write)) 
So(err, ShouldBeNil) - err = ms.addSQLChainUser(dbID3, addr2, types.Write) + err = ms.addSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Write)) So(err, ShouldEqual, ErrDatabaseUserExists) Convey("The metaState object should be ok to delete user", func() { err = ms.deleteSQLChainUser(dbID3, addr2) @@ -181,9 +181,9 @@ func TestMetaState(t *testing.T) { So(err, ShouldBeNil) }) Convey("The metaState object should be ok to alter user", func() { - err = ms.alterSQLChainUser(dbID3, addr2, types.Read) + err = ms.alterSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Read)) So(err, ShouldBeNil) - err = ms.alterSQLChainUser(dbID3, addr2, types.Write) + err = ms.alterSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Write)) So(err, ShouldBeNil) }) Convey("When metaState change is committed", func() { @@ -204,9 +204,9 @@ func TestMetaState(t *testing.T) { So(err, ShouldBeNil) }) Convey("The metaState object should be ok to alter user", func() { - err = ms.alterSQLChainUser(dbID3, addr2, types.Read) + err = ms.alterSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Read)) So(err, ShouldBeNil) - err = ms.alterSQLChainUser(dbID3, addr2, types.Write) + err = ms.alterSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Write)) So(err, ShouldBeNil) }) }) @@ -214,9 +214,9 @@ func TestMetaState(t *testing.T) { Convey("When metaState change is committed", func() { ms.commit() Convey("The metaState object should be ok to add users for database", func() { - err = ms.addSQLChainUser(dbID3, addr2, types.Write) + err = ms.addSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Write)) So(err, ShouldBeNil) - err = ms.addSQLChainUser(dbID3, addr2, types.Write) + err = ms.addSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Write)) So(err, ShouldEqual, ErrDatabaseUserExists) }) Convey("The metaState object should report database exists", func() { @@ -992,7 +992,7 @@ func TestMetaState(t *testing.T) { 
UpdatePermissionHeader: types.UpdatePermissionHeader{ TargetSQLChain: addr1, TargetUser: addr3, - Permission: types.Read, + Permission: types.UserPermissionFromRole(types.Read), Nonce: cd1.Nonce + 1, }, } @@ -1000,7 +1000,7 @@ func TestMetaState(t *testing.T) { So(err, ShouldBeNil) err = ms.apply(&up) So(errors.Cause(err), ShouldEqual, ErrDatabaseNotFound) - up.Permission = 4 + up.Permission = types.UserPermissionFromRole(types.Void) up.TargetSQLChain = dbAccount err = up.Sign(privKey1) So(err, ShouldBeNil) @@ -1009,7 +1009,7 @@ func TestMetaState(t *testing.T) { // test permission update // addr1(admin) update addr3 as admin up.TargetUser = addr3 - up.Permission = types.Admin + up.Permission = types.UserPermissionFromRole(types.Admin) err = up.Sign(privKey1) So(err, ShouldBeNil) err = ms.apply(&up) @@ -1018,7 +1018,7 @@ func TestMetaState(t *testing.T) { // addr3(admin) update addr4 as read up.TargetUser = addr4 up.Nonce = cd2.Nonce - up.Permission = types.Read + up.Permission = types.UserPermissionFromRole(types.Read) err = up.Sign(privKey3) So(err, ShouldBeNil) err = ms.apply(&up) @@ -1034,12 +1034,12 @@ func TestMetaState(t *testing.T) { ms.commit() // addr3(admin) update addr3(admin) as read fail up.TargetUser = addr3 - up.Permission = types.Read + up.Permission = types.UserPermissionFromRole(types.Read) up.Nonce = up.Nonce + 1 err = up.Sign(privKey3) So(err, ShouldBeNil) err = ms.apply(&up) - So(errors.Cause(err), ShouldEqual, ErrNoAdminLeft) + So(errors.Cause(err), ShouldEqual, ErrNoSuperUserLeft) // addr1(read) update addr3(admin) fail up.Nonce = cd1.Nonce + 2 err = up.Sign(privKey1) @@ -1050,15 +1050,18 @@ func TestMetaState(t *testing.T) { co, loaded = ms.loadSQLChainObject(dbID) for _, user := range co.Users { if user.Address == addr1 { - So(user.Permission, ShouldEqual, types.Read) + So(user.Permission, ShouldNotBeNil) + So(user.Permission.Role, ShouldEqual, types.Read) continue } if user.Address == addr3 { - So(user.Permission, ShouldEqual, 
types.Admin) + So(user.Permission, ShouldNotBeNil) + So(user.Permission.Role, ShouldEqual, types.Admin) continue } if user.Address == addr4 { - So(user.Permission, ShouldEqual, types.Read) + So(user.Permission, ShouldNotBeNil) + So(user.Permission.Role, ShouldEqual, types.Read) continue } } diff --git a/blockproducer/rpc.go b/blockproducer/rpc.go index dd244bd9d..1b021614b 100644 --- a/blockproducer/rpc.go +++ b/blockproducer/rpc.go @@ -23,13 +23,13 @@ import ( "strings" "time" + pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/crypto" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/types" - "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/pkg/errors" ) @@ -99,13 +99,8 @@ func (s *ChainRPCService) NextAccountNonce( } // AddTx is the RPC method to add a transaction. -func (s *ChainRPCService) AddTx(req *types.AddTxReq, resp *types.AddTxResp) (err error) { - if req.Tx == nil { - return ErrUnknownTransactionType - } - log.Infof("transaction type: %s, hash: %s, address: %s", - req.Tx.GetTransactionType().String(), req.Tx.Hash(), req.Tx.GetAccountAddress()) - s.chain.addTx(req.Tx) +func (s *ChainRPCService) AddTx(req *types.AddTxReq, _ *types.AddTxResp) (err error) { + s.chain.addTx(req) return } @@ -130,10 +125,23 @@ func (s *ChainRPCService) QuerySQLChainProfile(req *types.QuerySQLChainProfileRe return } +// QueryTxState is the RPC method to query a transaction state. +func (s *ChainRPCService) QueryTxState( + req *types.QueryTxStateReq, resp *types.QueryTxStateResp) (err error, +) { + var state pi.TransactionState + if state, err = s.chain.queryTxState(req.Hash); err != nil { + return + } + resp.Hash = req.Hash + resp.State = state + return +} + // Sub is the RPC method to subscribe some event. 
func (s *ChainRPCService) Sub(req *types.SubReq, resp *types.SubResp) (err error) { - return s.chain.bs.Subscribe(req.Topic, func(request interface{}, response interface{}) { - s.chain.cl.CallNode(req.NodeID.ToNodeID(), req.Callback, request, response) + return s.chain.chainBus.Subscribe(req.Topic, func(request interface{}, response interface{}) { + s.chain.caller.CallNode(req.NodeID.ToNodeID(), req.Callback, request, response) }) } diff --git a/blockproducer/storage.go b/blockproducer/storage.go index 344772682..414bed504 100644 --- a/blockproducer/storage.go +++ b/blockproducer/storage.go @@ -19,6 +19,7 @@ package blockproducer import ( "bytes" "database/sql" + "encoding/json" pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/crypto/hash" @@ -35,40 +36,77 @@ var ( ddls = [...]string{ // Chain state tables `CREATE TABLE IF NOT EXISTS "blocks" ( - "height" INT, - "hash" TEXT, - "parent" TEXT, - "encoded" BLOB, - UNIQUE ("hash") -)`, + "height" INT, + "hash" TEXT, + "parent" TEXT, + "encoded" BLOB, + UNIQUE ("hash") + );`, + `CREATE TABLE IF NOT EXISTS "txPool" ( - "type" INT, - "hash" TEXT, - "encoded" BLOB, - UNIQUE ("hash") -)`, + "type" INT, + "hash" TEXT, + "encoded" BLOB, + UNIQUE ("hash") + );`, + `CREATE TABLE IF NOT EXISTS "irreversible" ( - "id" INT, - "hash" TEXT, - UNIQUE ("id") -)`, + "id" INT, + "hash" TEXT, + UNIQUE ("id") + );`, + // Meta state tables `CREATE TABLE IF NOT EXISTS "accounts" ( - "address" TEXT, - "encoded" BLOB, - UNIQUE ("address") -)`, + "address" TEXT, + "encoded" BLOB, + UNIQUE ("address") + );`, + `CREATE TABLE IF NOT EXISTS "shardChain" ( - "address" TEXT, - "id" TEXT, - "encoded" BLOB, - UNIQUE ("address", "id") -)`, + "address" TEXT, + "id" TEXT, + "encoded" BLOB, + UNIQUE ("address", "id") + );`, + `CREATE TABLE IF NOT EXISTS "provider" ( - "address" TEXT, - "encoded" BLOB, - UNIQUE ("address") -)`, + "address" TEXT, + "encoded" BLOB, + UNIQUE ("address") + );`, + + `CREATE 
TABLE IF NOT EXISTS "indexed_blocks" ( + "height" INTEGER PRIMARY KEY, + "hash" TEXT, + "timestamp" INTEGER, + "version" INTEGER, + "producer" TEXT, + "merkle_root" TEXT, + "parent" TEXT, + "tx_count" INTEGER + );`, + + `CREATE INDEX IF NOT EXISTS "idx__indexed_blocks__hash" ON "indexed_blocks" ("hash");`, + `CREATE INDEX IF NOT EXISTS "idx__indexed_blocks__timestamp" ON "indexed_blocks" ("timestamp" DESC);`, + + `CREATE TABLE IF NOT EXISTS "indexed_transactions" ( + "block_height" INTEGER, + "tx_index" INTEGER, + "hash" TEXT, + "block_hash" TEXT, + "timestamp" INTEGER, + "tx_type" INTEGER, + "address" TEXT, + "raw" TEXT, + PRIMARY KEY ("block_height", "tx_index") + );`, + + `CREATE INDEX IF NOT EXISTS "idx__indexed_transactions__hash" ON "indexed_transactions" ("hash");`, + `CREATE INDEX IF NOT EXISTS "idx__indexed_transactions__block_hash" ON "indexed_transactions" ("block_hash");`, + `CREATE INDEX IF NOT EXISTS "idx__indexed_transactions__timestamp" ON "indexed_transactions" ("timestamp" DESC);`, + `CREATE INDEX IF NOT EXISTS "idx__indexed_transactions__tx_type__timestamp" ON "indexed_transactions" ("tx_type", "timestamp" DESC);`, + `CREATE INDEX IF NOT EXISTS "idx__indexed_transactions__address__timestamp" ON "indexed_transactions" ("address", "timestamp" DESC);`, } ) @@ -156,7 +194,49 @@ func addTx(t pi.Transaction) storageProcedure { uint32(t.GetTransactionType()), t.Hash().String(), enc.Bytes()) - return + return err + } +} + +func buildBlockIndex(height uint32, b *types.BPBlock) storageProcedure { + return func(tx *sql.Tx) (err error) { + var p = b.Producer() + if _, err = tx.Exec(`INSERT OR REPLACE INTO "indexed_blocks" + ("height", "hash", "timestamp", "version", "producer", + "merkle_root", "parent", "tx_count") VALUES (?,?,?,?,?,?,?,?)`, + height, + b.BlockHash().String(), + b.Timestamp().UnixNano(), + b.SignedHeader.Version, + p.String(), + b.SignedHeader.MerkleRoot.String(), + b.ParentHash().String(), + len(b.Transactions), + ); err != nil { + return 
err + } + + for txIndex, t := range b.Transactions { + var ( + addr = t.GetAccountAddress() + raw, _ = json.Marshal(t) + ) + if _, err := tx.Exec(`INSERT OR REPLACE INTO "indexed_transactions" + ("block_height", "tx_index", "hash", "block_hash", "timestamp", + "tx_type", "address", "raw") VALUES (?,?,?,?,?,?,?,?)`, + height, + txIndex, + t.Hash().String(), + b.BlockHash().String(), + t.GetTimestamp().UnixNano(), + t.GetTransactionType(), + addr.String(), + string(raw), + ); err != nil { + return err + } + } + return nil } } diff --git a/client/_example/gdpaverage.go b/client/_example/gdpaverage/gdpaverage.go similarity index 97% rename from client/_example/gdpaverage.go rename to client/_example/gdpaverage/gdpaverage.go index f3a1e098e..4d1577061 100644 --- a/client/_example/gdpaverage.go +++ b/client/_example/gdpaverage/gdpaverage.go @@ -21,6 +21,7 @@ import ( "flag" "github.com/CovenantSQL/CovenantSQL/client" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" ) @@ -28,11 +29,13 @@ func main() { log.SetLevel(log.DebugLevel) var config, password, dsn string - flag.StringVar(&config, "config", "./conf/config.yaml", "config file path") + flag.StringVar(&config, "config", "~/.cql/config.yaml", "config file path") flag.StringVar(&dsn, "dsn", "", "database url") flag.StringVar(&password, "password", "", "master key password for covenantsql") flag.Parse() + config = utils.HomeDirExpand(config) + err := client.Init(config, []byte(password)) if err != nil { log.Fatal(err) diff --git a/client/_example/simple.go b/client/_example/simple.go index 6b24f4690..75ca77305 100644 --- a/client/_example/simple.go +++ b/client/_example/simple.go @@ -21,33 +21,27 @@ import ( "flag" "fmt" - "github.com/CovenantSQL/CovenantSQL/client" + _ "github.com/CovenantSQL/CovenantSQL/client" "github.com/CovenantSQL/CovenantSQL/utils/log" ) func main() { log.SetLevel(log.InfoLevel) - var config, password, dsn string + var dsn string - flag.StringVar(&config, 
"config", "./conf/config.yaml", "config file path") - flag.StringVar(&dsn, "dsn", "", "database url") - flag.StringVar(&password, "password", "", "master key password for covenantsql") + flag.StringVar(&dsn, "dsn", "", "Database url") flag.Parse() - err := client.Init(config, []byte(password)) - if err != nil { - log.Fatal(err) - } - - if dsn == "" { - meta := client.ResourceMeta{} - meta.Node = 2 - dsn, err = client.Create(meta) - if err != nil { - log.Fatal(err) - } - defer client.Drop(dsn) - } + // If your CovenantSQL config.yaml is not in ~/.cql/config.yaml + // Uncomment and edit following code + /* + config := "/data/myconfig/config.yaml" + password := "mypassword" + err := client.Init(config, []byte(password)) + if err != nil { + log.Fatal(err) + } + */ db, err := sql.Open("covenantsql", dsn) if err != nil { diff --git a/client/conn.go b/client/conn.go index b0ef613af..5715cad09 100644 --- a/client/conn.go +++ b/client/conn.go @@ -31,6 +31,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/CovenantSQL/CovenantSQL/utils/trace" "github.com/pkg/errors" ) @@ -150,7 +151,7 @@ ackWorkerLoop: oneTime.Do(func() { pc = rpc.NewPersistentCaller(c.pCaller.TargetID) }) - if err = ack.Sign(c.parent.privKey, false); err != nil { + if err = ack.Sign(c.parent.privKey); err != nil { log.WithField("target", pc.TargetID).WithError(err).Error("failed to sign ack") continue } @@ -158,13 +159,13 @@ ackWorkerLoop: var ackRes types.AckResponse // send ack back if err = pc.Call(route.DBSAck.String(), ack, &ackRes); err != nil { - log.WithError(err).Warning("send ack failed") + log.WithError(err).Debug("send ack failed") continue } } if pc != nil { - pc.CloseStream() + pc.Close() } log.Debug("ack worker quiting") @@ -173,7 +174,7 @@ ackWorkerLoop: func (c *pconn) close() error { c.stopAckWorkers() if c.pCaller != nil { - c.pCaller.CloseStream() + c.pCaller.Close() } return nil } 
@@ -237,6 +238,8 @@ func (c *conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, e // ExecContext implements the driver.ExecerContext.ExecContext method. func (c *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (result driver.Result, err error) { + defer trace.StartRegion(ctx, "dbExec").End() + if atomic.LoadInt32(&c.closed) != 0 { err = driver.ErrBadConn return @@ -246,7 +249,7 @@ func (c *conn) ExecContext(ctx context.Context, query string, args []driver.Name sq := convertQuery(query, args) var affectedRows, lastInsertID int64 - if affectedRows, lastInsertID, _, err = c.addQuery(types.WriteQuery, sq); err != nil { + if affectedRows, lastInsertID, _, err = c.addQuery(ctx, types.WriteQuery, sq); err != nil { return } @@ -260,6 +263,8 @@ func (c *conn) ExecContext(ctx context.Context, query string, args []driver.Name // QueryContext implements the driver.QueryerContext.QueryContext method. func (c *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (rows driver.Rows, err error) { + defer trace.StartRegion(ctx, "dbQuery").End() + if atomic.LoadInt32(&c.closed) != 0 { err = driver.ErrBadConn return @@ -267,7 +272,7 @@ func (c *conn) QueryContext(ctx context.Context, query string, args []driver.Nam // TODO(xq262144): make use of the ctx argument sq := convertQuery(query, args) - _, _, rows, err = c.addQuery(types.ReadQuery, sq) + _, _, rows, err = c.addQuery(ctx, types.ReadQuery, sq) return } @@ -289,7 +294,7 @@ func (c *conn) Commit() (err error) { if len(c.queries) > 0 { // send query - if _, _, _, err = c.sendQuery(types.WriteQuery, c.queries); err != nil { + if _, _, _, err = c.sendQuery(context.Background(), types.WriteQuery, c.queries); err != nil { return } } @@ -319,7 +324,7 @@ func (c *conn) Rollback() error { return nil } -func (c *conn) addQuery(queryType types.QueryType, query *types.Query) (affectedRows int64, lastInsertID int64, rows driver.Rows, err error) { +func (c *conn) 
addQuery(ctx context.Context, queryType types.QueryType, query *types.Query) (affectedRows int64, lastInsertID int64, rows driver.Rows, err error) { if c.inTransaction { // check query type, enqueue query if queryType == types.ReadQuery { @@ -344,10 +349,10 @@ func (c *conn) addQuery(queryType types.QueryType, query *types.Query) (affected "args": query.Args, }).Debug("execute query") - return c.sendQuery(queryType, []types.Query{*query}) + return c.sendQuery(ctx, queryType, []types.Query{*query}) } -func (c *conn) sendQuery(queryType types.QueryType, queries []types.Query) (affectedRows int64, lastInsertID int64, rows driver.Rows, err error) { +func (c *conn) sendQuery(ctx context.Context, queryType types.QueryType, queries []types.Query) (affectedRows int64, lastInsertID int64, rows driver.Rows, err error) { var uc *pconn // peer connection used to execute the queries uc = c.leader @@ -399,11 +404,6 @@ func (c *conn) sendQuery(queryType types.QueryType, queries []types.Query) (affe if err = uc.pCaller.Call(route.DBSQuery.String(), req, &response); err != nil { return } - - // verify response - if err = response.Verify(); err != nil { - return - } rows = newRows(&response) if queryType == types.WriteQuery { @@ -412,15 +412,19 @@ func (c *conn) sendQuery(queryType types.QueryType, queries []types.Query) (affe } // build ack - uc.ackCh <- &types.Ack{ - Header: types.SignedAckHeader{ - AckHeader: types.AckHeader{ - Response: response.Header, - NodeID: c.localNodeID, - Timestamp: getLocalTime(), + func() { + defer trace.StartRegion(ctx, "ackEnqueue").End() + uc.ackCh <- &types.Ack{ + Header: types.SignedAckHeader{ + AckHeader: types.AckHeader{ + Response: response.Header.ResponseHeader, + ResponseHash: response.Header.Hash(), + NodeID: c.localNodeID, + Timestamp: getLocalTime(), + }, }, - }, - } + } + }() return } diff --git a/client/driver.go b/client/driver.go index 0bf78092b..0f0ca1eb5 100644 --- a/client/driver.go +++ b/client/driver.go @@ -31,11 +31,13 @@ import 
( "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/types" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/pkg/errors" ) @@ -62,12 +64,15 @@ var ( connIDAvail []uint64 globalSeqNo uint64 randSource = rand.New(rand.NewSource(time.Now().UnixNano())) + + defaultConfigFile = "~/.cql/config.yaml" ) func init() { d := new(covenantSQLDriver) sql.Register(DBScheme, d) sql.Register(DBSchemeAlias, d) + log.Debug("CovenantSQL driver registered.") } // covenantSQLDriver implements sql.Driver interface. @@ -82,8 +87,10 @@ func (d *covenantSQLDriver) Open(dsn string) (conn driver.Conn, err error) { } if atomic.LoadUint32(&driverInitialized) == 0 { - err = ErrNotInitialized - return + err = defaultInit() + if err != nil && err != ErrAlreadyInitialized { + return + } } return newConn(cfg) @@ -96,6 +103,18 @@ type ResourceMeta struct { AdvancePayment uint64 } +func defaultInit() (err error) { + configFile := utils.HomeDirExpand(defaultConfigFile) + if configFile == defaultConfigFile { + //System not support ~ dir, need Init manually. + log.Debugf("Could not find CovenantSQL default config location: %v", configFile) + return ErrNotInitialized + } + + log.Debugf("Using CovenantSQL default config location: %v", configFile) + return Init(configFile, []byte("")) +} + // Init defines init process for client. 
func Init(configFile string, masterKey []byte) (err error) { if !atomic.CompareAndSwapUint32(&driverInitialized, 0, 1) { @@ -163,6 +182,7 @@ func Create(meta ResourceMeta) (dsn string, err error) { meta.AdvancePayment = DefaultAdvancePayment } + req.TTL = 1 req.Tx = types.NewCreateDatabase(&types.CreateDatabaseHeader{ Owner: clientAddr, ResourceMeta: meta.ResourceMeta, @@ -195,8 +215,15 @@ func WaitDBCreation(ctx context.Context, dsn string) (err error) { if err != nil { return } + + db, err := sql.Open("covenantsql", dsn) + defer db.Close() + if err != nil { + return + } + // wait for creation - err = bp.WaitDatabaseCreation(ctx, proto.DatabaseID(dsnCfg.DatabaseID), nil, 3*time.Second) + err = bp.WaitDatabaseCreation(ctx, proto.DatabaseID(dsnCfg.DatabaseID), db, 3*time.Second) return } @@ -252,7 +279,7 @@ func GetTokenBalance(tt types.TokenType) (balance uint64, err error) { // UpdatePermission sends UpdatePermission transaction to chain. func UpdatePermission(targetUser proto.AccountAddress, - targetChain proto.AccountAddress, perm types.UserPermission) (err error) { + targetChain proto.AccountAddress, perm *types.UserPermission) (txHash hash.Hash, err error) { if atomic.LoadUint32(&driverInitialized) == 0 { err = ErrNotInitialized return @@ -299,11 +326,14 @@ func UpdatePermission(targetUser proto.AccountAddress, return } + txHash = up.Hash() return } // TransferToken send Transfer transaction to chain. -func TransferToken(targetUser proto.AccountAddress, amount uint64, tokenType types.TokenType) (err error) { +func TransferToken(targetUser proto.AccountAddress, amount uint64, tokenType types.TokenType) ( + txHash hash.Hash, err error, +) { if atomic.LoadUint32(&driverInitialized) == 0 { err = ErrNotInitialized return @@ -351,9 +381,55 @@ func TransferToken(targetUser proto.AccountAddress, amount uint64, tokenType typ return } + txHash = tran.Hash() return } +// WaitTxConfirmation waits for the transaction with target hash txHash to be confirmed. 
It also +// returns if any error occurs or a final state is returned from BP. +func WaitTxConfirmation( + ctx context.Context, txHash hash.Hash) (state interfaces.TransactionState, err error, +) { + var ( + ticker = time.NewTicker(1 * time.Second) + method = route.MCCQueryTxState + req = &types.QueryTxStateReq{Hash: txHash} + resp = &types.QueryTxStateResp{} + ) + defer ticker.Stop() + for { + if err = requestBP(method, req, resp); err != nil { + err = errors.Wrapf(err, "failed to call %s", method) + return + } + + state = resp.State + log.WithFields(log.Fields{ + "tx_hash": txHash, + "tx_state": state, + }).Debug("waiting for tx confirmation") + + switch state { + case interfaces.TransactionStatePending: + case interfaces.TransactionStatePacked: + case interfaces.TransactionStateConfirmed, + interfaces.TransactionStateExpired, + interfaces.TransactionStateNotFound: + return + default: + err = errors.Errorf("unknown transaction state %d", state) + return + } + + select { + case <-ticker.C: + case <-ctx.Done(): + err = ctx.Err() + return + } + } +} + func getNonce(addr proto.AccountAddress) (nonce interfaces.AccountNonce, err error) { nonceReq := new(types.NextAccountNonceReq) nonceResp := new(types.NextAccountNonceResp) @@ -422,12 +498,12 @@ func runPeerListUpdater() (err error) { if _, err = getPeers(dbID, privKey); err != nil { log.WithField("db", dbID). WithError(err). - Warning("update peers failed") + Debug("update peers failed") // TODO(xq262144), better rpc remote error judgement if strings.Contains(err.Error(), bp.ErrNoSuchDatabase.Error()) { log.WithField("db", dbID). 
- Warning("database no longer exists, stopped peers update") + Warning("database no longer exists, stopping peers update") peerList.Delete(dbID) } } @@ -485,14 +561,13 @@ func getPeers(dbID proto.DatabaseID, privKey *asymmetric.PrivateKey) (peers *pro profileReq.DBID = dbID err = rpc.RequestBP(route.MCCQuerySQLChainProfile.String(), profileReq, profileResp) if err != nil { - log.WithError(err).Warning("get sqlchain profile failed in getPeers") + err = errors.Wrap(err, "get sqlchain profile failed in getPeers") return } nodeIDs := make([]proto.NodeID, len(profileResp.Profile.Miners)) if len(profileResp.Profile.Miners) <= 0 { - err = ErrInvalidProfile - log.WithError(err).Warning("unexpected error in getPeers") + err = errors.Wrap(ErrInvalidProfile, "unexpected error in getPeers") return } for i, mi := range profileResp.Profile.Miners { @@ -506,7 +581,7 @@ func getPeers(dbID proto.DatabaseID, privKey *asymmetric.PrivateKey) (peers *pro } err = peers.Sign(privKey) if err != nil { - log.WithError(err).Warning("sign peers failed in getPeers") + err = errors.Wrap(err, "sign peers failed in getPeers") return } diff --git a/client/driver_test.go b/client/driver_test.go index 7425d1f85..c01ba54cf 100644 --- a/client/driver_test.go +++ b/client/driver_test.go @@ -23,6 +23,7 @@ import ( "testing" "time" + bp "github.com/CovenantSQL/CovenantSQL/blockproducer" "github.com/CovenantSQL/CovenantSQL/crypto" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" @@ -68,10 +69,13 @@ func TestCreate(t *testing.T) { var dsn string dsn, err = Create(ResourceMeta{}) So(err, ShouldBeNil) + dsnCfg, err := ParseDSN(dsn) + So(err, ShouldBeNil) waitCtx, cancelWait := context.WithTimeout(context.Background(), time.Nanosecond) defer cancelWait() - err = WaitDBCreation(waitCtx, dsn) + // should not use client.WaitDBCreation, sql.Open is not supported in this test case + err = bp.WaitDatabaseCreation(waitCtx, proto.DatabaseID(dsnCfg.DatabaseID), 
nil, 3*time.Second) So(err, ShouldResemble, context.DeadlineExceeded) // Calculate database ID @@ -90,9 +94,10 @@ func TestCreate(t *testing.T) { UseLeader: true, }) - waitCtx2, cancelWait2 := context.WithTimeout(context.Background(), time.Minute) + waitCtx2, cancelWait2 := context.WithTimeout(context.Background(), 5*time.Minute) defer cancelWait2() - err = WaitDBCreation(waitCtx2, dsn) + // should not use client.WaitDBCreation, sql.Open is not supported in this test case + err = bp.WaitDatabaseCreation(waitCtx2, proto.DatabaseID(dsnCfg.DatabaseID), nil, 3*time.Second) So(err, ShouldBeNil) }) } diff --git a/client/helper_test.go b/client/helper_test.go index 0db24c669..e72a71acb 100644 --- a/client/helper_test.go +++ b/client/helper_test.go @@ -18,6 +18,7 @@ package client import ( "bytes" + "database/sql" "fmt" "io/ioutil" "math/rand" @@ -149,8 +150,11 @@ func startTestService() (stopTestService func(), tempDir string, err error) { req = new(types.UpdateService) req.Header.Op = types.CreateDB req.Header.Instance = types.ServiceInstance{ - DatabaseID: dbID, - Peers: peers, + DatabaseID: dbID, + Peers: peers, + ResourceMeta: types.ResourceMeta{ + IsolationLevel: int(sql.LevelReadUncommitted), + }, GenesisBlock: block, } if req.Header.Signee, err = kms.GetLocalPublicKey(); err != nil { @@ -175,7 +179,7 @@ func startTestService() (stopTestService func(), tempDir string, err error) { return } permStat := &types.PermStat{ - Permission: types.Admin, + Permission: types.UserPermissionFromRole(types.Admin), Status: types.Normal, } err = dbms.UpdatePermission(dbID, proto.AccountAddress(addr), permStat) diff --git a/client/tx.go b/client/tx.go index 047d1ccfe..ecb8b8999 100644 --- a/client/tx.go +++ b/client/tx.go @@ -21,6 +21,8 @@ import ( "context" "database/sql" "database/sql/driver" + + "github.com/pkg/errors" ) // ExecuteTx starts a transaction, and runs fn in it @@ -39,11 +41,11 @@ func ExecuteTx( func ExecuteInTx(tx driver.Tx, fn func() error) (err error) { err = fn() 
if err == nil { - // Ignore commit errors. The tx has already been committed by RELEASE. err = tx.Commit() + if err != nil { + err = errors.Wrapf(err, "exec in tx") + } } else { - // We always need to execute a Rollback() so sql.DB releases the - // connection. _ = tx.Rollback() } return diff --git a/cmd/cql-adapter/README.md b/cmd/cql-adapter/README.md index dbc4a3b2e..3787a4ed2 100644 --- a/cmd/cql-adapter/README.md +++ b/cmd/cql-adapter/README.md @@ -42,12 +42,14 @@ Created a new certificate valid for the following names 📜 - "server" The certificate is at "./server.pem" and the key at "./server-key.pem" ✅ + +And move them to ~/.cql/ dir. `````` You can use following interactive command to generate adapter config. ```shell -$ cql-utils -tool adapterconfgen -config config.yaml +$ cql-utils -tool adapterconfgen ListenAddr (default: 0.0.0.0:4661): ⏎ CertificatePath (default: server.pem): ⏎ PrivateKeyPath (default: server-key.pem): ⏎ @@ -58,7 +60,7 @@ WriteCerts (default:): ⏎ StorageDriver (default: covenantsql): ⏎ StorageRoot (default:): ⏎ -$ tail -n 20 config.yaml +$ tail -n 20 ~/.cql/config.yaml ... 
skipping irrelevant configuration Adapter: ListenAddr: 0.0.0.0:4661 @@ -79,7 +81,7 @@ Adapter: Start the adapter by following commands: ```shell -$ cql-adapter -config config.yaml +$ cql-adapter ``` ### API @@ -266,4 +268,4 @@ curl -v https://e.morenodes.com:11108/v1/query --insecure \ ###### Parameters -**database:** database id \ No newline at end of file +**database:** database id diff --git a/cmd/cql-adapter/main.go b/cmd/cql-adapter/main.go index 9259a5704..7866fefb6 100644 --- a/cmd/cql-adapter/main.go +++ b/cmd/cql-adapter/main.go @@ -26,6 +26,7 @@ import ( "time" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" "golang.org/x/sys/unix" ) @@ -40,9 +41,9 @@ var ( ) func init() { - flag.StringVar(&configFile, "config", "./config.yaml", "config file for adapter") - flag.StringVar(&password, "password", "", "master key password") - flag.BoolVar(&asymmetric.BypassSignature, "bypassSignature", false, + flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file for adapter") + flag.StringVar(&password, "password", "", "Master key password") + flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, "Disable signature sign and verify, for testing") flag.BoolVar(&showVersion, "version", false, "Show version information and exit") } @@ -55,6 +56,8 @@ func main() { os.Exit(0) } + configFile = utils.HomeDirExpand(configFile) + flag.Visit(func(f *flag.Flag) { log.Infof("args %#v : %s", f.Name, f.Value) }) diff --git a/cmd/cql-explorer/README.md b/cmd/cql-explorer/README.md index 6bce4872e..3062e854d 100644 --- a/cmd/cql-explorer/README.md +++ b/cmd/cql-explorer/README.md @@ -21,7 +21,7 @@ Generate the main configuration file. 
Same as [Generating Default Config File in Start the explorer by following commands: ```shell -$ cql-explorer -config config.yaml +$ cql-explorer ``` The available options are: @@ -30,13 +30,13 @@ The available options are: $ cql-explorer --help Usage of cql-explorer: -config string - config file path (default "./config.yaml") + Config file path (default "~/.cql/config.yaml") -interval duration - new block check interval for explorer (default 2s) + New block check interval for explorer (default 2s) -listen string - listen address for http explorer api (default "127.0.0.1:4665") + Listen address for http explorer api (default "127.0.0.1:4665") -password string - master key password for covenantsql + Master key password for covenantsql ``` ### API @@ -179,4 +179,4 @@ __hash__: hash of specified tx } } } -``` \ No newline at end of file +``` diff --git a/cmd/cql-explorer/main.go b/cmd/cql-explorer/main.go index 318764389..b1fbca26f 100644 --- a/cmd/cql-explorer/main.go +++ b/cmd/cql-explorer/main.go @@ -28,6 +28,7 @@ import ( "time" "github.com/CovenantSQL/CovenantSQL/client" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" ) @@ -47,10 +48,10 @@ var ( ) func init() { - flag.StringVar(&configFile, "config", "./config.yaml", "config file path") - flag.StringVar(&listenAddr, "listen", "127.0.0.1:4665", "listen address for http explorer api") - flag.DurationVar(&checkInterval, "interval", time.Second*2, "new block check interval for explorer") - flag.StringVar(&password, "password", "", "master key password for covenantsql") + flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file path") + flag.StringVar(&listenAddr, "listen", "127.0.0.1:4665", "Listen address for http explorer api") + flag.DurationVar(&checkInterval, "interval", time.Second*2, "New block check interval for explorer") + flag.StringVar(&password, "password", "", "Master key password for covenantsql") flag.BoolVar(&showVersion, "version", false, 
"Show version information and exit") } @@ -65,6 +66,8 @@ func main() { os.Exit(0) } + configFile = utils.HomeDirExpand(configFile) + flag.Visit(func(f *flag.Flag) { log.Infof("args %#v : %s", f.Name, f.Value) }) diff --git a/cmd/cql-faucet/main.go b/cmd/cql-faucet/main.go index d1f67bd1e..83362c39d 100644 --- a/cmd/cql-faucet/main.go +++ b/cmd/cql-faucet/main.go @@ -28,6 +28,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/client" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" "golang.org/x/sys/unix" ) @@ -42,9 +43,9 @@ var ( ) func init() { - flag.StringVar(&configFile, "config", "config.yaml", "configuration file for covenantsql") - flag.StringVar(&password, "password", "", "master key password for covenantsql") - flag.BoolVar(&asymmetric.BypassSignature, "bypassSignature", false, + flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Configuration file for covenantsql") + flag.StringVar(&password, "password", "", "Master key password for covenantsql") + flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, "Disable signature sign and verify, for testing") flag.BoolVar(&showVersion, "version", false, "Show version information and exit") } @@ -57,6 +58,8 @@ func main() { os.Exit(0) } + configFile = utils.HomeDirExpand(configFile) + flag.Visit(func(f *flag.Flag) { log.Infof("args %#v : %s", f.Name, f.Value) }) diff --git a/cmd/cql-faucet/verifier.go b/cmd/cql-faucet/verifier.go index 2a7961d49..a37989a73 100644 --- a/cmd/cql-faucet/verifier.go +++ b/cmd/cql-faucet/verifier.go @@ -236,7 +236,7 @@ func (v *Verifier) dispenseOne(r *applicationRecord) (err error) { // decode target account address var targetAddress proto.AccountAddress - req := &pt.AddTxReq{} + req := &pt.AddTxReq{TTL: 1} resp := &pt.AddTxResp{} req.Tx = pt.NewTransfer( &pt.TransferHeader{ diff --git a/cmd/cql-fuse/block.go b/cmd/cql-fuse/block.go index 2454035c5..a71d208cf 100644 
--- a/cmd/cql-fuse/block.go +++ b/cmd/cql-fuse/block.go @@ -40,7 +40,7 @@ import ( // BlockSize is the size of each data block. It must not // change throughout the lifetime of the filesystem. -const BlockSize = 4 << 10 // 4KB +const BlockSize = 128 << 10 // 128KB func min(a, b uint64) uint64 { if a < b { diff --git a/cmd/cql-fuse/block_test.go b/cmd/cql-fuse/block_test.go index 6fed70bc2..08de77ee5 100644 --- a/cmd/cql-fuse/block_test.go +++ b/cmd/cql-fuse/block_test.go @@ -49,7 +49,6 @@ import ( bp "github.com/CovenantSQL/CovenantSQL/blockproducer" "github.com/CovenantSQL/CovenantSQL/client" - "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" ) @@ -262,11 +261,6 @@ func initTestDB() (*sql.DB, func()) { log.Errorf("create db failed: %v", err) return nil, stopNodes } - dsnCfg, err := client.ParseDSN(dsn) - if err != nil { - log.Errorf("parse dsn failed: %v", err) - return nil, stopNodes - } db, err := sql.Open("covenantsql", dsn) if err != nil { @@ -277,7 +271,7 @@ func initTestDB() (*sql.DB, func()) { // wait for creation var ctx2, cancel2 = context.WithTimeout(context.Background(), 1*time.Minute) defer cancel2() - err = bp.WaitDatabaseCreation(ctx2, proto.DatabaseID(dsnCfg.DatabaseID), db, 3*time.Second) + err = client.WaitDBCreation(ctx2, dsn) if err != nil { log.Errorf("wait for creation failed: %v", err) return nil, stopNodes @@ -429,6 +423,9 @@ func TestShrinkGrow(t *testing.T) { if data, err = tryGrow(db, data, id, BlockSize*5); err != nil { log.Fatal(err) } + if data, err = tryGrow(db, data, id, BlockSize*999); err != nil { + log.Fatal(err) + } // Shrink it down to 0. 
if data, err = tryShrink(db, data, id, 0); err != nil { diff --git a/cmd/cql-fuse/main.go b/cmd/cql-fuse/main.go index 25a62eb75..a89986dd7 100644 --- a/cmd/cql-fuse/main.go +++ b/cmd/cql-fuse/main.go @@ -76,37 +76,47 @@ import ( "bazil.org/fuse/fs" _ "bazil.org/fuse/fs/fstestutil" "github.com/CovenantSQL/CovenantSQL/client" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" ) var usage = func() { - fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) - fmt.Fprintf(os.Stderr, " %s -config -dsn -mount \n\n", os.Args[0]) + _, _ = fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + _, _ = fmt.Fprintf(os.Stderr, " %s -config -dsn -mount \n\n", os.Args[0]) flag.PrintDefaults() } func main() { - var config, dsn, mountPoint, password string - - flag.StringVar(&config, "config", "./conf/config.yaml", "config file path") - flag.StringVar(&mountPoint, "mount", "./", "dir to mount") - flag.StringVar(&dsn, "dsn", "", "database url") - flag.StringVar(&password, "password", "", "master key password for covenantsql") + var ( + configFile string + dsn string + mountPoint string + password string + readOnly bool + ) + flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file path") + flag.StringVar(&mountPoint, "mount", "./", "Dir to mount") + flag.StringVar(&dsn, "dsn", "", "Database url") + flag.StringVar(&password, "password", "", "Master key password for covenantsql") + flag.BoolVar(&readOnly, "readonly", false, "Mount read only volume") flag.Usage = usage flag.Parse() log.SetLevel(log.InfoLevel) - err := client.Init(config, []byte(password)) + configFile = utils.HomeDirExpand(configFile) + + err := client.Init(configFile, []byte(password)) if err != nil { log.Fatal(err) } + cfg, err := client.ParseDSN(dsn) if err != nil { log.Fatal(err) } - db, err := sql.Open("covenantsql", dsn) + db, err := sql.Open("covenantsql", cfg.FormatDSN()) if err != nil { log.Fatal(err) } @@ -118,13 +128,18 @@ func main() { } cfs := CFS{db} + 
opts := make([]fuse.MountOption, 0, 5) + opts = append(opts, fuse.FSName("CovenantFS")) + opts = append(opts, fuse.Subtype("CovenantFS")) + opts = append(opts, fuse.LocalVolume()) + opts = append(opts, fuse.VolumeName(cfg.DatabaseID)) + if readOnly { + opts = append(opts, fuse.ReadOnly()) + } // Mount filesystem. c, err := fuse.Mount( mountPoint, - fuse.FSName("CovenantFS"), - fuse.Subtype("CovenantFS"), - fuse.LocalVolume(), - fuse.VolumeName(""), + opts..., ) if err != nil { log.Fatal(err) diff --git a/cmd/cql-minerd/bench.sh b/cmd/cql-minerd/bench.sh index c36b31bb6..837b8d9eb 100755 --- a/cmd/cql-minerd/bench.sh +++ b/cmd/cql-minerd/bench.sh @@ -8,4 +8,10 @@ go test -bench=^BenchmarkMinerOneNoSign$ -benchtime=10s -run ^$ && \ go test -bench=^BenchmarkMinerTwo$ -benchtime=10s -run ^$ && \ go test -bench=^BenchmarkMinerTwoNoSign$ -benchtime=10s -run ^$ && \ go test -bench=^BenchmarkMinerThree$ -benchtime=10s -run ^$ && \ -go test -bench=^BenchmarkMinerThreeNoSign$ -benchtime=10s -run ^$ +go test -bench=^BenchmarkMinerThreeNoSign$ -benchtime=10s -run ^$ && \ +go test -bench=^BenchmarkMinerOneWithEventualConsistency$ -benchtime=10s -run ^$ && \ +go test -bench=^BenchmarkMinerOneNoSignWithEventualConsistency$ -benchtime=10s -run ^$ && \ +go test -bench=^BenchmarkMinerTwoWithEventualConsistency$ -benchtime=10s -run ^$ && \ +go test -bench=^BenchmarkMinerTwoNoSignWithEventualConsistency$ -benchtime=10s -run ^$ && \ +go test -bench=^BenchmarkMinerThreeWithEventualConsistency$ -benchtime=10s -run ^$ && \ +go test -bench=^BenchmarkMinerThreeNoSignWithEventualConsistency$ -benchtime=10s -run ^$ diff --git a/cmd/cql-minerd/benchCustom.sh b/cmd/cql-minerd/benchCustom.sh new file mode 100755 index 000000000..6590d3b74 --- /dev/null +++ b/cmd/cql-minerd/benchCustom.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +make -C ../../ clean && \ +make -C ../../ use_all_cores +export miner_conf_dir=$PWD/../../test/bench_testnet/node_c +go test -bench=^BenchmarkCustomMiner1$ -benchtime=10s -run ^$ 
|tee custom_miner.log +go test -bench=^BenchmarkCustomMiner2$ -benchtime=10s -run ^$ |tee -a custom_miner.log +go test -bench=^BenchmarkCustomMiner3$ -benchtime=10s -run ^$ |tee -a custom_miner.log + +go test -cpu=1 -bench=^BenchmarkCustomMiner1$ -benchtime=10s -run ^$ |tee -a custom_miner.log +go test -cpu=1 -bench=^BenchmarkCustomMiner2$ -benchtime=10s -run ^$ |tee -a custom_miner.log +go test -cpu=1 -bench=^BenchmarkCustomMiner3$ -benchtime=10s -run ^$ |tee -a custom_miner.log diff --git a/cmd/cql-minerd/benchGNTE.sh b/cmd/cql-minerd/benchGNTE.sh index 89181bd47..c11f14ce5 100755 --- a/cmd/cql-minerd/benchGNTE.sh +++ b/cmd/cql-minerd/benchGNTE.sh @@ -1,27 +1,30 @@ #!/bin/bash +param=$1 + #make -C ../../ clean && \ #make -C ../../ use_all_cores && \ -go test -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ |tee gnte.log -go test -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -bench=^BenchmarkMinerGNTE4$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -bench=^BenchmarkMinerGNTE8$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=4 -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=4 -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=4 -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=4 -bench=^BenchmarkMinerGNTE4$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=4 -bench=^BenchmarkMinerGNTE8$ -benchtime=10s -run ^$ |tee -a gnte.log +if [ "fast" == "$param" ]; then + go test -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ |tee gnte.log + go test -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ |tee -a gnte.log + go test -cpu=1 -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ |tee -a gnte.log +else + go test -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ |tee gnte.log + go test -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ |tee -a 
gnte.log + go test -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ |tee -a gnte.log + go test -bench=^BenchmarkMinerGNTE4$ -benchtime=10s -run ^$ |tee -a gnte.log + go test -bench=^BenchmarkMinerGNTE8$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=2 -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=2 -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=2 -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=2 -bench=^BenchmarkMinerGNTE4$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=2 -bench=^BenchmarkMinerGNTE8$ -benchtime=10s -run ^$ |tee -a gnte.log + go test -cpu=4 -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ |tee -a gnte.log + go test -cpu=4 -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ |tee -a gnte.log + go test -cpu=4 -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ |tee -a gnte.log + go test -cpu=4 -bench=^BenchmarkMinerGNTE4$ -benchtime=10s -run ^$ |tee -a gnte.log + go test -cpu=4 -bench=^BenchmarkMinerGNTE8$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=1 -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=1 -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=1 -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=1 -bench=^BenchmarkMinerGNTE4$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=1 -bench=^BenchmarkMinerGNTE8$ -benchtime=10s -run ^$ |tee -a gnte.log + go test -cpu=1 -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ |tee -a gnte.log + go test -cpu=1 -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ |tee -a gnte.log + go test -cpu=1 -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ |tee -a gnte.log + go test -cpu=1 -bench=^BenchmarkMinerGNTE4$ -benchtime=10s -run ^$ |tee -a gnte.log + go test -cpu=1 -bench=^BenchmarkMinerGNTE8$ -benchtime=10s -run ^$ |tee -a gnte.log +fi diff --git 
a/cmd/cql-minerd/benchTestnet.sh b/cmd/cql-minerd/benchTestnet.sh new file mode 100755 index 000000000..81e0c2912 --- /dev/null +++ b/cmd/cql-minerd/benchTestnet.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +make -C ../../ clean && \ +make -C ../../ use_all_cores +go test -bench=^BenchmarkTestnetMiner1$ -benchtime=10s -run ^$ |tee testnet.log +go test -bench=^BenchmarkTestnetMiner2$ -benchtime=10s -run ^$ |tee -a testnet.log +go test -bench=^BenchmarkTestnetMiner3$ -benchtime=10s -run ^$ |tee -a testnet.log + +go test -cpu=1 -bench=^BenchmarkTestnetMiner1$ -benchtime=10s -run ^$ |tee -a testnet.log +go test -cpu=1 -bench=^BenchmarkTestnetMiner2$ -benchtime=10s -run ^$ |tee -a testnet.log +go test -cpu=1 -bench=^BenchmarkTestnetMiner3$ -benchtime=10s -run ^$ |tee -a testnet.log diff --git a/cmd/cql-minerd/dbms.go b/cmd/cql-minerd/dbms.go index d033f16de..bcd234c8d 100644 --- a/cmd/cql-minerd/dbms.go +++ b/cmd/cql-minerd/dbms.go @@ -38,16 +38,17 @@ import ( var rootHash = hash.Hash{} -func startDBMS(server *rpc.Server) (dbms *worker.DBMS, err error) { +func startDBMS(server *rpc.Server, onCreateDB func()) (dbms *worker.DBMS, err error) { if conf.GConf.Miner == nil { err = errors.New("invalid database config") return } cfg := &worker.DBMSConfig{ - RootDir: conf.GConf.Miner.RootDir, - Server: server, - MaxReqTimeGap: conf.GConf.Miner.MaxReqTimeGap, + RootDir: conf.GConf.Miner.RootDir, + Server: server, + MaxReqTimeGap: conf.GConf.Miner.MaxReqTimeGap, + OnCreateDatabase: onCreateDB, } if dbms, err = worker.NewDBMS(cfg); err != nil { diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index 04e897266..6ec7a37e5 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -46,13 +46,17 @@ import ( "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/CovenantSQL/CovenantSQL/utils/trace" sqlite3 
"github.com/CovenantSQL/go-sqlite3-encrypt" . "github.com/smartystreets/goconvey/convey" + yaml "gopkg.in/yaml.v2" ) var ( baseDir = utils.GetProjectSrcDir() testWorkingDir = FJ(baseDir, "./test/") + gnteConfDir = FJ(testWorkingDir, "./GNTE/conf/node_c/") + testnetConfDir = FJ(baseDir, "./conf/testnet/") logDir = FJ(testWorkingDir, "./log/") testGasPrice uint64 = 1 testAdvancePayment uint64 = 20000000 @@ -84,6 +88,7 @@ func startNodes() { FJ(baseDir, "./bin/cqld.test"), []string{"-config", FJ(testWorkingDir, "./integration/node_0/config.yaml"), "-test.coverprofile", FJ(baseDir, "./cmd/cql-minerd/leader.cover.out"), + "-metric-web", "0.0.0.0:13122", }, "leader", testWorkingDir, logDir, true, ); err == nil { @@ -95,6 +100,7 @@ func startNodes() { FJ(baseDir, "./bin/cqld.test"), []string{"-config", FJ(testWorkingDir, "./integration/node_1/config.yaml"), "-test.coverprofile", FJ(baseDir, "./cmd/cql-minerd/follower1.cover.out"), + "-metric-web", "0.0.0.0:13121", }, "follower1", testWorkingDir, logDir, false, ); err == nil { @@ -106,6 +112,7 @@ func startNodes() { FJ(baseDir, "./bin/cqld.test"), []string{"-config", FJ(testWorkingDir, "./integration/node_2/config.yaml"), "-test.coverprofile", FJ(baseDir, "./cmd/cql-minerd/follower2.cover.out"), + "-metric-web", "0.0.0.0:13120", }, "follower2", testWorkingDir, logDir, false, ); err == nil { @@ -146,6 +153,7 @@ func startNodes() { FJ(baseDir, "./bin/cql-minerd.test"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_0/config.yaml"), "-test.coverprofile", FJ(baseDir, "./cmd/cql-minerd/miner0.cover.out"), + "-metric-web", "0.0.0.0:12144", }, "miner0", testWorkingDir, logDir, true, ); err == nil { @@ -159,8 +167,9 @@ func startNodes() { FJ(baseDir, "./bin/cql-minerd.test"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_1/config.yaml"), "-test.coverprofile", FJ(baseDir, "./cmd/cql-minerd/miner1.cover.out"), + "-metric-web", "0.0.0.0:12145", }, - "miner1", testWorkingDir, logDir, true, + 
"miner1", testWorkingDir, logDir, false, ); err == nil { nodeCmds = append(nodeCmds, cmd) } else { @@ -172,8 +181,9 @@ func startNodes() { FJ(baseDir, "./bin/cql-minerd.test"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_2/config.yaml"), "-test.coverprofile", FJ(baseDir, "./cmd/cql-minerd/miner2.cover.out"), + "-metric-web", "0.0.0.0:12146", }, - "miner2", testWorkingDir, logDir, true, + "miner2", testWorkingDir, logDir, false, ); err == nil { nodeCmds = append(nodeCmds, cmd) } else { @@ -185,7 +195,7 @@ func startNodesProfile(bypassSign bool) { ctx := context.Background() bypassArg := "" if bypassSign { - bypassArg = "-bypassSignature" + bypassArg = "-bypass-signature" } // wait for ports to be available @@ -254,10 +264,10 @@ func startNodesProfile(bypassSign bool) { FJ(baseDir, "./bin/cql-minerd"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_0/config.yaml"), "-cpu-profile", FJ(baseDir, "./cmd/cql-minerd/miner0.profile"), - //"-traceFile", FJ(baseDir, "./cmd/cql-minerd/miner0.trace"), - "-metricGraphiteServer", "192.168.2.100:2003", - "-profileServer", "0.0.0.0:8080", - "-metricLog", + //"-trace-file", FJ(baseDir, "./cmd/cql-minerd/miner0.trace"), + "-metric-graphite-server", "192.168.2.100:2003", + "-profile-server", "0.0.0.0:8080", + "-metric-log", bypassArg, }, "miner0", testWorkingDir, logDir, false, @@ -272,10 +282,10 @@ func startNodesProfile(bypassSign bool) { FJ(baseDir, "./bin/cql-minerd"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_1/config.yaml"), "-cpu-profile", FJ(baseDir, "./cmd/cql-minerd/miner1.profile"), - //"-traceFile", FJ(baseDir, "./cmd/cql-minerd/miner1.trace"), - "-metricGraphiteServer", "192.168.2.100:2003", - "-profileServer", "0.0.0.0:8081", - "-metricLog", + //"-trace-file", FJ(baseDir, "./cmd/cql-minerd/miner1.trace"), + "-metric-graphite-server", "192.168.2.100:2003", + "-profile-server", "0.0.0.0:8081", + "-metric-log", bypassArg, }, "miner1", testWorkingDir, logDir, 
false, @@ -290,10 +300,10 @@ func startNodesProfile(bypassSign bool) { FJ(baseDir, "./bin/cql-minerd"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_2/config.yaml"), "-cpu-profile", FJ(baseDir, "./cmd/cql-minerd/miner2.profile"), - //"-traceFile", FJ(baseDir, "./cmd/cql-minerd/miner2.trace"), - "-metricGraphiteServer", "192.168.2.100:2003", - "-profileServer", "0.0.0.0:8082", - "-metricLog", + //"-trace-file", FJ(baseDir, "./cmd/cql-minerd/miner2.trace"), + "-metric-graphite-server", "192.168.2.100:2003", + "-profile-server", "0.0.0.0:8082", + "-metric-log", bypassArg, }, "miner2", testWorkingDir, logDir, false, @@ -370,8 +380,9 @@ func TestFullProcess(t *testing.T) { // client send create database transaction meta := client.ResourceMeta{ ResourceMeta: types.ResourceMeta{ - TargetMiners: minersAddrs, - Node: uint16(len(minersAddrs)), + TargetMiners: minersAddrs, + Node: uint16(len(minersAddrs)), + IsolationLevel: int(sql.LevelReadUncommitted), }, GasPrice: testGasPrice, AdvancePayment: testAdvancePayment, @@ -398,7 +409,7 @@ func TestFullProcess(t *testing.T) { // wait for creation var ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() - err = bp.WaitDatabaseCreation(ctx, proto.DatabaseID(dsnCfg.DatabaseID), db, 3*time.Second) + err = client.WaitDBCreation(ctx, dsn) So(err, ShouldBeNil) // check sqlchain profile exist @@ -429,7 +440,8 @@ func TestFullProcess(t *testing.T) { } permStat, ok := usersMap[clientAddr] So(ok, ShouldBeTrue) - So(permStat.Permission, ShouldEqual, types.Admin) + So(permStat.Permission, ShouldNotBeNil) + So(permStat.Permission.Role, ShouldEqual, types.Admin) So(permStat.Status, ShouldEqual, types.Normal) _, err = db.Exec("CREATE TABLE test (test int)") @@ -479,7 +491,7 @@ func TestFullProcess(t *testing.T) { So(err, ShouldBeNil) So(resultBytes, ShouldResemble, []byte("ha\001ppy")) - Convey("test query cancel", FailureContinues, func(c C) { + SkipConvey("test query cancel", 
FailureContinues, func(c C) { /* test cancel write query */ wg := sync.WaitGroup{} wg.Add(1) @@ -537,34 +549,30 @@ func TestFullProcess(t *testing.T) { err = row.Scan(&result) c.So(err, ShouldBeNil) c.So(result, ShouldEqual, 10000000000) - - c.So(err, ShouldBeNil) }) - time.Sleep(20 * time.Second) - - profileReq = &types.QuerySQLChainProfileReq{} - profileResp = &types.QuerySQLChainProfileResp{} - profileReq.DBID = dbID - err = rpc.RequestBP(route.MCCQuerySQLChainProfile.String(), profileReq, profileResp) + ctx2, ccl2 := context.WithTimeout(context.Background(), 3*time.Minute) + defer ccl2() + err = waitProfileChecking(ctx2, 3*time.Second, dbID, func(profile *types.SQLChainProfile) bool { + for _, user := range profile.Users { + if user.AdvancePayment != testAdvancePayment { + return true + } + } + return false + }) So(err, ShouldBeNil) - for _, user := range profileResp.Profile.Users { - log.Infof("user (%s) left advance payment: %d", user.Address.String(), user.AdvancePayment) - if user.AdvancePayment == testAdvancePayment { - time.Sleep(20 * time.Second) - break + + ctx3, ccl3 := context.WithTimeout(context.Background(), 1*time.Minute) + defer ccl3() + err = waitProfileChecking(ctx3, 3*time.Second, dbID, func(profile *types.SQLChainProfile) bool { + getIncome := false + for _, miner := range profile.Miners { + getIncome = getIncome || (miner.PendingIncome != 0 || miner.ReceivedIncome != 0) } - } - err = rpc.RequestBP(route.MCCQuerySQLChainProfile.String(), profileReq, profileResp) + return getIncome + }) So(err, ShouldBeNil) - for _, user := range profileResp.Profile.Users { - So(user.AdvancePayment, ShouldNotEqual, testAdvancePayment) - } - getIncome := false - for _, miner := range profileResp.Profile.Miners { - getIncome = getIncome || (miner.PendingIncome != 0 || miner.ReceivedIncome != 0) - } - So(getIncome, ShouldBeTrue) err = db.Close() So(err, ShouldBeNil) @@ -573,6 +581,36 @@ func TestFullProcess(t *testing.T) { }) } +func waitProfileChecking(ctx 
context.Context, period time.Duration, dbID proto.DatabaseID, + checkFunc func(profile *types.SQLChainProfile) bool) (err error) { + var ( + ticker = time.NewTicker(period) + req = &types.QuerySQLChainProfileReq{} + resp = &types.QuerySQLChainProfileResp{} + ) + defer ticker.Stop() + req.DBID = dbID + + for { + select { + case <-ticker.C: + err = rpc.RequestBP(route.MCCQuerySQLChainProfile.String(), req, resp) + if err == nil { + if checkFunc(&resp.Profile) { + return + } + log.WithFields(log.Fields{ + "dbID": resp.Profile.Address, + "num_of_user": len(resp.Profile.Users), + }).Debugf("get profile but failed to check in waitProfileChecking") + } + case <-ctx.Done(): + err = ctx.Err() + return + } + } +} + const ROWSTART = 1000000 const TABLENAME = "insert_table0" @@ -610,20 +648,25 @@ func benchDB(b *testing.B, db *sql.DB, createDB bool) { ii := atomic.AddInt64(&i, 1) index := ROWSTART + ii //start := time.Now() - _, err = db.Exec("INSERT INTO "+TABLENAME+" ( k, v1 ) VALUES"+ + + ctx, task := trace.NewTask(context.Background(), "BenchInsert") + + _, err = db.ExecContext(ctx, "INSERT INTO "+TABLENAME+" ( k, v1 ) VALUES"+ "(?, ?)", index, ii, ) //log.Warnf("insert index = %d %v", index, time.Since(start)) for err != nil && err.Error() == sqlite3.ErrBusy.Error() { // retry forever log.Warnf("index = %d retried", index) - _, err = db.Exec("INSERT INTO "+TABLENAME+" ( k, v1 ) VALUES"+ + _, err = db.ExecContext(ctx, "INSERT INTO "+TABLENAME+" ( k, v1 ) VALUES"+ "(?, ?)", index, ii, ) } if err != nil { b.Fatal(err) } + + task.End() } }) }) @@ -653,9 +696,11 @@ func benchDB(b *testing.B, db *sql.DB, createDB bool) { } else { //has data before ROWSTART index = rand.Int63n(count - 1) } + + ctx, task := trace.NewTask(context.Background(), "BenchSelect") //log.Debugf("index = %d", index) //start := time.Now() - row := db.QueryRow("SELECT v1 FROM "+TABLENAME+" WHERE k = ? LIMIT 1", index) + row := db.QueryRowContext(ctx, "SELECT v1 FROM "+TABLENAME+" WHERE k = ? 
LIMIT 1", index) //log.Warnf("select index = %d %v", index, time.Since(start)) var result []byte err = row.Scan(&result) @@ -663,6 +708,7 @@ func benchDB(b *testing.B, db *sql.DB, createDB bool) { log.Errorf("index = %d", index) b.Fatal(err) } + task.End() } }) }) @@ -685,7 +731,7 @@ func benchDB(b *testing.B, db *sql.DB, createDB bool) { So(err, ShouldBeNil) } -func benchMiner(b *testing.B, minerCount uint16, bypassSign bool) { +func benchMiner(b *testing.B, minerCount uint16, bypassSign bool, useEventualConsistency bool) { log.Warnf("benchmark for %d Miners, BypassSignature: %v", minerCount, bypassSign) asymmetric.BypassSignature = bypassSign if minerCount > 0 { @@ -721,8 +767,12 @@ func benchMiner(b *testing.B, minerCount uint16, bypassSign bool) { var dsn string if minerCount > 0 { // create - meta := client.ResourceMeta{} - meta.Node = minerCount + meta := client.ResourceMeta{ + ResourceMeta: types.ResourceMeta{ + Node: minerCount, + UseEventualConsistency: useEventualConsistency, + }, + } // wait for chain service var ctx1, cancel1 = context.WithTimeout(context.Background(), 1*time.Minute) defer cancel1() @@ -747,11 +797,9 @@ func benchMiner(b *testing.B, minerCount uint16, bypassSign bool) { So(err, ShouldBeNil) // wait for creation - dsnCfg, err := client.ParseDSN(dsn) - So(err, ShouldBeNil) var ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() - err = bp.WaitDatabaseCreation(ctx, proto.DatabaseID(dsnCfg.DatabaseID), db, 3*time.Second) + err = client.WaitDBCreation(ctx, dsn) So(err, ShouldBeNil) benchDB(b, db, minerCount > 0) @@ -793,9 +841,14 @@ func BenchmarkSQLite(b *testing.B) { }) } -func benchGNTEMiner(b *testing.B, minerCount uint16, bypassSign bool) { - log.Warnf("benchmark GNTE for %d Miners, BypassSignature: %v", minerCount, bypassSign) - asymmetric.BypassSignature = bypassSign +func benchOutsideMiner(b *testing.B, minerCount uint16, confDir string) { + benchOutsideMinerWithTargetMinerList(b, minerCount, nil, 
confDir) +} + +func benchOutsideMinerWithTargetMinerList( + b *testing.B, minerCount uint16, targetMiners []proto.AccountAddress, confDir string, +) { + log.Warnf("benchmark %v for %d Miners:", confDir, minerCount) // Create temp directory testDataDir, err := ioutil.TempDir(testWorkingDir, "covenantsql") @@ -803,9 +856,9 @@ func benchGNTEMiner(b *testing.B, minerCount uint16, bypassSign bool) { panic(err) } defer os.RemoveAll(testDataDir) - clientConf := FJ(testWorkingDir, "./GNTE/conf/node_c/config.yaml") + clientConf := FJ(confDir, "config.yaml") tempConf := FJ(testDataDir, "config.yaml") - clientKey := FJ(testWorkingDir, "./GNTE/conf/node_c/private.key") + clientKey := FJ(confDir, "private.key") tempKey := FJ(testDataDir, "private.key") utils.CopyFile(clientConf, tempConf) utils.CopyFile(clientKey, tempKey) @@ -813,13 +866,24 @@ func benchGNTEMiner(b *testing.B, minerCount uint16, bypassSign bool) { err = client.Init(tempConf, []byte("")) So(err, ShouldBeNil) + for _, node := range conf.GConf.KnownNodes { + if node.Role == proto.Leader { + log.Infof("Benching started on bp addr: %v", node.Addr) + break + } + } + dsnFile := FJ(baseDir, "./cmd/cql-minerd/.dsn") var dsn string if minerCount > 0 { // create - meta := client.ResourceMeta{} - meta.Node = minerCount - meta.AdvancePayment = 1000000000 + meta := client.ResourceMeta{ + ResourceMeta: types.ResourceMeta{ + TargetMiners: targetMiners, + Node: minerCount, + }, + AdvancePayment: 1000000000, + } // wait for chain service var ctx1, cancel1 = context.WithTimeout(context.Background(), 1*time.Minute) defer cancel1() @@ -831,11 +895,19 @@ func benchGNTEMiner(b *testing.B, minerCount uint16, bypassSign bool) { dsn, err = client.Create(meta) So(err, ShouldBeNil) log.Infof("the created database dsn is %v", dsn) + + // wait for creation + var ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + err = client.WaitDBCreation(ctx, dsn) + So(err, ShouldBeNil) + err = 
ioutil.WriteFile(dsnFile, []byte(dsn), 0666) if err != nil { log.Errorf("write .dsn failed: %v", err) } defer os.Remove(dsnFile) + defer client.Drop(dsn) } else { dsn = os.Getenv("DSN") } @@ -843,90 +915,175 @@ func benchGNTEMiner(b *testing.B, minerCount uint16, bypassSign bool) { db, err := sql.Open("covenantsql", dsn) So(err, ShouldBeNil) - dsnCfg, err := client.ParseDSN(dsn) - So(err, ShouldBeNil) - - // wait for creation - var ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - err = bp.WaitDatabaseCreation(ctx, proto.DatabaseID(dsnCfg.DatabaseID), db, 3*time.Second) - So(err, ShouldBeNil) - benchDB(b, db, minerCount > 0) - - err = client.Drop(dsn) - So(err, ShouldBeNil) - time.Sleep(5 * time.Second) - stopNodes() } func BenchmarkMinerOneNoSign(b *testing.B) { Convey("bench single node", b, func() { - benchMiner(b, 1, true) + benchMiner(b, 1, true, false) }) } func BenchmarkMinerTwoNoSign(b *testing.B) { Convey("bench two node", b, func() { - benchMiner(b, 2, true) + benchMiner(b, 2, true, false) }) } func BenchmarkMinerThreeNoSign(b *testing.B) { Convey("bench three node", b, func() { - benchMiner(b, 3, true) + benchMiner(b, 3, true, false) }) } func BenchmarkMinerOne(b *testing.B) { Convey("bench single node", b, func() { - benchMiner(b, 1, false) + benchMiner(b, 1, false, false) }) } func BenchmarkMinerTwo(b *testing.B) { Convey("bench two node", b, func() { - benchMiner(b, 2, false) + benchMiner(b, 2, false, false) }) } func BenchmarkMinerThree(b *testing.B) { Convey("bench three node", b, func() { - benchMiner(b, 3, false) + benchMiner(b, 3, false, false) + }) +} + +func BenchmarkMinerOneNoSignWithEventualConsistency(b *testing.B) { + Convey("bench single node", b, func() { + benchMiner(b, 1, true, true) + }) +} + +func BenchmarkMinerTwoNoSignWithEventualConsistency(b *testing.B) { + Convey("bench two node", b, func() { + benchMiner(b, 2, true, true) + }) +} + +func BenchmarkMinerThreeNoSignWithEventualConsistency(b 
*testing.B) { + Convey("bench three node", b, func() { + benchMiner(b, 3, true, true) + }) +} + +func BenchmarkMinerOneWithEventualConsistency(b *testing.B) { + Convey("bench single node", b, func() { + benchMiner(b, 1, false, true) + }) +} + +func BenchmarkMinerTwoWithEventualConsistency(b *testing.B) { + Convey("bench two node", b, func() { + benchMiner(b, 2, false, true) + }) +} + +func BenchmarkMinerThreeWithEventualConsistency(b *testing.B) { + Convey("bench three node", b, func() { + benchMiner(b, 3, false, true) }) } func BenchmarkClientOnly(b *testing.B) { Convey("bench three node", b, func() { - benchMiner(b, 0, false) + benchMiner(b, 0, false, false) }) } func BenchmarkMinerGNTE1(b *testing.B) { Convey("bench GNTE one node", b, func() { - benchGNTEMiner(b, 1, false) + benchOutsideMiner(b, 1, gnteConfDir) }) } + func BenchmarkMinerGNTE2(b *testing.B) { Convey("bench GNTE two node", b, func() { - benchGNTEMiner(b, 2, false) + benchOutsideMiner(b, 2, gnteConfDir) }) } func BenchmarkMinerGNTE3(b *testing.B) { Convey("bench GNTE three node", b, func() { - benchGNTEMiner(b, 3, false) + benchOutsideMiner(b, 3, gnteConfDir) }) } func BenchmarkMinerGNTE4(b *testing.B) { Convey("bench GNTE three node", b, func() { - benchGNTEMiner(b, 4, false) + benchOutsideMiner(b, 4, gnteConfDir) }) } func BenchmarkMinerGNTE8(b *testing.B) { Convey("bench GNTE three node", b, func() { - benchGNTEMiner(b, 8, false) + benchOutsideMiner(b, 8, gnteConfDir) + }) +} + +func BenchmarkTestnetMiner1(b *testing.B) { + Convey("bench testnet one node", b, func() { + benchOutsideMiner(b, 1, testnetConfDir) + }) +} + +func BenchmarkTestnetMiner2(b *testing.B) { + Convey("bench testnet two node", b, func() { + benchOutsideMiner(b, 2, testnetConfDir) + }) +} + +func BenchmarkTestnetTargetMiner2(b *testing.B) { + var ( + err error + // Public keys of miners for test + publicKeys = []string{ + "0235abfb93031df7bf776332c510a862e48e81eebea76f5e165406af8fec5215d6", + 
"03aec5337c0a58b8eff96f8ab30518830ad8e329c74bb30b38901a9395c72340f8", + } + ) + Convey("bench testnet two node", b, func() { + var ( + pubKey asymmetric.PublicKey + addr proto.AccountAddress + targetMiners = make([]proto.AccountAddress, len(publicKeys)) + ) + for i, v := range publicKeys { + err = yaml.Unmarshal([]byte(v), &pubKey) + So(err, ShouldBeNil) + addr, err = crypto.PubKeyHash(&pubKey) + So(err, ShouldBeNil) + targetMiners[i] = addr + } + benchOutsideMinerWithTargetMinerList(b, 2, targetMiners, testnetConfDir) + }) +} + +func BenchmarkTestnetMiner3(b *testing.B) { + Convey("bench testnet three node", b, func() { + benchOutsideMiner(b, 3, testnetConfDir) + }) +} + +func BenchmarkCustomMiner1(b *testing.B) { + Convey("bench custom one node", b, func() { + benchOutsideMiner(b, 1, os.Getenv("miner_conf_dir")) + }) +} + +func BenchmarkCustomMiner2(b *testing.B) { + Convey("bench custom two node", b, func() { + benchOutsideMiner(b, 2, os.Getenv("miner_conf_dir")) + }) +} + +func BenchmarkCustomMiner3(b *testing.B) { + Convey("bench custom three node", b, func() { + benchOutsideMiner(b, 3, os.Getenv("miner_conf_dir")) }) } diff --git a/cmd/cql-minerd/main.go b/cmd/cql-minerd/main.go index e922164e0..2f8a17a33 100644 --- a/cmd/cql-minerd/main.go +++ b/cmd/cql-minerd/main.go @@ -26,19 +26,17 @@ import ( "os" "os/signal" "runtime" - - "github.com/CovenantSQL/CovenantSQL/metric" - - //"runtime/trace" "syscall" "time" "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" + "github.com/CovenantSQL/CovenantSQL/metric" "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/CovenantSQL/CovenantSQL/utils/trace" "github.com/CovenantSQL/CovenantSQL/worker" graphite "github.com/cyberdelia/go-metrics-graphite" metrics "github.com/rcrowley/go-metrics" @@ -70,6 +68,7 @@ var ( configFile string genKeyPair 
bool metricLog bool + metricWeb string // profile cpuProfile string @@ -81,6 +80,7 @@ var ( // other noLogo bool showVersion bool + logLevel string ) const name = `cql-minerd` @@ -88,23 +88,26 @@ const desc = `CovenantSQL is a Distributed Database running on BlockChain` func init() { flag.BoolVar(&noLogo, "nologo", false, "Do not print logo") - flag.BoolVar(&metricLog, "metricLog", false, "Print metrics in log") + flag.BoolVar(&metricLog, "metric-log", false, "Print metrics in log") flag.BoolVar(&showVersion, "version", false, "Show version information and exit") - flag.BoolVar(&genKeyPair, "genKeyPair", false, "Gen new key pair when no private key found") - flag.BoolVar(&asymmetric.BypassSignature, "bypassSignature", false, + flag.BoolVar(&genKeyPair, "gen-keypair", false, "Gen new key pair when no private key found") + flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, "Disable signature sign and verify, for testing") - flag.StringVar(&configFile, "config", "./config.yaml", "Config file path") + flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file path") - flag.StringVar(&profileServer, "profileServer", "", "Profile server address, default not started") + flag.StringVar(&profileServer, "profile-server", "", "Profile server address, default not started") flag.StringVar(&cpuProfile, "cpu-profile", "", "Path to file for CPU profiling information") flag.StringVar(&memProfile, "mem-profile", "", "Path to file for memory profiling information") - flag.StringVar(&metricGraphite, "metricGraphiteServer", "", "Metric graphite server to push metrics") - flag.StringVar(&traceFile, "traceFile", "", "trace profile") + flag.StringVar(&metricGraphite, "metric-graphite-server", "", "Metric graphite server to push metrics") + flag.StringVar(&metricWeb, "metric-web", "", "Address and port to get internal metrics") + + flag.StringVar(&traceFile, "trace-file", "", "Trace profile") + flag.StringVar(&logLevel, "log-level", "", "Service log level") 
flag.Usage = func() { - fmt.Fprintf(os.Stderr, "\n%s\n\n", desc) - fmt.Fprintf(os.Stderr, "Usage: %s [arguments]\n", name) + _, _ = fmt.Fprintf(os.Stderr, "\n%s\n\n", desc) + _, _ = fmt.Fprintf(os.Stderr, "Usage: %s [arguments]\n", name) flag.PrintDefaults() } } @@ -116,10 +119,10 @@ func initLogs() { } func main() { + flag.Parse() // set random rand.Seed(time.Now().UnixNano()) - log.SetLevel(log.InfoLevel) - flag.Parse() + log.SetStringLevel(logLevel, log.InfoLevel) if showVersion { fmt.Printf("%v %v %v %v %v\n", @@ -127,6 +130,8 @@ func main() { os.Exit(0) } + configFile = utils.HomeDirExpand(configFile) + flag.Visit(func(f *flag.Flag) { log.Infof("args %#v : %s", f.Name, f.Value) }) @@ -158,7 +163,7 @@ func main() { } // init profile, if cpuProfile, memProfile length is 0, nothing will be done - utils.StartProfile(cpuProfile, memProfile) + _ = utils.StartProfile(cpuProfile, memProfile) // set generate key pair config conf.GConf.GenerateKeyPair = genKeyPair @@ -184,11 +189,19 @@ func main() { }() } + if len(metricWeb) > 0 { + err = metric.InitMetricWeb(metricWeb) + if err != nil { + log.Errorf("start metric web server on %s failed: %v", metricWeb, err) + os.Exit(-1) + } + } + + // start prometheus collector + reg := metric.StartMetricCollector() + // start period provide service transaction generator go func() { - // start prometheus collector - reg := metric.StartMetricCollector() - tick := time.NewTicker(conf.GConf.Miner.ProvideServiceInterval) defer tick.Stop() @@ -205,7 +218,9 @@ func main() { // start dbms var dbms *worker.DBMS - if dbms, err = startDBMS(server); err != nil { + if dbms, err = startDBMS(server, func() { + sendProvideService(reg) + }); err != nil { log.WithError(err).Fatal("start dbms failed") } @@ -216,7 +231,7 @@ func main() { server.Serve() }() defer func() { - server.Listener.Close() + _ = server.Listener.Close() server.Stop() }() @@ -242,22 +257,22 @@ func main() { go graphite.Graphite(metrics.DefaultRegistry, 5*time.Second, minerName, 
addr) } - //if traceFile != "" { - // f, err := os.Create(traceFile) - // if err != nil { - // log.WithError(err).Fatal("failed to create trace output file") - // } - // defer func() { - // if err := f.Close(); err != nil { - // log.WithError(err).Fatal("failed to close trace file") - // } - // }() - - // if err := trace.Start(f); err != nil { - // log.WithError(err).Fatal("failed to start trace") - // } - // defer trace.Stop() - //} + if traceFile != "" { + f, err := os.Create(traceFile) + if err != nil { + log.WithError(err).Fatal("failed to create trace output file") + } + defer func() { + if err := f.Close(); err != nil { + log.WithError(err).Fatal("failed to close trace file") + } + }() + + if err := trace.Start(f); err != nil { + log.WithError(err).Fatal("failed to start trace") + } + defer trace.Stop() + } <-signalCh utils.StopProfile() diff --git a/cmd/cql-minerd/node.go b/cmd/cql-minerd/node.go index a859440ff..dd6580523 100644 --- a/cmd/cql-minerd/node.go +++ b/cmd/cql-minerd/node.go @@ -19,18 +19,14 @@ package main import ( "fmt" "os" - "strings" "syscall" "time" "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - kt "github.com/CovenantSQL/CovenantSQL/kayak/types" - "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/utils/log" - "github.com/pkg/errors" "golang.org/x/crypto/ssh/terminal" ) @@ -56,7 +52,7 @@ func initNode() (server *rpc.Server, err error) { // init kms routing route.InitKMS(conf.GConf.PubKeyStoreFile) - err = registerNodeToBP(30 * time.Second) + err = rpc.RegisterNodeToBP(30 * time.Second) if err != nil { log.Fatalf("register node to BP failed: %v", err) } @@ -83,53 +79,3 @@ func createServer(privateKeyPath, pubKeyStorePath string, masterKey []byte, list return } - -func registerNodeToBP(timeout time.Duration) (err error) { - // get local node id - localNodeID, err := kms.GetLocalNodeID() 
- if err != nil { - err = errors.Wrap(err, "register node to BP") - return - } - - // get local node info - localNodeInfo, err := kms.GetNodeInfo(localNodeID) - if err != nil { - err = errors.Wrap(err, "register node to BP") - return - } - - log.WithField("node", localNodeInfo).Debug("construct local node info") - - pingWaitCh := make(chan proto.NodeID) - bpNodeIDs := route.GetBPs() - for _, bpNodeID := range bpNodeIDs { - go func(ch chan proto.NodeID, id proto.NodeID) { - for { - err := rpc.PingBP(localNodeInfo, id) - if err == nil { - log.Infof("ping BP succeed: %v", localNodeInfo) - ch <- id - return - } - if strings.Contains(err.Error(), kt.ErrNotLeader.Error()) { - log.Debug("stop ping non leader BP node") - return - } - - log.Warnf("ping BP failed: %v", err) - time.Sleep(3 * time.Second) - } - }(pingWaitCh, bpNodeID) - } - - select { - case bp := <-pingWaitCh: - close(pingWaitCh) - log.WithField("BP", bp).Infof("ping BP succeed") - case <-time.After(timeout): - return errors.New("ping BP timeout") - } - - return -} diff --git a/cmd/cql-minerd/provide_service.go b/cmd/cql-minerd/provide_service.go index a2e0f78b5..6a269e7dc 100644 --- a/cmd/cql-minerd/provide_service.go +++ b/cmd/cql-minerd/provide_service.go @@ -17,6 +17,7 @@ package main import ( + "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" @@ -151,6 +152,10 @@ func sendProvideService(reg *prometheus.Registry) { }, ) + if conf.GConf.Miner != nil && len(conf.GConf.Miner.TargetUsers) > 0 { + tx.ProvideServiceHeader.TargetUser = conf.GConf.Miner.TargetUsers + } + tx.Nonce = nonceResp.Nonce if err = tx.Sign(privateKey); err != nil { @@ -158,6 +163,7 @@ func sendProvideService(reg *prometheus.Registry) { return } + req.TTL = 1 req.Tx = tx if err = rpc.RequestBP(route.MCCAddTx.String(), req, resp); err != nil { diff --git a/cmd/cql-minerd/various_metric_test.go 
b/cmd/cql-minerd/various_metric_test.go new file mode 100644 index 000000000..aabe5ced6 --- /dev/null +++ b/cmd/cql-minerd/various_metric_test.go @@ -0,0 +1,383 @@ +// +build !testbinary + +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "bytes" + "database/sql" + "encoding/binary" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/CovenantSQL/CovenantSQL/crypto" + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + kw "github.com/CovenantSQL/CovenantSQL/kayak/wal" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/types" + "github.com/CovenantSQL/CovenantSQL/utils" + x "github.com/CovenantSQL/CovenantSQL/xenomint" + xi "github.com/CovenantSQL/CovenantSQL/xenomint/interfaces" + xs "github.com/CovenantSQL/CovenantSQL/xenomint/sqlite" + . 
"github.com/smartystreets/goconvey/convey" +) + +func BenchmarkDBWrite(b *testing.B) { + priv, _, err := asymmetric.GenSecp256k1KeyPair() + _ = err + + var n proto.NodeID + var a proto.AccountAddress + + r := &types.Request{ + Header: types.SignedRequestHeader{ + RequestHeader: types.RequestHeader{ + QueryType: types.WriteQuery, + NodeID: n.ToRawNodeID().ToNodeID(), + DatabaseID: a.DatabaseID(), + ConnectionID: 0, + SeqNo: 1, + Timestamp: time.Now().UTC(), + BatchCount: 1, + }, + }, + Payload: types.RequestPayload{ + Queries: []types.Query{ + { + Pattern: "INSERT INTO insert_table0 ( k, v1 ) VALUES(?, ?)", + Args: []types.NamedArg{ + { + Value: 1, + }, + { + Value: 2, + }, + }, + }, + }, + }, + } + + err = r.Sign(priv) + + var ( + strg xi.Storage + state *x.State + ) + f, _ := ioutil.TempFile("", "f") + _ = f.Close() + _ = os.Remove(f.Name()) + + strg, err = xs.NewSqlite(f.Name()) + if err == nil { + defer strg.Close() + } + state = x.NewState(sql.LevelReadUncommitted, n.ToRawNodeID().ToNodeID(), strg) + defer state.Close(true) + + b.ResetTimer() + b.Run("commit", func(b *testing.B) { + r1 := &types.Request{ + Header: types.SignedRequestHeader{ + RequestHeader: types.RequestHeader{ + QueryType: types.WriteQuery, + NodeID: n.ToRawNodeID().ToNodeID(), + DatabaseID: a.DatabaseID(), + ConnectionID: 0, + SeqNo: 1, + Timestamp: time.Now().UTC(), + BatchCount: 1, + }, + }, + Payload: types.RequestPayload{ + Queries: []types.Query{ + { + Pattern: "CREATE TABLE insert_table0 (k int, v1 int)", + }, + }, + }, + } + + _ = r1.Sign(priv) + _, _, _ = state.Query(r1, false) + + for i := 0; i != b.N; i++ { + _, _, _ = state.Query(r, false) + } + }) +} + +func BenchmarkSignSignature(b *testing.B) { + priv, _, err := asymmetric.GenSecp256k1KeyPair() + _ = err + + var n proto.NodeID + var a proto.AccountAddress + + r := &types.Request{ + Header: types.SignedRequestHeader{ + RequestHeader: types.RequestHeader{ + QueryType: types.WriteQuery, + NodeID: n.ToRawNodeID().ToNodeID(), + 
DatabaseID: a.DatabaseID(), + ConnectionID: 0, + SeqNo: 1, + Timestamp: time.Now().UTC(), + BatchCount: 1, + }, + }, + Payload: types.RequestPayload{ + Queries: []types.Query{ + { + Pattern: "INSERT INTO insert_table0 ( k, v1 ) VALUES(?, ?)", + Args: []types.NamedArg{ + { + Value: 1, + }, + { + Value: 2, + }, + }, + }, + }, + }, + } + + b.ResetTimer() + b.Run("sign", func(b *testing.B) { + for i := 0; i != b.N; i++ { + err = r.Sign(priv) + } + }) + + b.ResetTimer() + b.Run("verify", func(b *testing.B) { + for i := 0; i != b.N; i++ { + err = r.Verify() + } + }) + + rs := &types.Response{ + Header: types.SignedResponseHeader{ + ResponseHeader: types.ResponseHeader{ + Request: r.Header.RequestHeader, + RequestHash: r.Header.Hash(), + NodeID: n.ToRawNodeID().ToNodeID(), + Timestamp: time.Now().UTC(), + RowCount: 1, + LogOffset: 1, + LastInsertID: 1, + AffectedRows: 1, + }, + }, + } + + _ = rs + + b.ResetTimer() + b.Run("sign nested", func(b *testing.B) { + for i := 0; i != b.N; i++ { + err = rs.BuildHash() + } + }) + + b.ResetTimer() + b.Run("verify nested", func(b *testing.B) { + for i := 0; i != b.N; i++ { + err = rs.VerifyHash() + } + }) + + var buf *bytes.Buffer + + b.ResetTimer() + b.Run("encode request", func(b *testing.B) { + for i := 0; i != b.N; i++ { + buf, _ = utils.EncodeMsgPack(r) + } + }) + + b.ResetTimer() + b.Run("decode request", func(b *testing.B) { + for i := 0; i != b.N; i++ { + var tr *types.Request + _ = utils.DecodeMsgPack(buf.Bytes(), &tr) + } + }) + + var buf2 *bytes.Buffer + l := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 1, + Version: 1, + Type: kt.LogPrepare, + Producer: n.ToRawNodeID().ToNodeID(), + }, + Data: buf.Bytes(), + } + + b.ResetTimer() + b.Run("encode to binlog format", func(b *testing.B) { + for i := 0; i != b.N; i++ { + buf2, _ = utils.EncodeMsgPack(l) + _ = buf2 + } + }) + + b.ResetTimer() + b.Run("decode from binlog format", func(b *testing.B) { + for i := 0; i != b.N; i++ { + var l2 *kt.Log + _ = 
utils.DecodeMsgPack(buf2.Bytes(), &l2) + } + }) + + f, _ := ioutil.TempFile("", "f") + _ = f.Close() + _ = os.Remove(f.Name()) + defer os.Remove(f.Name()) + w, _ := kw.NewLevelDBWal(f.Name()) + defer w.Close() + + var index uint64 + + b.Run("write wal", func(b *testing.B) { + for i := 0; i != b.N; i++ { + index = index + 1 + l.Index = index + _ = w.Write(l) + } + }) + + b.Run("get wal", func(b *testing.B) { + for i := 0; i != b.N; i++ { + index = index - 1 + if index > 0 { + _, _ = w.Get(index) + } + } + }) +} + +func TestComputeMetrics(t *testing.T) { + Convey("compute metrics", t, func() { + priv, _, err := asymmetric.GenSecp256k1KeyPair() + So(err, ShouldBeNil) + + var n proto.NodeID + var a proto.AccountAddress + + r := &types.Request{ + Header: types.SignedRequestHeader{ + RequestHeader: types.RequestHeader{ + QueryType: types.WriteQuery, + NodeID: n.ToRawNodeID().ToNodeID(), + DatabaseID: a.DatabaseID(), + ConnectionID: 0, + SeqNo: 1, + Timestamp: time.Now().UTC(), + BatchCount: 1, + }, + }, + Payload: types.RequestPayload{ + Queries: []types.Query{ + { + Pattern: "INSERT INTO insert_table0 ( k, v1 ) VALUES(?, ?)", + Args: []types.NamedArg{ + { + Value: 1, + }, + { + Value: 2, + }, + }, + }, + }, + }, + } + + err = r.Sign(priv) + So(err, ShouldBeNil) + + buf, err := utils.EncodeMsgPack(r) + So(err, ShouldBeNil) + + t.Logf("RequestSize: %v", len(buf.Bytes())) + + l := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 1, + Version: 1, + Type: kt.LogPrepare, + Producer: n.ToRawNodeID().ToNodeID(), + }, + Data: buf.Bytes(), + } + + buf2, err := utils.EncodeMsgPack(l) + So(err, ShouldBeNil) + + t.Logf("PrepareLogSize: %v", len(buf2.Bytes())) + + respNodeAddr, err := crypto.PubKeyHash(priv.PubKey()) + So(err, ShouldBeNil) + + rs := &types.Response{ + Header: types.SignedResponseHeader{ + ResponseHeader: types.ResponseHeader{ + Request: r.Header.RequestHeader, + RequestHash: r.Header.Hash(), + NodeID: n.ToRawNodeID().ToNodeID(), + ResponseAccount: respNodeAddr, + 
Timestamp: time.Now().UTC(), + RowCount: 1, + LogOffset: 1, + LastInsertID: 1, + AffectedRows: 1, + }, + }, + } + + buf3, err := utils.EncodeMsgPack(rs) + So(err, ShouldBeNil) + + t.Logf("ResponseSize: %v", len(buf3.Bytes())) + + bs := make([]byte, 16) + binary.BigEndian.PutUint64(bs, 1) + binary.BigEndian.PutUint64(bs, 2) + + l2 := kt.Log{ + LogHeader: kt.LogHeader{ + Index: 1, + Version: 1, + Type: kt.LogCommit, + Producer: n.ToRawNodeID().ToNodeID(), + }, + Data: bs, + } + + buf4, err := utils.EncodeMsgPack(l2) + So(err, ShouldBeNil) + + t.Logf("CommitLogSize: %v", len(buf4.Bytes())) + }) +} diff --git a/cmd/cql-mysql-adapter/README.md b/cmd/cql-mysql-adapter/README.md index c5a0eee7f..2cf2aca53 100644 --- a/cmd/cql-mysql-adapter/README.md +++ b/cmd/cql-mysql-adapter/README.md @@ -22,7 +22,7 @@ Generate the main configuration file. Same as [Generating Default Config File in Start the mysql adapter by following commands: ```shell -$ cql-mysql-adapter -config config.yaml +$ cql-mysql-adapter ``` The default mysql user is ```root``` and the default mysql password is ```calvin```, which can be modified as optional arguments of mysql adapter. 
@@ -33,10 +33,10 @@ Avaiable command-line arguments are: ```shell $ cql-mysql-adapter --help Usage of ./cql-mysql-adapter: - -bypassSignature + -bypass-signature Disable signature sign and verify, for testing -config string - config file for mysql adapter (default "./config.yaml") + config file for mysql adapter (default "~/.cql/config.yaml") -listen string listen address for mysql adapter (default "127.0.0.1:4664") -mysql-password string @@ -82,4 +82,4 @@ mysql> show tables; mysql> quit Bye -``` \ No newline at end of file +``` diff --git a/cmd/cql-mysql-adapter/main.go b/cmd/cql-mysql-adapter/main.go index a0322c9ee..0ec23e2b3 100644 --- a/cmd/cql-mysql-adapter/main.go +++ b/cmd/cql-mysql-adapter/main.go @@ -25,6 +25,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/client" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" "golang.org/x/sys/unix" ) @@ -40,28 +41,33 @@ var ( mysqlUser string mysqlPassword string showVersion bool + logLevel string ) func init() { - flag.StringVar(&configFile, "config", "./config.yaml", "config file for mysql adapter") - flag.StringVar(&password, "password", "", "master key password") - flag.BoolVar(&asymmetric.BypassSignature, "bypassSignature", false, + flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file for mysql adapter") + flag.StringVar(&password, "password", "", "Master key password") + flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, "Disable signature sign and verify, for testing") flag.BoolVar(&showVersion, "version", false, "Show version information and exit") - flag.StringVar(&listenAddr, "listen", "127.0.0.1:4664", "listen address for mysql adapter") - flag.StringVar(&mysqlUser, "mysql-user", "root", "mysql user for adapter server") - flag.StringVar(&mysqlPassword, "mysql-password", "calvin", "mysql password for adapter server") + flag.StringVar(&listenAddr, "listen", 
"127.0.0.1:4664", "Listen address for mysql adapter") + flag.StringVar(&mysqlUser, "mysql-user", "root", "MySQL user for adapter server") + flag.StringVar(&mysqlPassword, "mysql-password", "calvin", "MySQL password for adapter server") + flag.StringVar(&logLevel, "log-level", "", "Service log level") } func main() { flag.Parse() + log.SetStringLevel(logLevel, log.InfoLevel) if showVersion { fmt.Printf("%v %v %v %v %v\n", name, version, runtime.GOOS, runtime.GOARCH, runtime.Version()) os.Exit(0) } + configFile = utils.HomeDirExpand(configFile) + flag.Visit(func(f *flag.Flag) { log.Infof("args %#v : %s", f.Name, f.Value) }) diff --git a/cmd/cql-observer/api.go b/cmd/cql-observer/api.go index 6738d97b6..b2d8c3fb3 100644 --- a/cmd/cql-observer/api.go +++ b/cmd/cql-observer/api.go @@ -596,8 +596,8 @@ func (a *explorerAPI) formatResponseHeader(resp *types.SignedResponseHeader) map "affected_rows": resp.AffectedRows, }, "request": map[string]interface{}{ - "hash": resp.Request.Hash().String(), - "timestamp": a.formatTime(resp.Request.Timestamp), + "hash": resp.GetRequestHash().String(), + "timestamp": a.formatTime(resp.GetRequestTimestamp()), "node": resp.Request.NodeID, "type": resp.Request.QueryType.String(), "count": resp.Request.BatchCount, @@ -609,15 +609,15 @@ func (a *explorerAPI) formatAck(ack *types.SignedAckHeader) map[string]interface return map[string]interface{}{ "ack": map[string]interface{}{ "request": map[string]interface{}{ - "hash": ack.Response.Request.Hash().String(), - "timestamp": a.formatTime(ack.Response.Request.Timestamp), + "hash": ack.GetRequestHash().String(), + "timestamp": a.formatTime(ack.GetRequestTimestamp()), "node": ack.Response.Request.NodeID, "type": ack.Response.Request.QueryType.String(), "count": ack.Response.Request.BatchCount, }, "response": map[string]interface{}{ - "hash": ack.Response.Hash().String(), - "timestamp": a.formatTime(ack.Response.Timestamp), + "hash": ack.GetResponseHash().String(), + "timestamp": 
a.formatTime(ack.GetResponseTimestamp()), "node": ack.Response.NodeID, "log_id": ack.Response.LogOffset, // savepoint id in eventual consistency mode "last_insert_id": ack.Response.LastInsertID, diff --git a/cmd/cql-observer/config_test.go b/cmd/cql-observer/config_test.go index 9dc0d20c9..e542af247 100644 --- a/cmd/cql-observer/config_test.go +++ b/cmd/cql-observer/config_test.go @@ -89,7 +89,7 @@ BlockProducer: Producer: 0000000000000000000000000000000000000000000000000000000000000001 MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 - Timestamp: 2018-08-13T21:59:59.12Z + Timestamp: "2019-01-10T12:49:07+08:00" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: @@ -223,7 +223,7 @@ BlockProducer: Producer: 0000000000000000000000000000000000000000000000000000000000000001 MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 - Timestamp: 2018-08-13T21:59:59.12Z + Timestamp: "2019-01-10T12:49:07+08:00" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/cmd/cql-observer/main.go b/cmd/cql-observer/main.go index c21f9875a..34162cbc3 100644 --- a/cmd/cql-observer/main.go +++ b/cmd/cql-observer/main.go @@ -31,6 +31,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/rpc" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" ) @@ -45,29 +46,33 @@ var ( listenAddr string resetPosition string showVersion bool + logLevel string ) func init() { - flag.StringVar(&configFile, "config", "./config.yaml", "config file path") - flag.StringVar(&dbID, "database", "", "database to listen for observation") + flag.StringVar(&configFile, "config", "~/.cql/config.yaml", 
"Config file path") + flag.StringVar(&dbID, "database", "", "Database to listen for observation") flag.BoolVar(&showVersion, "version", false, "Show version information and exit") - flag.BoolVar(&asymmetric.BypassSignature, "bypassSignature", false, + flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, "Disable signature sign and verify, for testing") - flag.StringVar(&resetPosition, "reset", "", "reset subscribe position") - flag.StringVar(&listenAddr, "listen", "127.0.0.1:4663", "listen address for http explorer api") + flag.StringVar(&resetPosition, "reset", "", "Reset subscribe position") + flag.StringVar(&listenAddr, "listen", "127.0.0.1:4663", "Listen address for http explorer api") + flag.StringVar(&logLevel, "log-level", "", "Service log level") } func main() { + flag.Parse() // set random rand.Seed(time.Now().UnixNano()) - log.SetLevel(log.DebugLevel) - flag.Parse() + log.SetStringLevel(logLevel, log.InfoLevel) if showVersion { fmt.Printf("%v %v %v %v %v\n", name, version, runtime.GOOS, runtime.GOARCH, runtime.Version()) os.Exit(0) } + configFile = utils.HomeDirExpand(configFile) + flag.Visit(func(f *flag.Flag) { log.Infof("args %#v : %s", f.Name, f.Value) }) diff --git a/cmd/cql-observer/observation_test.go b/cmd/cql-observer/observation_test.go index 039f9bf28..81e1f35e3 100644 --- a/cmd/cql-observer/observation_test.go +++ b/cmd/cql-observer/observation_test.go @@ -35,12 +35,15 @@ import ( "time" bp "github.com/CovenantSQL/CovenantSQL/blockproducer" + "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/client" "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/route" + "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/types" 
"github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" @@ -252,14 +255,16 @@ func TestFullProcess(t *testing.T) { Convey("test full process", t, func() { var ( - err error - cliPriv *asymmetric.PrivateKey - addr, addr2 proto.AccountAddress - dsn, dsn2 string - cfg, cfg2 *client.Config - dbID, dbID2 string - ctx1, ctx2, ctx3 context.Context - ccl1, ccl2, ccl3 context.CancelFunc + err error + cliPriv, obPriv *asymmetric.PrivateKey + addr, addr2 proto.AccountAddress + dbAddr, dbAddr2, obAddr, cliAddr proto.AccountAddress + dsn, dsn2 string + cfg, cfg2 *client.Config + dbID, dbID2 proto.DatabaseID + nonce interfaces.AccountNonce + ctx1, ctx2, ctx3, ctx4, ctx5 context.Context + ccl1, ccl2, ccl3, ccl4, ccl5 context.CancelFunc ) startNodes() defer stopNodes() @@ -268,7 +273,7 @@ func TestFullProcess(t *testing.T) { So(err, ShouldBeNil) // get miner addresses - cliPriv, _, err = privKeyStoreToAccountAddr( + cliPriv, cliAddr, err = privKeyStoreToAccountAddr( FJ(testWorkingDir, "./observation/node_c/private.key"), []byte{}) So(err, ShouldBeNil) _, addr, err = privKeyStoreToAccountAddr( @@ -277,6 +282,9 @@ func TestFullProcess(t *testing.T) { _, addr2, err = privKeyStoreToAccountAddr( FJ(testWorkingDir, "./observation/node_miner_1/private.key"), []byte{}) So(err, ShouldBeNil) + obPriv, obAddr, err = privKeyStoreToAccountAddr( + FJ(testWorkingDir, "./observation/node_observer/private.key"), []byte{}) + So(err, ShouldBeNil) // wait until bp chain service is ready ctx1, ccl1 = context.WithTimeout(context.Background(), 1*time.Minute) @@ -298,10 +306,87 @@ func TestFullProcess(t *testing.T) { // wait cfg, err = client.ParseDSN(dsn) So(err, ShouldBeNil) - dbID = cfg.DatabaseID + dbID = proto.DatabaseID(cfg.DatabaseID) + dbAddr, err = dbID.AccountAddress() + So(err, ShouldBeNil) ctx2, ccl2 = context.WithTimeout(context.Background(), 5*time.Minute) defer ccl2() - err = bp.WaitDatabaseCreation(ctx2, proto.DatabaseID(dbID), db, 3*time.Second) + err = 
client.WaitDBCreation(ctx2, dsn) + So(err, ShouldBeNil) + + // get nonce for observer + nonce, err = requestNonce(cliAddr) + So(err, ShouldBeNil) + + // update permission for observer + up := types.NewUpdatePermission(&types.UpdatePermissionHeader{ + TargetSQLChain: dbAddr, + TargetUser: obAddr, + Permission: types.UserPermissionFromRole(types.Read), + Nonce: nonce, + }) + err = up.Sign(cliPriv) + So(err, ShouldBeNil) + addTxReq := &types.AddTxReq{} + addTxResp := &types.AddTxResp{} + addTxReq.Tx = up + err = rpc.RequestBP(route.MCCAddTx.String(), addTxReq, addTxResp) + So(err, ShouldBeNil) + + // wait for profile permission checking + ctx4, ccl4 = context.WithTimeout(context.Background(), 1*time.Minute) + defer ccl4() + err = waitProfileChecking(ctx4, 3*time.Second, proto.DatabaseID(dbID), func(profile *types.SQLChainProfile) bool { + for _, user := range profile.Users { + log.WithFields(log.Fields{ + "addr": user.Address.String(), + "perm": user.Permission, + "stat": user.Status, + }).Debug("checkFunc 1") + if user.Address == obAddr { + return user.Permission.HasReadPermission() + } + } + return false + }) + So(err, ShouldBeNil) + + // get nonce for ob + nonce, err = requestNonce(obAddr) + So(err, ShouldBeNil) + + // transfer token to ob + tran := types.NewTransfer(&types.TransferHeader{ + Sender: obAddr, + Receiver: dbAddr, + Amount: 100000000, + TokenType: types.Particle, + Nonce: nonce, + }) + err = tran.Sign(obPriv) + So(err, ShouldBeNil) + addTxReq = &types.AddTxReq{} + addTxResp = &types.AddTxResp{} + addTxReq.Tx = tran + err = rpc.RequestBP(route.MCCAddTx.String(), addTxReq, addTxResp) + So(err, ShouldBeNil) + + // check ob status + ctx5, ccl5 = context.WithTimeout(context.Background(), 1*time.Minute) + defer ccl5() + err = waitProfileChecking(ctx5, 3*time.Second, proto.DatabaseID(dbID), func(profile *types.SQLChainProfile) bool { + for _, user := range profile.Users { + log.WithFields(log.Fields{ + "addr": user.Address.String(), + "perm": user.Permission, 
+ "stat": user.Status, + }).Debug("checkFunc 2") + if user.Address == obAddr { + return user.Status.EnableQuery() + } + } + return false + }) So(err, ShouldBeNil) _, err = db.Exec("CREATE TABLE test (test int)") @@ -369,11 +454,11 @@ func TestFullProcess(t *testing.T) { // wait cfg2, err = client.ParseDSN(dsn2) So(err, ShouldBeNil) - dbID2 = cfg2.DatabaseID + dbID2 = proto.DatabaseID(cfg2.DatabaseID) So(dbID, ShouldNotResemble, dbID2) ctx3, ccl3 = context.WithTimeout(context.Background(), 5*time.Minute) defer ccl3() - err = bp.WaitDatabaseCreation(ctx3, proto.DatabaseID(dbID2), db2, 3*time.Second) + err = client.WaitDBCreation(ctx3, dsn2) So(err, ShouldBeNil) _, err = db2.Exec("CREATE TABLE test (test int)") @@ -403,7 +488,7 @@ func TestFullProcess(t *testing.T) { observerCmd, err = utils.RunCommandNB( FJ(baseDir, "./bin/cql-observer.test"), []string{"-config", FJ(testWorkingDir, "./observation/node_observer/config.yaml"), - "-database", dbID, "-reset", "oldest", + "-database", string(dbID), "-reset", "oldest", "-test.coverprofile", FJ(baseDir, "./cmd/cql-observer/observer.cover.out"), }, "observer", testWorkingDir, logDir, false, @@ -530,6 +615,77 @@ func TestFullProcess(t *testing.T) { So(err, ShouldNotBeNil) log.Info(err, res) + // test get genesis block by height + res, err = getJSON("v3/head/%v", dbID2) + So(err, ShouldNotBeNil) + + // get nonce for observer + nonce, err = requestNonce(cliAddr) + So(err, ShouldBeNil) + + // update permission for observer + dbAddr2, err = dbID2.AccountAddress() + So(err, ShouldBeNil) + up = types.NewUpdatePermission(&types.UpdatePermissionHeader{ + TargetSQLChain: dbAddr2, + TargetUser: obAddr, + Permission: types.UserPermissionFromRole(types.Read), + Nonce: nonce, + }) + err = up.Sign(cliPriv) + So(err, ShouldBeNil) + addTxReq = &types.AddTxReq{} + addTxResp = &types.AddTxResp{} + addTxReq.Tx = up + err = rpc.RequestBP(route.MCCAddTx.String(), addTxReq, addTxResp) + So(err, ShouldBeNil) + + // wait for profile permission 
checking + ctx4, ccl4 = context.WithTimeout(context.Background(), 1*time.Minute) + defer ccl4() + err = waitProfileChecking(ctx4, 3*time.Second, proto.DatabaseID(dbID2), func(profile *types.SQLChainProfile) bool { + for _, user := range profile.Users { + if user.Address == obAddr { + return user.Permission.HasReadPermission() + } + } + return false + }) + So(err, ShouldBeNil) + + // get nonce for ob + nonce, err = requestNonce(obAddr) + So(err, ShouldBeNil) + + // transfer token to ob + tran = types.NewTransfer(&types.TransferHeader{ + Sender: obAddr, + Receiver: dbAddr2, + Amount: 100000000, + TokenType: types.Particle, + Nonce: nonce, + }) + err = tran.Sign(obPriv) + So(err, ShouldBeNil) + addTxReq = &types.AddTxReq{} + addTxResp = &types.AddTxResp{} + addTxReq.Tx = tran + err = rpc.RequestBP(route.MCCAddTx.String(), addTxReq, addTxResp) + So(err, ShouldBeNil) + + // check ob status + ctx5, ccl5 = context.WithTimeout(context.Background(), 1*time.Minute) + defer ccl5() + err = waitProfileChecking(ctx5, 3*time.Second, proto.DatabaseID(dbID2), func(profile *types.SQLChainProfile) bool { + for _, user := range profile.Users { + if user.Address == obAddr { + return user.Status.EnableQuery() + } + } + return false + }) + So(err, ShouldBeNil) + // test get genesis block by height res, err = getJSON("v3/head/%v", dbID2) So(err, ShouldBeNil) @@ -544,3 +700,45 @@ func TestFullProcess(t *testing.T) { So(err, ShouldBeNil) }) } + +func requestNonce(addr proto.AccountAddress) (nonce interfaces.AccountNonce, err error) { + nonceReq := &types.NextAccountNonceReq{} + nonceResp := &types.NextAccountNonceResp{} + nonceReq.Addr = addr + err = rpc.RequestBP(route.MCCNextAccountNonce.String(), nonceReq, nonceResp) + if err != nil { + return + } + nonce = nonceResp.Nonce + return +} + +func waitProfileChecking(ctx context.Context, period time.Duration, dbID proto.DatabaseID, + checkFunc func(profile *types.SQLChainProfile) bool) (err error) { + var ( + ticker = time.NewTicker(period) + 
req = &types.QuerySQLChainProfileReq{} + resp = &types.QuerySQLChainProfileResp{} + ) + defer ticker.Stop() + req.DBID = dbID + + for { + select { + case <-ticker.C: + err = rpc.RequestBP(route.MCCQuerySQLChainProfile.String(), req, resp) + if err == nil { + if checkFunc(&resp.Profile) { + return + } + log.WithFields(log.Fields{ + "dbID": resp.Profile.Address, + "num_of_user": len(resp.Profile.Users), + }).Debugf("get profile but failed to check in waitProfileChecking") + } + case <-ctx.Done(): + err = ctx.Err() + return + } + } +} diff --git a/cmd/cql-observer/service.go b/cmd/cql-observer/service.go index b8f539e7a..a2465a368 100644 --- a/cmd/cql-observer/service.go +++ b/cmd/cql-observer/service.go @@ -34,6 +34,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/CovenantSQL/CovenantSQL/worker" bolt "github.com/coreos/bbolt" ) @@ -283,12 +284,12 @@ func (s *Service) startSubscribe(dbID proto.DatabaseID) (err error) { return } - req := &sqlchain.MuxSubscribeTransactionsReq{} - resp := &sqlchain.MuxSubscribeTransactionsResp{} + req := &worker.SubscribeTransactionsReq{} + resp := &worker.SubscribeTransactionsResp{} req.Height = s.subscription[dbID] req.DatabaseID = dbID - err = s.minerRequest(dbID, route.SQLCSubscribeTransactions.String(), req, resp) + err = s.minerRequest(dbID, route.DBSSubscribeTransactions.String(), req, resp) return } @@ -340,7 +341,7 @@ func (s *Service) addQueryTracker(dbID proto.DatabaseID, height int32, offset in if err = qt.Request.Verify(); err != nil { return } - if err = qt.Response.Verify(); err != nil { + if err = qt.Response.VerifyHash(); err != nil { return } @@ -440,11 +441,11 @@ func (s *Service) stop() (err error) { for dbID := range s.subscription { // send cancel subscription rpc - req := &sqlchain.MuxCancelSubscriptionReq{} - resp := &sqlchain.MuxCancelSubscriptionResp{} + req := &worker.CancelSubscriptionReq{} + resp := 
&worker.CancelSubscriptionResp{} req.DatabaseID = dbID - if err = s.minerRequest(dbID, route.SQLCCancelSubscription.String(), req, resp); err != nil { + if err = s.minerRequest(dbID, route.DBSCancelSubscription.String(), req, resp); err != nil { // cancel subscription failed log.WithField("db", dbID).WithError(err).Warning("cancel subscription") } diff --git a/cmd/cql-utils/README-zh.md b/cmd/cql-utils/README-zh.md index cb36fd688..37a78bb72 100644 --- a/cmd/cql-utils/README-zh.md +++ b/cmd/cql-utils/README-zh.md @@ -11,19 +11,19 @@ $ go get github.com/CovenantSQL/CovenantSQL/cmd/cql-utils ### 生成公私钥对 ``` -$ cql-utils -tool keygen +$ cql-utils -tool confgen Enter master key(press Enter for default: ""): ⏎ Private key file: private.key Public key's hex: 03bc9e90e3301a2f5ae52bfa1f9e033cde81b6b6e7188b11831562bf5847bff4c0 ``` -生成的 private.key 文件即是使用主密码加密过的私钥文件,而输出到屏幕上的字符串就是使用十六进制进行编码的公钥。 +生成的 ~/.cql/private.key 文件即是使用主密码加密过的私钥文件,而输出到屏幕上的字符串就是使用十六进制进行编码的公钥。 ### 使用私钥文件或公钥生成钱包地址 ``` -$ cql-utils -tool addrgen -private private.key +$ cql-utils -tool addrgen Enter master key(default: ""): ⏎ wallet address: 4jXvNvPHKNPU8Sncz5u5F5WSGcgXmzC1g8RuAXTCJzLsbF9Dsf9 @@ -31,4 +31,4 @@ $ cql-utils -tool addrgen -public 02f2707c1c6955a9019cd9d02ade37b931fbfa286a1163 wallet address: 4jXvNvPHKNPU8Sncz5u5F5WSGcgXmzC1g8RuAXTCJzLsbF9Dsf9 ``` -你可以通过指定私钥文件,或者把上述的公钥十六进制编码字符串作为命令行参数来直接生成钱包地址。 \ No newline at end of file +你也可以通过-private指定私钥文件,或者把上述的公钥十六进制编码字符串作为命令行参数来直接生成钱包地址。 diff --git a/cmd/cql-utils/README.md b/cmd/cql-utils/README.md index b66ea80b7..d9da3a7ad 100644 --- a/cmd/cql-utils/README.md +++ b/cmd/cql-utils/README.md @@ -11,19 +11,19 @@ $ go get github.com/CovenantSQL/CovenantSQL/cmd/cql-utils ### Generate Key Pair ``` -$ cql-utils -tool keygen +$ cql-utils -tool confgen Enter master key(press Enter for default: ""): ⏎ Private key file: private.key Public key's hex: 03bc9e90e3301a2f5ae52bfa1f9e033cde81b6b6e7188b11831562bf5847bff4c0 ``` -The private.key is your encrypted private key 
file, and the pubkey hex is your public key's hex. +The ~/.cql/private.key is your encrypted private key file, and the pubkey hex is your public key's hex. ### Generate Wallet Address from existing Key ``` -$ cql-utils -tool addrgen -private private.key +$ cql-utils -tool addrgen Enter master key(default: ""): ⏎ wallet address: 4jXvNvPHKNPU8Sncz5u5F5WSGcgXmzC1g8RuAXTCJzLsbF9Dsf9 @@ -31,4 +31,4 @@ $ cql-utils -tool addrgen -public 02f2707c1c6955a9019cd9d02ade37b931fbfa286a1163 wallet address: 4jXvNvPHKNPU8Sncz5u5F5WSGcgXmzC1g8RuAXTCJzLsbF9Dsf9 ``` -You can generate your *wallet* address for test net according to your private key or public key. +You can generate your *wallet* address for test net according to your private key(default ~/.cql/private) or public key. diff --git a/cmd/cql-utils/confgen.go b/cmd/cql-utils/confgen.go index cc7aa8ee6..304a9b5d4 100644 --- a/cmd/cql-utils/confgen.go +++ b/cmd/cql-utils/confgen.go @@ -27,6 +27,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/conf/testnet" "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" yaml "gopkg.in/yaml.v2" ) @@ -36,10 +37,11 @@ var ( ) func init() { - flag.StringVar(&workingRoot, "root", "conf", "confgen root is the working root directory containing all auto-generating keys and certifications") + flag.StringVar(&workingRoot, "root", "~/.cql", "confgen root is the working root directory containing all auto-generating keys and certifications") } func runConfgen() { + workingRoot = utils.HomeDirExpand(workingRoot) if workingRoot == "" { log.Error("root directory is required for confgen") os.Exit(1) @@ -60,7 +62,11 @@ func runConfgen() { os.Exit(1) } if strings.Compare(t, "y") == 0 || strings.Compare(t, "yes") == 0 { - os.RemoveAll(workingRoot) + err = os.RemoveAll(workingRoot) + if err != nil { + log.WithError(err).Error("unexpected error") + os.Exit(1) + } } else { os.Exit(0) } diff --git a/cmd/cql-utils/main.go 
b/cmd/cql-utils/main.go index 052f5b87c..9099daeff 100644 --- a/cmd/cql-utils/main.go +++ b/cmd/cql-utils/main.go @@ -23,6 +23,7 @@ import ( "runtime" "syscall" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" "golang.org/x/crypto/ssh/terminal" ) @@ -33,6 +34,7 @@ var ( publicKeyHex string privateKeyFile string configFile string + skipMasterKey bool showVersion bool ) @@ -41,10 +43,11 @@ const name = "cql-utils" func init() { log.SetLevel(log.InfoLevel) - flag.StringVar(&tool, "tool", "", "tool type, miner, keygen, keytool, rpc, nonce, confgen, addrgen, adapterconfgen") - flag.StringVar(&publicKeyHex, "public", "", "public key hex string to mine node id/nonce") - flag.StringVar(&privateKeyFile, "private", "private.key", "private key file to generate/show") - flag.StringVar(&configFile, "config", "config.yaml", "config file to use") + flag.StringVar(&tool, "tool", "", "Tool type, miner, keytool, rpc, nonce, confgen, addrgen, adapterconfgen") + flag.StringVar(&publicKeyHex, "public", "", "Public key hex string to mine node id/nonce") + flag.StringVar(&privateKeyFile, "private", "~/.cql/private.key", "Private key file to generate/show") + flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file to use") + flag.BoolVar(&skipMasterKey, "skip-master-key", false, "Use empty master key") flag.BoolVar(&showVersion, "version", false, "Show version information and exit") } @@ -57,6 +60,9 @@ func main() { } log.Infof("cql-utils build: %#v\n", version) + configFile = utils.HomeDirExpand(configFile) + privateKeyFile = utils.HomeDirExpand(privateKeyFile) + switch tool { case "miner": if publicKeyHex == "" && privateKeyFile == "" { @@ -65,13 +71,14 @@ func main() { os.Exit(1) } runMiner() - case "keygen": - if privateKeyFile == "" { - // error - log.Error("privateKey path is required for keygen") - os.Exit(1) - } - runKeygen() + // Disable keygen independent call + //case "keygen": + // if privateKeyFile == "" { + // // 
error + // log.Error("privateKey path is required for keygen") + // os.Exit(1) + // } + // runKeygen() case "keytool": if privateKeyFile == "" { // error @@ -100,6 +107,9 @@ func main() { } func readMasterKey() (string, error) { + if skipMasterKey { + return "", nil + } fmt.Println("Enter master key(press Enter for default: \"\"): ") bytePwd, err := terminal.ReadPassword(int(syscall.Stdin)) fmt.Println() diff --git a/cmd/cql-utils/rpc.go b/cmd/cql-utils/rpc.go index 775af1b97..bc83f4ceb 100644 --- a/cmd/cql-utils/rpc.go +++ b/cmd/cql-utils/rpc.go @@ -22,6 +22,7 @@ import ( "fmt" "reflect" "strings" + "time" bp "github.com/CovenantSQL/CovenantSQL/blockproducer" pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" @@ -46,9 +47,10 @@ var ( route.SQLChainRPCName: &sqlchain.MuxService{}, route.BlockProducerRPCName: &bp.ChainRPCService{}, } - rpcName string - rpcEndpoint string - rpcReq string + rpcName string + rpcEndpoint string + rpcReq string + rpcTxWaitConfirm bool ) type canSign interface { @@ -59,6 +61,7 @@ func init() { flag.StringVar(&rpcName, "rpc", "", "rpc name to do test call") flag.StringVar(&rpcEndpoint, "rpc-endpoint", "", "rpc endpoint to do test call") flag.StringVar(&rpcReq, "rpc-req", "", "rpc request to do test call, in json format") + flag.BoolVar(&rpcTxWaitConfirm, "rpc-tx-wait-confirm", false, "wait for transaction confirmation") } func runRPC() { @@ -83,6 +86,7 @@ func runRPC() { if rpcName == route.MCCAddTx.String() { // special type of query if addTxReqType, ok := req.(*types.AddTxReq); ok { + addTxReqType.TTL = 1 addTxReqType.Tx = &pi.TransactionWrapper{} } } @@ -94,9 +98,10 @@ func runRPC() { } // fill nonce if this is a AddTx request + var tx pi.Transaction if rpcName == route.MCCAddTx.String() { if addTxReqType, ok := req.(*types.AddTxReq); ok { - var tx = addTxReqType.Tx + tx = addTxReqType.Tx for { if txWrapper, ok := tx.(*pi.TransactionWrapper); ok { tx = txWrapper.Unwrap() @@ -137,6 +142,42 @@ func runRPC() { // print the 
response log.Info("got response") spewCfg.Dump(resp) + + if rpcName == route.MCCAddTx.String() && rpcTxWaitConfirm { + log.Info("waiting for transaction confirmation...") + var ( + err error + ticker = time.NewTicker(1 * time.Second) + req = &types.QueryTxStateReq{Hash: tx.Hash()} + resp = &types.QueryTxStateResp{} + ) + defer ticker.Stop() + for { + if err = rpc.NewCaller().CallNode( + proto.NodeID(rpcEndpoint), + route.MCCQueryTxState.String(), + req, resp, + ); err != nil { + log.Fatalf("query transaction state failed: %v", err) + } + switch resp.State { + case pi.TransactionStatePending: + fmt.Print(".") + case pi.TransactionStatePacked: + fmt.Print("+") + case pi.TransactionStateConfirmed: + fmt.Print("✔\n") + return + case pi.TransactionStateExpired, pi.TransactionStateNotFound: + fmt.Print("✘\n") + log.Fatalf("bad transaction state: %s", resp.State) + default: + fmt.Print("✘\n") + log.Fatal("unknown transaction state") + } + <-ticker.C + } + } } func checkAndSign(req interface{}) (err error) { diff --git a/cmd/cql/README-zh.md b/cmd/cql/README-zh.md index b4ca69a3a..a591243b3 100644 --- a/cmd/cql/README-zh.md +++ b/cmd/cql/README-zh.md @@ -19,7 +19,7 @@ $ go get github.com/CovenantSQL/CovenantSQL/cmd/cql 使用 `cql` 命令来检查钱包余额: ```bash -$ cql -config conf/config.yaml -get-balance +$ cql -get-balance INFO[0000] ### Public Key ### 0388954cf083bb6bb2b9c7248849b57c76326296fcc0d69764fc61eedb5b8d820c @@ -36,7 +36,7 @@ INFO[0000] covenant coin balance is: 0 caller="main.go:247 mai ```bash # if a non-default password applied on master key, use `-password` to pass it -$ cql -config conf/config.yaml -create 1 +$ cql -create 1 INFO[0000] ### Public Key ### 039bc931161383c994ab9b81e95ddc1494b0efeb1cb735bb91e1043a1d6b98ebfd @@ -48,7 +48,7 @@ INFO[0000] the newly created database is: covenantsql://0e9103318821b027f35b96c4 这里 `-create 1` 表示创建一个单节点的 SQLChain。 ```bash -$ cql -config conf/config.yaml -dsn covenantsql://address +$ cql -dsn covenantsql://address ``` `address` 
就是你的数据库 ID。 diff --git a/cmd/cql/README.md b/cmd/cql/README.md index 4d3ec5074..ab5b3c9f5 100644 --- a/cmd/cql/README.md +++ b/cmd/cql/README.md @@ -19,7 +19,7 @@ See: [cql-utils doc](https://github.com/CovenantSQL/CovenantSQL/tree/develop/cmd Use `cql` to check your wallet balance: ```bash -$ cql -config conf/config.yaml -get-balance +$ cql -get-balance INFO[0000] ### Public Key ### 0388954cf083bb6bb2b9c7248849b57c76326296fcc0d69764fc61eedb5b8d820c @@ -37,7 +37,7 @@ You can get a database id when create a new SQL Chain: ```bash # if a non-default password applied on master key, use `-password` to pass it -$ cql -config conf/config.yaml -create 1 +$ cql -create 1 INFO[0000] ### Public Key ### 039bc931161383c994ab9b81e95ddc1494b0efeb1cb735bb91e1043a1d6b98ebfd @@ -49,7 +49,7 @@ INFO[0000] the newly created database is: covenantsql://0e9103318821b027f35b96c4 Here, `-create 1` refers that there is only one node in SQL Chain. ```bash -$ cql -config conf/config.yaml -dsn covenantsql://address +$ cql -dsn covenantsql://address ``` `address` is database id. 
diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 1c0efd6a6..825e9f669 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -30,11 +30,16 @@ import ( "runtime" "strconv" "strings" + "time" + pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/client" + "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/types" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" sqlite3 "github.com/CovenantSQL/go-sqlite3-encrypt" "github.com/xo/dburl" @@ -67,12 +72,23 @@ var ( transferToken string // transfer token to target account getBalance bool // get balance of current account getBalanceWithTokenName string // get specific token's balance of current account + waitTxConfirmation bool // wait for transaction confirmation before exiting + + waitTxConfirmationMaxDuration time.Duration ) type userPermission struct { TargetChain proto.AccountAddress `json:"chain"` TargetUser proto.AccountAddress `json:"user"` - Perm string `json:"perm"` + Perm json.RawMessage `json:"perm"` +} + +type userPermPayload struct { + // User role to access database. + Role types.UserPermissionRole `json:"role"` + // SQL pattern regulations for user queries + // only a fully matched (case-sensitive) sql query is permitted to execute. 
+ Patterns []string `json:"patterns"` } type tranToken struct { @@ -98,7 +114,7 @@ func (v *varsFlag) Set(value string) error { return nil } -func init() { +func usqlRegister() { // set command name of usql text.CommandName = "covenantsql" @@ -170,7 +186,9 @@ func init() { log.Infof("connecting to %#v", url.DSN) // wait for database to become ready - if err = client.WaitDBCreation(context.Background(), dsn); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), waitTxConfirmationMaxDuration) + defer cancel() + if err = client.WaitDBCreation(ctx, dsn); err != nil { return } @@ -195,27 +213,31 @@ func init() { Aliases: []string{}, Override: "", }) +} - flag.StringVar(&dsn, "dsn", "", "database url") - flag.StringVar(&command, "command", "", "run only single command (SQL or usql internal command) and exit") - flag.StringVar(&fileName, "file", "", "execute commands from file and exit") +func init() { + + flag.StringVar(&dsn, "dsn", "", "Database url") + flag.StringVar(&command, "command", "", "Run only single command (SQL or usql internal command) and exit") + flag.StringVar(&fileName, "file", "", "Execute commands from file and exit") flag.BoolVar(&showVersion, "version", false, "Show version information and exit") - flag.BoolVar(&noRC, "no-rc", false, "do not read start up file") - flag.BoolVar(&asymmetric.BypassSignature, "bypassSignature", false, + flag.BoolVar(&noRC, "no-rc", false, "Do not read start up file") + flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, "Disable signature sign and verify, for testing") - flag.StringVar(&outFile, "out", "", "output file") - flag.StringVar(&configFile, "config", "config.yaml", "config file for covenantsql") - flag.StringVar(&password, "password", "", "master key password for covenantsql") - flag.BoolVar(&singleTransaction, "single-transaction", false, "execute as a single transaction (if non-interactive)") - flag.Var(&variables, "variable", "set variable") + flag.StringVar(&outFile, 
"out", "", "Record stdout to file") + flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file for covenantsql") + flag.StringVar(&password, "password", "", "Master key password for covenantsql") + flag.BoolVar(&singleTransaction, "single-transaction", false, "Execute as a single transaction (if non-interactive)") + flag.Var(&variables, "variable", "Set variable") // DML flags - flag.StringVar(&createDB, "create", "", "create database, argument can be instance requirement json or simply a node count requirement") - flag.StringVar(&dropDB, "drop", "", "drop database, argument should be a database id (without covenantsql:// scheme is acceptable)") - flag.StringVar(&updatePermission, "update-perm", "", "update user's permission on specific sqlchain") - flag.StringVar(&transferToken, "transfer", "", "transfer token to target account") - flag.BoolVar(&getBalance, "get-balance", false, "get balance of current account") - flag.StringVar(&getBalanceWithTokenName, "token-balance", "", "get specific token's balance of current account, e.g. Particle, Wave, and etc.") + flag.StringVar(&createDB, "create", "", "Create database, argument can be instance requirement json or simply a node count requirement") + flag.StringVar(&dropDB, "drop", "", "Drop database, argument should be a database id (without covenantsql:// scheme is acceptable)") + flag.StringVar(&updatePermission, "update-perm", "", "Update user's permission on specific sqlchain") + flag.StringVar(&transferToken, "transfer", "", "Transfer token to target account") + flag.BoolVar(&getBalance, "get-balance", false, "Get balance of current account") + flag.StringVar(&getBalanceWithTokenName, "token-balance", "", "Get specific token's balance of current account, e.g. 
Particle, Wave, and etc.") + flag.BoolVar(&waitTxConfirmation, "wait-tx-confirm", false, "Wait for transaction confirmation") } func main() { @@ -225,6 +247,9 @@ func main() { name, version, runtime.GOOS, runtime.GOARCH, runtime.Version()) os.Exit(0) } + log.Infof("cql build: %#v\n", version) + + configFile = utils.HomeDirExpand(configFile) var err error @@ -235,6 +260,13 @@ func main() { return } + // TODO(leventeliu): discover more specific confirmation duration from config. We don't have + // enough informations from config to do that currently, so just use a fixed and long enough + // duration. + waitTxConfirmationMaxDuration = 20 * conf.GConf.BPPeriod + + usqlRegister() + if getBalance { var stableCoinBalance, covenantCoinBalance uint64 @@ -321,7 +353,19 @@ func main() { return } + if waitTxConfirmation { + var ctx, cancel = context.WithTimeout(context.Background(), waitTxConfirmationMaxDuration) + defer cancel() + err = client.WaitDBCreation(ctx, dsn) + if err != nil { + log.WithError(err).Error("create database failed durating creation") + os.Exit(-1) + return + } + } + log.Infof("the newly created database is: %#v", dsn) + fmt.Printf(dsn) return } @@ -334,22 +378,39 @@ func main() { return } - var p types.UserPermission - p.FromString(perm.Perm) - if p > types.NumberOfUserPermission { - log.WithError(err).Errorf("update permission failed: invalid permission description") + var permPayload userPermPayload + + if err := json.Unmarshal(perm.Perm, &permPayload); err != nil { + // try again using role string representation + if err := json.Unmarshal(perm.Perm, &permPayload.Role); err != nil { + log.WithError(err).Errorf("update permission failed: invalid permission description") + os.Exit(-1) + return + } + } + + p := &types.UserPermission{ + Role: permPayload.Role, + Patterns: permPayload.Patterns, + } + + if !p.IsValid() { + log.Errorf("update permission failed: invalid permission description") os.Exit(-1) return } - err := 
client.UpdatePermission(perm.TargetUser, perm.TargetChain, p) - + txHash, err := client.UpdatePermission(perm.TargetUser, perm.TargetChain, p) if err != nil { log.WithError(err).Error("update permission failed") os.Exit(-1) return } + if waitTxConfirmation { + wait(txHash) + } + log.Info("succeed in sending transaction to CovenantSQL") return } @@ -391,13 +452,18 @@ func main() { return } - err = client.TransferToken(tran.TargetUser, amount, unit) + var txHash hash.Hash + txHash, err = client.TransferToken(tran.TargetUser, amount, unit) if err != nil { log.WithError(err).Error("transfer token failed") os.Exit(-1) return } + if waitTxConfirmation { + wait(txHash) + } + log.Info("succeed in sending transaction to CovenantSQL") return } @@ -440,8 +506,21 @@ func main() { bindings = append(bindings, name) } log.Infof("available drivers are: %#v", bindings) - return } + os.Exit(-1) + } +} + +func wait(txHash hash.Hash) { + var ctx, cancel = context.WithTimeout(context.Background(), waitTxConfirmationMaxDuration) + defer cancel() + var state, err = client.WaitTxConfirmation(ctx, txHash) + log.WithFields(log.Fields{ + "tx_hash": txHash, + "tx_state": state, + }).WithError(err).Info("wait transaction confirmation") + if err != nil || state != pi.TransactionStateConfirmed { + os.Exit(1) } } diff --git a/cmd/cqld/adapter.go b/cmd/cqld/adapter.go index 047d3b726..8b41a2769 100644 --- a/cmd/cqld/adapter.go +++ b/cmd/cqld/adapter.go @@ -115,7 +115,7 @@ func (s *LocalStorage) Check(req interface{}) (err error) { } // Commit implements kayak.types.Handler.Commit. 
-func (s *LocalStorage) Commit(req interface{}) (_ interface{}, err error) { +func (s *LocalStorage) Commit(req interface{}, isLeader bool) (_ interface{}, err error) { var kp *KayakPayload var cl *compiledLog var ok bool @@ -246,7 +246,7 @@ func (s *KayakKVServer) Init(storePath string, initNodes []proto.Node) (err erro Command: CmdSet, Data: nodeBuf.Bytes(), } - _, err = s.KVStorage.Commit(payload) + _, err = s.KVStorage.Commit(payload, true) if err != nil { log.WithError(err).Error("init kayak KV commit node failed") return diff --git a/cmd/cqld/bootstrap.go b/cmd/cqld/bootstrap.go index 4a5770f1c..72387d2d0 100644 --- a/cmd/cqld/bootstrap.go +++ b/cmd/cqld/bootstrap.go @@ -24,6 +24,8 @@ import ( "syscall" "time" + "github.com/CovenantSQL/CovenantSQL/api" + bp "github.com/CovenantSQL/CovenantSQL/blockproducer" "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/kms" @@ -40,15 +42,22 @@ import ( ) const ( - kayakServiceName = "Kayak" - kayakMethodName = "Call" - kayakWalFileName = "kayak.ldb" + kayakServiceName = "Kayak" + kayakApplyMethodName = "Apply" + kayakFetchMethodName = "Fetch" + kayakWalFileName = "kayak.ldb" + kayakPrepareTimeout = 5 * time.Second + kayakCommitTimeout = time.Minute + kayakLogWaitTimeout = 10 * time.Second ) func runNode(nodeID proto.NodeID, listenAddr string) (err error) { rootPath := conf.GConf.WorkingRoot - genesis := loadGenesis() + genesis, err := loadGenesis() + if err != nil { + return + } var masterKey []byte if !conf.GConf.IsTestMode { @@ -75,6 +84,18 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { return } + mode := bp.BPMode + if wsapiAddr != "" { + mode = bp.APINodeMode + } + + if mode == bp.APINodeMode { + if err = rpc.RegisterNodeToBP(30 * time.Second); err != nil { + log.WithError(err).Fatal("register node to BP") + return + } + } + var server *rpc.Server // create server @@ -94,43 +115,45 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { 
server.Stop() }() - // init storage - log.Info("init storage") - var st *LocalStorage - if st, err = initStorage(conf.GConf.DHTFileName); err != nil { - log.WithError(err).Error("init storage failed") - return - } + if mode == bp.BPMode { + // init storage + log.Info("init storage") + var st *LocalStorage + if st, err = initStorage(conf.GConf.DHTFileName); err != nil { + log.WithError(err).Error("init storage failed") + return err + } - // init kayak - log.Info("init kayak runtime") - var kayakRuntime *kayak.Runtime - if kayakRuntime, err = initKayakTwoPC(rootPath, thisNode, peers, st, server); err != nil { - log.WithError(err).Error("init kayak runtime failed") - return - } + // init kayak + log.Info("init kayak runtime") + var kayakRuntime *kayak.Runtime + if kayakRuntime, err = initKayakTwoPC(rootPath, thisNode, peers, st, server); err != nil { + log.WithError(err).Error("init kayak runtime failed") + return err + } - // init kayak and consistent - log.Info("init kayak and consistent runtime") - kvServer := &KayakKVServer{ - Runtime: kayakRuntime, - KVStorage: st, - } - dht, err := route.NewDHTService(conf.GConf.DHTFileName, kvServer, true) - if err != nil { - log.WithError(err).Error("init consistent hash failed") - return - } + // init kayak and consistent + log.Info("init kayak and consistent runtime") + kvServer := &KayakKVServer{ + Runtime: kayakRuntime, + KVStorage: st, + } + dht, err := route.NewDHTService(conf.GConf.DHTFileName, kvServer, true) + if err != nil { + log.WithError(err).Error("init consistent hash failed") + return err + } - // set consistent handler to kayak storage - kvServer.KVStorage.consistent = dht.Consistent + // set consistent handler to kayak storage + kvServer.KVStorage.consistent = dht.Consistent - // register service rpc - log.Info("register dht service rpc") - err = server.RegisterService(route.DHTRPCName, dht) - if err != nil { - log.WithError(err).Error("register dht service failed") - return + // register service rpc + 
log.Info("register dht service rpc") + err = server.RegisterService(route.DHTRPCName, dht) + if err != nil { + log.WithError(err).Error("register dht service failed") + return err + } } // init main chain service @@ -144,16 +167,26 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { conf.GConf.BPPeriod, conf.GConf.BPTick, ) + chainConfig.Mode = mode chain, err := bp.NewChain(chainConfig) if err != nil { log.WithError(err).Error("init chain failed") - return + return err } chain.Start() defer chain.Stop() log.Info(conf.StartSucceedMessage) - //go periodicPingBlockProducer() + + // start json-rpc server + if mode == bp.APINodeMode { + log.Info("wsapi: start service") + go func() { + if err := api.Serve(wsapiAddr, conf.GConf.BP.ChainFileName); err != nil { + log.WithError(err).Error("wsapi: start service") + } + }() + } signalCh := make(chan os.Signal, 1) signal.Notify( @@ -164,7 +197,6 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { signal.Ignore(syscall.SIGHUP, syscall.SIGTTIN, syscall.SIGTTOU) <-signalCh - return } @@ -194,13 +226,15 @@ func initKayakTwoPC(rootDir string, node *proto.Node, peers *proto.Peers, h kt.H Handler: h, PrepareThreshold: 1.0, CommitThreshold: 1.0, - PrepareTimeout: time.Second, - CommitTimeout: time.Second * 60, + PrepareTimeout: kayakPrepareTimeout, + CommitTimeout: kayakCommitTimeout, + LogWaitTimeout: kayakLogWaitTimeout, Peers: peers, Wal: logWal, NodeID: node.ID, ServiceName: kayakServiceName, - MethodName: kayakMethodName, + ApplyMethodName: kayakApplyMethodName, + FetchMethodName: kayakFetchMethodName, } // create kayak runtime @@ -223,20 +257,16 @@ func initKayakTwoPC(rootDir string, node *proto.Node, peers *proto.Peers, h kt.H return } -func loadGenesis() *types.BPBlock { +func loadGenesis() (genesis *types.BPBlock, err error) { genesisInfo := conf.GConf.BP.BPGenesis log.WithField("config", genesisInfo).Info("load genesis config") - genesis := &types.BPBlock{ + genesis = &types.BPBlock{ 
SignedHeader: types.BPSignedHeader{ BPHeader: types.BPHeader{ - Version: genesisInfo.Version, - Producer: proto.AccountAddress(genesisInfo.Producer), - MerkleRoot: genesisInfo.MerkleRoot, - ParentHash: genesisInfo.ParentHash, - Timestamp: genesisInfo.Timestamp, + Version: genesisInfo.Version, + Timestamp: genesisInfo.Timestamp, }, - BlockHash: genesisInfo.BlockHash, }, } @@ -253,5 +283,9 @@ func loadGenesis() *types.BPBlock { })) } - return genesis + // Rewrite genesis merkle and block hash + if err = genesis.SetHash(); err != nil { + return + } + return } diff --git a/cmd/cqld/client.go b/cmd/cqld/client.go deleted file mode 100644 index 39a18c47c..000000000 --- a/cmd/cqld/client.go +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package main - -import ( - "flag" - "fmt" - "net" - "path/filepath" - "strings" - "syscall" - "time" - - "github.com/CovenantSQL/CovenantSQL/conf" - "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/rpc" - "github.com/CovenantSQL/CovenantSQL/utils/log" - "golang.org/x/crypto/ssh/terminal" -) - -func runClient(nodeID proto.NodeID) (err error) { - var idx int - for i, n := range conf.GConf.KnownNodes { - if n.ID == nodeID { - idx = i - break - } - } - - rootPath := conf.GConf.WorkingRoot - pubKeyStorePath := filepath.Join(rootPath, conf.GConf.PubKeyStoreFile) - privateKeyPath := filepath.Join(rootPath, conf.GConf.PrivateKeyFile) - - // read master key - var masterKey []byte - if !conf.GConf.IsTestMode { - fmt.Print("Type in Master key to continue: ") - masterKey, err = terminal.ReadPassword(syscall.Stdin) - if err != nil { - fmt.Printf("Failed to read Master Key: %v", err) - } - fmt.Println("") - } - - err = kms.InitLocalKeyPair(privateKeyPath, masterKey) - if err != nil { - log.WithError(err).Error("init local key pair failed") - return - } - - conf.GConf.KnownNodes[idx].PublicKey, err = kms.GetLocalPublicKey() - if err != nil { - log.WithError(err).Error("get local public key failed") - return - } - //nodeInfo := asymmetric.GetPubKeyNonce(AllNodes[idx].PublicKey, 20, 500*time.Millisecond, nil) - //log.Debugf("client pubkey:\n%x", AllNodes[idx].PublicKey.Serialize()) - //log.Debugf("client nonce:\n%v", nodeInfo) - - // init nodes - log.Info("init peers") - _, _, _, err = initNodePeers(nodeID, pubKeyStorePath) - if err != nil { - return - } - - // do client request - if err = clientRequest(clientOperation, flag.Arg(0)); err != nil { - return - } - - return -} - -func clientRequest(reqType string, sql string) (err error) { - log.SetLevel(log.DebugLevel) - leaderNodeID := kms.BP.NodeID - var conn net.Conn - var client *rpc.Client - - if len(reqType) > 0 && 
strings.Title(reqType[:1]) == "P" { - if conn, err = rpc.DialToNode(leaderNodeID, rpc.GetSessionPoolInstance(), false); err != nil { - return - } - if client, err = rpc.InitClientConn(conn); err != nil { - return - } - reqType = "Ping" - node1 := proto.NewNode() - node1.InitNodeCryptoInfo(100 * time.Millisecond) - - reqA := &proto.PingReq{ - Node: *node1, - } - - respA := new(proto.PingResp) - log.Debugf("req %#v: %#v", reqType, reqA) - err = client.Call("DHT."+reqType, reqA, respA) - if err != nil { - log.Fatal(err) - } - log.Debugf("resp %#v: %#v", reqType, respA) - } else { - for _, bp := range conf.GConf.KnownNodes { - if bp.Role == proto.Leader || bp.Role == proto.Follower { - if conn, err = rpc.DialToNode(bp.ID, rpc.GetSessionPoolInstance(), false); err != nil { - return - } - if client, err = rpc.InitClientConn(conn); err != nil { - return - } - log.WithField("bp", bp.ID).Debug("calling BP") - reqType = "FindNeighbor" - req := &proto.FindNeighborReq{ - ID: proto.NodeID(flag.Arg(0)), - Count: 10, - } - resp := new(proto.FindNeighborResp) - log.Debugf("req %#v: %#v", reqType, req) - err = client.Call("DHT."+reqType, req, resp) - if err != nil { - log.Fatal(err) - } - log.Debugf("resp %#v: %#v", reqType, resp) - } - } - } - - return -} diff --git a/cmd/cqld/cqld_test.go b/cmd/cqld/cqld_test.go index 7d3c267b6..0e193eee9 100644 --- a/cmd/cqld/cqld_test.go +++ b/cmd/cqld/cqld_test.go @@ -27,6 +27,7 @@ import ( bp "github.com/CovenantSQL/CovenantSQL/blockproducer" "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/kms" + "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/types" @@ -67,11 +68,19 @@ func TestCQLD(t *testing.T) { // Wait for block producing time.Sleep(15 * time.Second) - // Kill one BP + // Kill one BP follower err = nodeCmds[2].Cmd.Process.Signal(syscall.SIGTERM) So(err, ShouldBeNil) time.Sleep(15 * 
time.Second) + // set current bp to leader bp + for _, n := range conf.GConf.KnownNodes { + if n.Role == proto.Leader { + rpc.SetCurrentBP(n.ID) + break + } + } + // The other peers should be waiting var ( req = &types.FetchLastIrreversibleBlockReq{} diff --git a/cmd/cqld/kayak.go b/cmd/cqld/kayak.go index 2d64c9e04..ee36f886a 100644 --- a/cmd/cqld/kayak.go +++ b/cmd/cqld/kayak.go @@ -38,7 +38,18 @@ func NewKayakService(server *rpc.Server, serviceName string, rt *kayak.Runtime) return } -// Call handles kayak call. -func (s *KayakService) Call(req *kt.RPCRequest, _ *interface{}) (err error) { +// Apply handles kayak apply call. +func (s *KayakService) Apply(req *kt.ApplyRequest, _ *interface{}) (err error) { return s.rt.FollowerApply(req.Log) } + +// Fetch handles kayak log fetch call. +func (s *KayakService) Fetch(req *kt.FetchRequest, resp *kt.FetchResponse) (err error) { + var l *kt.Log + if l, err = s.rt.Fetch(req.GetContext(), req.Index); err != nil { + return + } + + resp.Log = l + return +} diff --git a/cmd/cqld/main.go b/cmd/cqld/main.go index 5164735d7..ca4d92bd1 100644 --- a/cmd/cqld/main.go +++ b/cmd/cqld/main.go @@ -27,6 +27,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" + "github.com/CovenantSQL/CovenantSQL/metric" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" ) @@ -50,14 +51,16 @@ var ( // profile cpuProfile string memProfile string + metricWeb string // other noLogo bool showVersion bool configFile string - clientMode bool - clientOperation string + wsapiAddr string + + logLevel string ) const name = `cqld` @@ -66,34 +69,36 @@ const desc = `CovenantSQL is a Distributed Database running on BlockChain` func init() { flag.BoolVar(&noLogo, "nologo", false, "Do not print logo") flag.BoolVar(&showVersion, "version", false, "Show version information and exit") - flag.BoolVar(&asymmetric.BypassSignature, 
"bypassSignature", false, + flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, "Disable signature sign and verify, for testing") - flag.StringVar(&configFile, "config", "./config.yaml", "Config file path") + flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file path") flag.StringVar(&cpuProfile, "cpu-profile", "", "Path to file for CPU profiling information") flag.StringVar(&memProfile, "mem-profile", "", "Path to file for memory profiling information") + flag.StringVar(&metricWeb, "metric-web", "", "Address and port to get internal metrics") - flag.BoolVar(&clientMode, "client", false, "run as client") - flag.StringVar(&clientOperation, "operation", "FindNeighbor", "client operation") + flag.StringVar(&wsapiAddr, "wsapi", "", "Address of the websocket JSON-RPC API, run as API Node") + flag.StringVar(&logLevel, "log-level", "", "Service log level") flag.Usage = func() { - fmt.Fprintf(os.Stderr, "\n%s\n\n", desc) - fmt.Fprintf(os.Stderr, "Usage: %s [arguments]\n", name) + _, _ = fmt.Fprintf(os.Stderr, "\n%s\n\n", desc) + _, _ = fmt.Fprintf(os.Stderr, "Usage: %s [arguments]\n", name) flag.PrintDefaults() } } func initLogs() { log.Infof("%#v starting, version %#v, commit %#v, branch %#v", name, version, commit, branch) - log.Infof("%#v, target architecture is %#v, operating system target is %#v", runtime.Version(), runtime.GOARCH, runtime.GOOS) + log.Infof("%#v, target architecture is %#v, operating system target is %#v", + runtime.Version(), runtime.GOARCH, runtime.GOOS) log.Infof("role: %#v", conf.RoleTag) } func main() { + flag.Parse() + log.SetStringLevel(logLevel, log.InfoLevel) // set random rand.Seed(time.Now().UnixNano()) - log.SetLevel(log.DebugLevel) - flag.Parse() if showVersion { fmt.Printf("%v %v %v %v %v\n", @@ -101,6 +106,8 @@ func main() { os.Exit(0) } + configFile = utils.HomeDirExpand(configFile) + flag.Visit(func(f *flag.Flag) { log.Infof("args %#v : %s", f.Name, f.Value) }) @@ -113,7 +120,7 @@ func main() { 
kms.InitBP() log.Debugf("config:\n%#v", conf.GConf) - // BP DO NOT Generate new key pair + // BP Never Generate new key pair conf.GConf.GenerateKeyPair = false // init log @@ -123,18 +130,16 @@ func main() { fmt.Print(logo) } - // init profile, if cpuProfile, memProfile length is 0, nothing will be done - utils.StartProfile(cpuProfile, memProfile) - defer utils.StopProfile() - - if clientMode { - if err := runClient(conf.GConf.ThisNodeID); err != nil { - log.WithError(err).Fatal("run client failed") - } else { - log.Info("run client success") + if len(metricWeb) > 0 { + err = metric.InitMetricWeb(metricWeb) + if err != nil { + log.Errorf("start metric web server on %s failed: %v", metricWeb, err) + os.Exit(-1) } - return } + // init profile, if cpuProfile, memProfile length is 0, nothing will be done + _ = utils.StartProfile(cpuProfile, memProfile) + defer utils.StopProfile() if err := runNode(conf.GConf.ThisNodeID, conf.GConf.ListenAddr); err != nil { log.WithError(err).Fatal("run kayak failed") diff --git a/conf/config.go b/conf/config.go index e2949a1c8..5acef45a3 100644 --- a/conf/config.go +++ b/conf/config.go @@ -54,16 +54,8 @@ type BaseAccountInfo struct { type BPGenesisInfo struct { // Version defines the block version Version int32 `yaml:"Version"` - // Producer defines the block producer - Producer hash.Hash `yaml:"Producer"` - // MerkleRoot defines the transaction merkle tree's root - MerkleRoot hash.Hash `yaml:"MerkleRoot"` - // ParentHash defines the parent block's hash - ParentHash hash.Hash `yaml:"ParentHash"` // Timestamp defines the initial time of chain Timestamp time.Time `yaml:"Timestamp"` - // BlockHash defines the block hash of genesis block - BlockHash hash.Hash `yaml:"BlockHash"` // BaseAccounts defines the base accounts for testnet BaseAccounts []BaseAccountInfo `yaml:"BaseAccounts"` } @@ -97,9 +89,10 @@ type MinerDatabaseFixture struct { // MinerInfo for miner config. type MinerInfo struct { // node basic config. 
- RootDir string `yaml:"RootDir"` - MaxReqTimeGap time.Duration `yaml:"MaxReqTimeGap,omitempty"` - ProvideServiceInterval time.Duration `yaml:"ProvideServiceInterval,omitempty"` + RootDir string `yaml:"RootDir"` + MaxReqTimeGap time.Duration `yaml:"MaxReqTimeGap,omitempty"` + ProvideServiceInterval time.Duration `yaml:"ProvideServiceInterval,omitempty"` + TargetUsers []proto.AccountAddress `yaml:"TargetUsers,omitempty"` // when test mode, fixture database config is used. IsTestMode bool `yaml:"IsTestMode,omitempty"` diff --git a/conf/config_test.go b/conf/config_test.go index 1cdc590dd..43bfc74e5 100644 --- a/conf/config_test.go +++ b/conf/config_test.go @@ -62,12 +62,8 @@ func TestConf(t *testing.T) { }, ChainFileName: "", BPGenesis: BPGenesisInfo{ - Version: 1, - Producer: h, - MerkleRoot: h, - ParentHash: h, - Timestamp: time.Now().UTC(), - BlockHash: h, + Version: 1, + Timestamp: time.Now().UTC(), }, } Convey("LoadConfig", t, func() { diff --git a/conf/limits.go b/conf/limits.go new file mode 100644 index 000000000..36359a024 --- /dev/null +++ b/conf/limits.go @@ -0,0 +1,29 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package conf + +const ( + // MaxTxBroadcastTTL defines the TTL limit of a AddTx request broadcasting within the + // block producers. + MaxTxBroadcastTTL = 1 + // MaxPendingTxsPerAccount defines the limit of pending transactions of one account. 
+ MaxPendingTxsPerAccount = 1000 + // MaxTransactionsPerBlock defines the limit of transactions per block. + MaxTransactionsPerBlock = 10000 + // MaxRPCPoolPhysicalConnection defines max underlying physical connection for one node pair. + MaxRPCPoolPhysicalConnection = 2 +) diff --git a/conf/parameters.go b/conf/parameters.go index 8fbc04951..4fa2296ef 100644 --- a/conf/parameters.go +++ b/conf/parameters.go @@ -16,17 +16,12 @@ package conf -import "time" +// This parameters should be kept consistent in all BPs. +const ( + DefaultConfirmThreshold = float64(2) / 3.0 +) +// This parameters will not cause inconsistency within certain range. const ( - // BPPeriod is the block producer block produce period. - BPPeriod = 3 * time.Second - // BPTick is the block produce block fetch tick. - BPTick = 1 * time.Second - // SQLChainPeriod is the sqlchain block produce period. - SQLChainPeriod = 3 * time.Second - // SQLChainTick is the sqlchain block fetch tick. - SQLChainTick = 1 * time.Second - // SQLChainTTL is the sqlchain unack query billing ttl. 
- SQLChainTTL = 10 + BPStartupRequiredReachableCount = 2 // NOTE: this includes myself ) diff --git a/conf/testnet/config.yaml b/conf/testnet/config.yaml index 2b4df6adf..6716e0b7c 100644 --- a/conf/testnet/config.yaml +++ b/conf/testnet/config.yaml @@ -26,6 +26,30 @@ BlockProducer: b: 0 c: 0 d: 6148914694092305796 + ChainFileName: chain.db + BPGenesisInfo: + Version: 1 + Producer: "0000000000000000000000000000000000000000000000000000000000000001" + MerkleRoot: "0000000000000000000000000000000000000000000000000000000000000001" + ParentHash: "0000000000000000000000000000000000000000000000000000000000000001" + Timestamp: 2019-01-02T13:33:00Z + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + BaseAccounts: + - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 58aceaf4b730b54bf00c0fb3f7b14886de470767f313c2d108968cd8bf0794b7 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 KnownNodes: - ID: 00000000000589366268c274fdc11ec8bdb17e668d2f619555a2e9c1a29c91d8 Nonce: @@ -90,3 +114,12 @@ KnownNodes: Addr: "127.0.0.1:4661" PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 Role: Client +QPS: 1000 +ChainBusPeriod: 0s +BillingBlockCount: 60 +BPPeriod: 10s +BPTick: 3s +SQLChainPeriod: 1m0s +SQLChainTick: 10s +SQLChainTTL: 10 +MinProviderDeposit: 1000000 diff --git 
a/crypto/asymmetric/signature.go b/crypto/asymmetric/signature.go index ee3a02b9a..3aaf08045 100644 --- a/crypto/asymmetric/signature.go +++ b/crypto/asymmetric/signature.go @@ -21,16 +21,19 @@ import ( "errors" "math/big" - "github.com/CovenantSQL/CovenantSQL/crypto/secp256k1" - "github.com/CovenantSQL/CovenantSQL/utils" hsp "github.com/CovenantSQL/HashStablePack/marshalhash" ec "github.com/btcsuite/btcd/btcec" + lru "github.com/hashicorp/golang-lru" + + "github.com/CovenantSQL/CovenantSQL/crypto/secp256k1" + "github.com/CovenantSQL/CovenantSQL/utils" ) var ( // BypassSignature is the flag indicate if bypassing signature sign & verify BypassSignature = false bypassS *Signature + verifyCache *lru.Cache ) // For test Signature.Sign mock @@ -38,6 +41,7 @@ func init() { priv, _ := ec.NewPrivateKey(ec.S256()) ss, _ := (*ec.PrivateKey)(priv).Sign(([]byte)("00000000000000000000000000000000")) bypassS = (*Signature)(ss) + verifyCache, _ = lru.New(256) } // Signature is a type representing an ecdsa signature. 
@@ -85,6 +89,7 @@ func (private *PrivateKey) Sign(hash []byte) (*Signature, error) { S: new(big.Int).SetBytes(sb[32:64]), } //s, e := (*ec.PrivateKey)(private).Sign(hash) + return (*Signature)(s), e } @@ -98,12 +103,22 @@ func (s *Signature) Verify(hash []byte, signee *PublicKey) bool { return false } - signature := make([]byte, 64) + cacheKey := make([]byte, 64+len(hash)+ec.PubKeyBytesLenUncompressed) + signature := cacheKey[:64] copy(signature, utils.PaddedBigBytes(s.R, 32)) copy(signature[32:], utils.PaddedBigBytes(s.S, 32)) + copy(cacheKey[64:64+len(hash)], hash) signeeBytes := (*ec.PublicKey)(signee).SerializeUncompressed() - ret := secp256k1.VerifySignature(signeeBytes, hash, signature) - return ret + copy(cacheKey[64+len(hash):], signeeBytes) + + if _, ok := verifyCache.Get(string(cacheKey)); ok { + return true + } + valid := secp256k1.VerifySignature(signeeBytes, hash, signature) + if valid { + verifyCache.Add(string(cacheKey), nil) + } + return valid //return ecdsa.Verify(signee.toECDSA(), hash, s.R, s.S) } diff --git a/crypto/etls/conn.go b/crypto/etls/conn.go index eca5c6ebf..f73a3fba8 100644 --- a/crypto/etls/conn.go +++ b/crypto/etls/conn.go @@ -76,9 +76,6 @@ func (c *CryptoConn) Read(b []byte) (n int, err error) { if err = c.initDecrypt(iv); err != nil { return } - if len(c.iv) == 0 { - c.iv = iv - } c.decrypt(header, header) if header[0] != ETLSMagicBytes[0] || header[1] != ETLSMagicBytes[1] { err = errors.New("bad stream ETLS header") diff --git a/crypto/etls/conn_test.go b/crypto/etls/conn_test.go index ffa9b1010..1c62bcff8 100644 --- a/crypto/etls/conn_test.go +++ b/crypto/etls/conn_test.go @@ -23,9 +23,11 @@ import ( "sync" "testing" + "github.com/pkg/errors" + . "github.com/smartystreets/goconvey/convey" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/utils/log" - . 
"github.com/smartystreets/goconvey/convey" ) const service = "127.0.0.1:28000" @@ -178,6 +180,16 @@ func clientComplex(pass string, args *QueryComplex) (ret *ResultComplex, err err func handleClient(conn net.Conn) { defer conn.Close() + var err error + + if c, ok := conn.(*CryptoConn); ok { + conn, err = simpleCipherHandler(c.Conn) + if err != nil { + err = errors.Wrap(err, "handle ETLS handler failed") + return + } + } + rpc.ServeConn(conn) log.Debugln("server: conn: closed") } @@ -272,8 +284,16 @@ func TestCryptoConn_RW(t *testing.T) { go func() { rBuf := make([]byte, len(msg)) conn, err := l.Accept() - cc, _ := conn.(*CryptoConn) - n, err := cc.Read(rBuf) + + if c, ok := conn.(*CryptoConn); ok { + conn, err = l.CHandler(c.Conn) + if err != nil { + err = errors.Wrap(err, "handle ETLS handler failed") + return + } + } + + n, err := conn.Read(rBuf) c.So(n, ShouldEqual, len(msg)) c.So(string(rBuf), ShouldResemble, msg) c.So(err, ShouldBeNil) diff --git a/crypto/etls/encrypt.go b/crypto/etls/encrypt.go index c50fd9249..34e0eaf39 100644 --- a/crypto/etls/encrypt.go +++ b/crypto/etls/encrypt.go @@ -86,7 +86,6 @@ type Cipher struct { decStream cipher.Stream key []byte info *cipherInfo - iv []byte } // NewCipher creates a cipher that can be used in Dial(), Listen() etc. @@ -109,14 +108,9 @@ func NewCipher(rawKey []byte) (c *Cipher) { // initEncrypt Initializes the block cipher with CFB mode, returns IV. 
func (c *Cipher) initEncrypt() (iv []byte, err error) { - if c.iv == nil { - iv = make([]byte, c.info.ivLen) - if _, err := io.ReadFull(rand.Reader, iv); err != nil { - return nil, err - } - c.iv = iv - } else { - iv = c.iv + iv = make([]byte, c.info.ivLen) + if _, err := io.ReadFull(rand.Reader, iv); err != nil { + return nil, err } c.encStream, err = c.info.newEncStream(c.key, iv) return diff --git a/crypto/etls/listener.go b/crypto/etls/listener.go index b0bc8d5bd..cb350a254 100644 --- a/crypto/etls/listener.go +++ b/crypto/etls/listener.go @@ -45,7 +45,9 @@ func (l *CryptoListener) Accept() (net.Conn, error) { return nil, err } - return l.CHandler(c) + return &CryptoConn{ + Conn: c, + }, nil } // Close closes the listener. diff --git a/crypto/kms/pubkeystore.go b/crypto/kms/pubkeystore.go index 2719a707c..10a2a41a5 100644 --- a/crypto/kms/pubkeystore.go +++ b/crypto/kms/pubkeystore.go @@ -17,7 +17,6 @@ package kms import ( - "errors" "os" "path/filepath" "runtime" @@ -32,6 +31,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" bolt "github.com/coreos/bbolt" + "github.com/pkg/errors" ) // PublicKeyStore holds db and bucket name @@ -145,7 +145,7 @@ func InitPublicKeyStore(dbPath string, initNodes []proto.Node) (err error) { for _, n := range initNodes { err = setNode(&n) if err != nil { - log.WithError(err).Error("set init nodes failed") + err = errors.Wrap(err, "set init nodes failed") return } } @@ -186,7 +186,7 @@ func GetNodeInfo(id proto.NodeID) (nodeInfo *proto.Node, err error) { return err // return from View func }) if err != nil { - log.WithError(err).Error("get node info failed") + err = errors.Wrap(err, "get node info failed") } return } @@ -210,7 +210,7 @@ func GetAllNodeID() (nodeIDs []proto.NodeID, err error) { return err // return from View func }) if err != nil { - log.WithError(err).Error("get all node id failed") + err = errors.Wrap(err, "get all node id failed") } return @@ -260,7 +260,7 @@ 
func setNode(nodeInfo *proto.Node) (err error) { nodeBuf, err := utils.EncodeMsgPack(nodeInfo) if err != nil { - log.WithError(err).Error("marshal node info failed") + err = errors.Wrap(err, "marshal node info failed") return } log.Debugf("set node: %#v", nodeInfo) @@ -273,7 +273,7 @@ func setNode(nodeInfo *proto.Node) (err error) { return bucket.Put([]byte(nodeInfo.ID), nodeBuf.Bytes()) }) if err != nil { - log.WithError(err).Error("get node info failed") + err = errors.Wrap(err, "get node info failed") } return @@ -295,7 +295,7 @@ func DelNode(id proto.NodeID) (err error) { return bucket.Delete([]byte(id)) }) if err != nil { - log.WithError(err).Error("del node failed") + err = errors.Wrap(err, "del node failed") } return } @@ -309,7 +309,7 @@ func removeBucket() (err error) { return tx.DeleteBucket([]byte(kmsBucketName)) }) if err != nil { - log.WithError(err).Error("remove bucket failed") + err = errors.Wrap(err, "remove bucket failed") return } // ks.bucket == nil means bucket not exist @@ -332,7 +332,7 @@ func ResetBucket() error { }) pks.bucket = bucketName if err != nil { - log.WithError(err).Error("reset bucket failed") + err = errors.Wrap(err, "reset bucket failed") } return err diff --git a/crypto/kms/pubkeystore_test.go b/crypto/kms/pubkeystore_test.go index 707f839db..911ba6a77 100644 --- a/crypto/kms/pubkeystore_test.go +++ b/crypto/kms/pubkeystore_test.go @@ -26,6 +26,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/pkg/errors" . 
"github.com/smartystreets/goconvey/convey" yaml "gopkg.in/yaml.v2" ) @@ -71,7 +72,7 @@ func TestDB(t *testing.T) { pubk, err = GetPublicKey(proto.NodeID("99999999")) So(pubk, ShouldBeNil) - So(err, ShouldEqual, ErrKeyNotFound) + So(errors.Cause(err), ShouldEqual, ErrKeyNotFound) err = SetNode(nil) So(err, ShouldEqual, ErrNilNode) @@ -116,31 +117,31 @@ func TestDB(t *testing.T) { pubk, err = GetPublicKey(proto.NodeID("2222")) So(pubk, ShouldBeNil) - So(err, ShouldEqual, ErrKeyNotFound) + So(errors.Cause(err), ShouldEqual, ErrKeyNotFound) err = removeBucket() So(err, ShouldBeNil) pubk, err = GetPublicKey(proto.NodeID("not exist")) So(pubk, ShouldBeNil) - So(err, ShouldEqual, ErrBucketNotInitialized) + So(errors.Cause(err), ShouldEqual, ErrBucketNotInitialized) err = setNode(node1) - So(err, ShouldEqual, ErrBucketNotInitialized) + So(errors.Cause(err), ShouldEqual, ErrBucketNotInitialized) err = DelNode(proto.NodeID("2222")) - So(err, ShouldEqual, ErrBucketNotInitialized) + So(errors.Cause(err), ShouldEqual, ErrBucketNotInitialized) IDs, err = GetAllNodeID() So(IDs, ShouldBeNil) - So(err, ShouldEqual, ErrBucketNotInitialized) + So(errors.Cause(err), ShouldEqual, ErrBucketNotInitialized) err = ResetBucket() So(err, ShouldBeNil) pubk, err = GetPublicKey(proto.NodeID("2222")) So(pubk, ShouldBeNil) - So(err, ShouldEqual, ErrKeyNotFound) + So(errors.Cause(err), ShouldEqual, ErrKeyNotFound) IDs, err = GetAllNodeID() So(IDs, ShouldBeNil) diff --git a/crypto/verifier/common.go b/crypto/verifier/common.go index 7885eb67a..dc99a8fbb 100644 --- a/crypto/verifier/common.go +++ b/crypto/verifier/common.go @@ -33,7 +33,11 @@ type MarshalHasher interface { // MarshalHasher, can be signed by a private key and verified later. 
type HashSignVerifier interface { Hash() hash.Hash + SetHash(MarshalHasher) error + SignHash(*ca.PrivateKey) error Sign(MarshalHasher, *ca.PrivateKey) error + VerifyHash(MarshalHasher) error + VerifySignature() error Verify(MarshalHasher) error } @@ -49,23 +53,37 @@ func (i *DefaultHashSignVerifierImpl) Hash() hash.Hash { return i.DataHash } -// Sign implements HashSignVerifier.Sign. -func (i *DefaultHashSignVerifierImpl) Sign(mh MarshalHasher, signer *ca.PrivateKey) (err error) { +// SetHash implements HashSignVerifier.SetHash. +func (i *DefaultHashSignVerifierImpl) SetHash(mh MarshalHasher) (err error) { var enc []byte if enc, err = mh.MarshalHash(); err != nil { return } - var h = hash.THashH(enc) - if i.Signature, err = signer.Sign(h[:]); err != nil { + i.DataHash = hash.THashH(enc) + return +} + +// SignHash implements HashSignVerifier.SignHash. +func (i *DefaultHashSignVerifierImpl) SignHash(signer *ca.PrivateKey) (err error) { + if i.Signature, err = signer.Sign(i.DataHash[:]); err != nil { return } - i.DataHash = h i.Signee = signer.PubKey() return } -// Verify implements HashSignVerifier.Verify. -func (i *DefaultHashSignVerifierImpl) Verify(mh MarshalHasher) (err error) { +// Sign implements HashSignVerifier.Sign. +func (i *DefaultHashSignVerifierImpl) Sign(mh MarshalHasher, signer *ca.PrivateKey) (err error) { + // Set hash + if err = i.SetHash(mh); err != nil { + return + } + err = i.SignHash(signer) + return +} + +// VerifyHash implements HashSignVerifier.VerifyHash. +func (i *DefaultHashSignVerifierImpl) VerifyHash(mh MarshalHasher) (err error) { var enc []byte if enc, err = mh.MarshalHash(); err != nil { return @@ -75,9 +93,23 @@ func (i *DefaultHashSignVerifierImpl) Verify(mh MarshalHasher) (err error) { err = errors.WithStack(ErrHashValueNotMatch) return } - if i.Signature == nil || i.Signee == nil || !i.Signature.Verify(h[:], i.Signee) { + return +} + +// VerifySignature implements HashSignVerifier.VerifySignature. 
+func (i *DefaultHashSignVerifierImpl) VerifySignature() (err error) { + if !i.Signature.Verify(i.DataHash[:], i.Signee) { err = errors.WithStack(ErrSignatureNotMatch) return } return } + +// Verify implements HashSignVerifier.Verify. +func (i *DefaultHashSignVerifierImpl) Verify(mh MarshalHasher) (err error) { + if err = i.VerifyHash(mh); err != nil { + return + } + err = i.VerifySignature() + return +} diff --git a/crypto/verifier/common_gen.go b/crypto/verifier/common_gen.go index 943ac0eda..eaa23367a 100644 --- a/crypto/verifier/common_gen.go +++ b/crypto/verifier/common_gen.go @@ -11,49 +11,46 @@ func (z *DefaultHashSignVerifierImpl) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) - if z.Signee == nil { + o = append(o, 0x83) + if oTemp, err := z.DataHash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + if z.Signature == nil { o = hsp.AppendNil(o) } else { - if oTemp, err := z.Signee.MarshalHash(); err != nil { + if oTemp, err := z.Signature.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x83) - if z.Signature == nil { + if z.Signee == nil { o = hsp.AppendNil(o) } else { - if oTemp, err := z.Signature.MarshalHash(); err != nil { + if oTemp, err := z.Signee.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x83) - if oTemp, err := z.DataHash.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *DefaultHashSignVerifierImpl) Msgsize() (s int) { - s = 1 + 7 - if z.Signee == nil { + s = 1 + 9 + z.DataHash.Msgsize() + 10 + if z.Signature == nil { s += hsp.NilSize } else { - s += z.Signee.Msgsize() + s += z.Signature.Msgsize() } - s += 10 - if z.Signature == nil { + s += 7 + if 
z.Signee == nil { s += hsp.NilSize } else { - s += z.Signature.Msgsize() + s += z.Signee.Msgsize() } - s += 9 + z.DataHash.Msgsize() return } diff --git a/docker-compose.yml b/docker-compose.yml index 592c67902..7117c291c 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,4 +1,4 @@ -version: "3" +version: '3' services: covenantsql_bp_0: @@ -6,121 +6,133 @@ services: container_name: covenantsql_bp_0 restart: always ports: - - "11099:4661" + - '11099:4661' + - '12099:4665' environment: COVENANT_ROLE: blockproducer COVENANT_CONF: ./node_0/config.yaml + METRIC_WEB_ADDR: '0.0.0.0:4665' volumes: - ./test/service/node_0/:/app/node_0/ networks: default: ipv4_address: 172.254.1.2 logging: - driver: "json-file" + driver: 'json-file' options: - max-size: "1m" - max-file: "10" + max-size: '1m' + max-file: '10' covenantsql_bp_1: image: covenantsql/covenantsql:latest container_name: covenantsql_bp_1 restart: always ports: - - "11100:4661" + - '11100:4661' + - '12100:4665' environment: COVENANT_ROLE: blockproducer COVENANT_CONF: ./node_1/config.yaml + METRIC_WEB_ADDR: '0.0.0.0:4665' volumes: - ./test/service/node_1/:/app/node_1/ networks: default: ipv4_address: 172.254.1.3 logging: - driver: "json-file" + driver: 'json-file' options: - max-size: "1m" - max-file: "10" + max-size: '1m' + max-file: '10' covenantsql_bp_2: image: covenantsql/covenantsql:latest container_name: covenantsql_bp_2 restart: always ports: - - "11101:4661" + - '11101:4661' + - '12101:4665' environment: COVENANT_ROLE: blockproducer COVENANT_CONF: ./node_2/config.yaml + METRIC_WEB_ADDR: '0.0.0.0:4665' volumes: - ./test/service/node_2/:/app/node_2/ networks: default: ipv4_address: 172.254.1.4 logging: - driver: "json-file" + driver: 'json-file' options: - max-size: "1m" - max-file: "10" + max-size: '1m' + max-file: '10' covenantsql_miner_0: image: covenantsql/covenantsql:latest container_name: covenantsql_miner_0 restart: always ports: - - "11102:4661" + - '11102:4661' + - '12102:4665' environment: 
COVENANT_ROLE: miner COVENANT_CONF: ./node_miner_0/config.yaml + METRIC_WEB_ADDR: '0.0.0.0:4665' volumes: - ./test/service/node_miner_0/:/app/node_miner_0/ networks: default: ipv4_address: 172.254.1.5 logging: - driver: "json-file" + driver: 'json-file' options: - max-size: "1m" - max-file: "10" + max-size: '1m' + max-file: '10' covenantsql_miner_1: image: covenantsql/covenantsql:latest container_name: covenantsql_miner_1 restart: always ports: - - "11103:4661" + - '11103:4661' + - '12103:4665' environment: COVENANT_ROLE: miner COVENANT_CONF: ./node_miner_1/config.yaml + METRIC_WEB_ADDR: '0.0.0.0:4665' volumes: - ./test/service/node_miner_1/:/app/node_miner_1/ networks: default: ipv4_address: 172.254.1.6 logging: - driver: "json-file" + driver: 'json-file' options: - max-size: "1m" - max-file: "10" + max-size: '1m' + max-file: '10' covenantsql_miner_2: image: covenantsql/covenantsql:latest container_name: covenantsql_miner_2 restart: always ports: - - "11104:4661" + - '11104:4661' + - '12104:4665' environment: COVENANT_ROLE: miner COVENANT_CONF: ./node_miner_2/config.yaml + METRIC_WEB_ADDR: '0.0.0.0:4665' volumes: - ./test/service/node_miner_2/:/app/node_miner_2/ networks: default: ipv4_address: 172.254.1.7 logging: - driver: "json-file" + driver: 'json-file' options: - max-size: "1m" - max-file: "10" + max-size: '1m' + max-file: '10' covenantsql_adapter: image: covenantsql/covenantsql:latest container_name: covenantsql_adapter restart: always ports: - - "11105:4661" + - '11105:4661' environment: COVENANT_ROLE: adapter COVENANT_CONF: ./node_adapter/config.yaml @@ -136,8 +148,8 @@ services: container_name: covenantsql_mysql_adapter restart: always ports: - - "11107:4664" - command: ["-listen", "0.0.0.0:4664"] + - '11107:4664' + command: ['-listen', '0.0.0.0:4664'] environment: COVENANT_ROLE: mysql-adapter COVENANT_CONF: ./node_mysql_adapter/config.yaml @@ -147,16 +159,16 @@ services: default: ipv4_address: 172.254.1.10 logging: - driver: "json-file" + driver: 
'json-file' options: - max-size: "1m" - max-file: "10" + max-size: '1m' + max-file: '10' covenantsql_observer: image: covenantsql/covenantsql-observer:latest container_name: covenantsql_observer restart: always ports: - - "11108:80" + - '11108:80' environment: COVENANT_ROLE: observer COVENANT_CONF: ./node_observer/config.yaml @@ -168,9 +180,29 @@ services: default: ipv4_address: 172.254.1.9 logging: - driver: "json-file" + driver: 'json-file' options: - max-size: "10m" + max-size: '10m' + covenantsql_fn_0: + image: covenantsql/covenantsql:latest + container_name: covenantsql_fn_0 + restart: always + ports: + - '11110:8546' + command: ['-wsapi', ':8546'] + # entrypoint: ["sh"] + environment: + COVENANT_ROLE: blockproducer + COVENANT_CONF: ./fullnode_0/config.yaml + volumes: + - ./test/service/fullnode_0/:/app/fullnode_0/ + networks: + default: + ipv4_address: 172.254.1.11 + logging: + driver: 'json-file' + options: + max-size: '10m' networks: default: diff --git a/docker/observer.Dockerfile b/docker/observer.Dockerfile index 6e37949b2..d0c485796 100644 --- a/docker/observer.Dockerfile +++ b/docker/observer.Dockerfile @@ -20,6 +20,8 @@ RUN apk --no-cache add ca-certificates WORKDIR /app COPY --from=covenantsql/covenantsql-builder /go/src/github.com/CovenantSQL/CovenantSQL/bin/cql-observer /app/ +COPY --from=covenantsql/covenantsql-builder /go/src/github.com/CovenantSQL/CovenantSQL/bin/cql /app/ +COPY --from=covenantsql/covenantsql-builder /go/src/github.com/CovenantSQL/CovenantSQL/bin/cql-utils /app/ COPY --from=covenantsql/covenantsql-builder /go/src/github.com/CovenantSQL/CovenantSQL/bin/docker-entry.sh /app/ ENTRYPOINT [ "./docker-entry.sh" ] EXPOSE 4661 diff --git a/genMarshalHash.sh b/genMarshalHash.sh index f336601d3..bfd98fcac 100755 --- a/genMarshalHash.sh +++ b/genMarshalHash.sh @@ -2,10 +2,8 @@ PROJECT_DIR=$(cd $(dirname $0)/; pwd) -if [[ -x hsp ]]; then - echo "install HashStablePack cmd: hsp" - go get -u github.com/CovenantSQL/HashStablePack/hsp -fi +echo 
"install HashStablePack cmd: hsp" +go get -v -u github.com/CovenantSQL/HashStablePack/hsp echo ${PROJECT_DIR} diff --git a/kayak/callbacks.go b/kayak/callbacks.go new file mode 100644 index 000000000..0bc1cc20f --- /dev/null +++ b/kayak/callbacks.go @@ -0,0 +1,54 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kayak + +import ( + "context" + + "github.com/CovenantSQL/CovenantSQL/utils/trace" + "github.com/pkg/errors" +) + +func (r *Runtime) doCheck(ctx context.Context, req interface{}) (err error) { + defer trace.StartRegion(ctx, "checkCallback").End() + if err = r.sh.Check(req); err != nil { + err = errors.Wrap(err, "verify log") + } + + return +} + +func (r *Runtime) doEncodePayload(ctx context.Context, req interface{}) (enc []byte, err error) { + defer trace.StartRegion(ctx, "encodePayloadCallback").End() + if enc, err = r.sh.EncodePayload(req); err != nil { + err = errors.Wrap(err, "encode kayak payload failed") + } + return +} + +func (r *Runtime) doDecodePayload(ctx context.Context, data []byte) (req interface{}, err error) { + defer trace.StartRegion(ctx, "decodePayloadCallback").End() + if req, err = r.sh.DecodePayload(data); err != nil { + err = errors.Wrap(err, "decode kayak payload failed") + } + return +} + +func (r *Runtime) doCommit(ctx context.Context, req interface{}, isLeader bool) (result interface{}, err error) { + defer trace.StartRegion(ctx, "commitCallback").End() + 
return r.sh.Commit(req, isLeader) +} diff --git a/kayak/commit.go b/kayak/commit.go new file mode 100644 index 000000000..2fd34d33d --- /dev/null +++ b/kayak/commit.go @@ -0,0 +1,267 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kayak + +import ( + "context" + "sync/atomic" + + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/CovenantSQL/CovenantSQL/utils/timer" + "github.com/CovenantSQL/CovenantSQL/utils/trace" + "github.com/pkg/errors" +) + +func (r *Runtime) leaderCommitResult(ctx context.Context, tm *timer.Timer, reqPayload interface{}, prepareLog *kt.Log) (res *commitFuture) { + defer trace.StartRegion(ctx, "leaderCommitResult").End() + + // decode log and send to commit channel to process + res = newCommitFuture() + + if prepareLog == nil { + res.Set(&commitResult{err: errors.Wrap(kt.ErrInvalidLog, "nil prepare log in commit")}) + return + } + + // decode prepare log + req := &commitReq{ + ctx: ctx, + data: reqPayload, + index: prepareLog.Index, + result: res, + tm: tm, + } + + select { + case <-ctx.Done(): + res = nil + case r.commitCh <- req: + } + + return +} + +func (r *Runtime) followerCommitResult(ctx context.Context, tm *timer.Timer, commitLog *kt.Log, prepareLog *kt.Log, lastCommit uint64) (res *commitFuture) { + defer trace.StartRegion(ctx, 
"followerCommitResult").End() + + // decode log and send to commit channel to process + res = newCommitFuture() + + if prepareLog == nil { + res.Set(&commitResult{err: errors.Wrap(kt.ErrInvalidLog, "nil prepare log in commit")}) + return + } + + myLastCommit := atomic.LoadUint64(&r.lastCommit) + + // check committed index + if lastCommit < myLastCommit { + // leader pushed a early index before commit + log.WithFields(log.Fields{ + "head": myLastCommit, + "supplied": lastCommit, + }).Warning("invalid last commit log") + res.Set(&commitResult{err: errors.Wrap(kt.ErrInvalidLog, "invalid last commit log index")}) + return + } + + // decode prepare log + var logReq interface{} + var err error + if logReq, err = r.doDecodePayload(ctx, prepareLog.Data); err != nil { + res.Set(&commitResult{err: errors.Wrap(err, "decode log payload failed")}) + return + } + + tm.Add("decode_payload") + + req := &commitReq{ + ctx: ctx, + data: logReq, + index: prepareLog.Index, + lastCommit: lastCommit, + result: res, + log: commitLog, + tm: tm, + } + + select { + case <-ctx.Done(): + case r.commitCh <- req: + } + + return +} + +func (r *Runtime) commitCycle() { + for { + var cReq *commitReq + + select { + case <-r.stopCh: + return + case cReq = <-r.commitCh: + } + + if cReq != nil { + r.doCommitCycle(cReq) + } + } +} + +func (r *Runtime) leaderDoCommit(req *commitReq) { + if req.log != nil { + // mis-use follower commit for leader + log.Fatal("INVALID EXISTING LOG FOR LEADER COMMIT") + return + } + + req.tm.Add("queue") + + // create leader log + var ( + l *kt.Log + logData []byte + cr = &commitResult{} + err error + ) + + logData = append(logData, r.uint64ToBytes(req.index)...) + logData = append(logData, r.uint64ToBytes(atomic.LoadUint64(&r.lastCommit))...) 
+ + if l, err = r.newLog(req.ctx, kt.LogCommit, logData); err != nil { + // serve error, leader could not write log + return + } + + req.tm.Add("write_wal") + + // not wrapping underlying handler commit error + cr.result, err = r.doCommit(req.ctx, req.data, true) + + req.tm.Add("db_write") + + // mark last commit + atomic.StoreUint64(&r.lastCommit, l.Index) + + // send commit + cr.rpc = r.applyRPC(l, r.minCommitFollowers) + cr.index = l.Index + cr.err = err + + // TODO(): text log for rpc errors + + // TODO(): mark uncommitted nodes and remove from peers + + req.result.Set(cr) + + req.tm.Add("send_follower_commit") + + return +} + +func (r *Runtime) followerDoCommit(req *commitReq) { + if req.log == nil { + log.Fatal("NO LOG FOR FOLLOWER COMMIT") + return + } + + waitCommitTask := trace.StartRegion(req.ctx, "waitForLastCommit") + + // check for last commit availability + myLastCommit := atomic.LoadUint64(&r.lastCommit) + if req.lastCommit != myLastCommit { + // TODO(): need counter for retries, infinite commit re-order would cause troubles + go func(req *commitReq) { + _, _ = r.waitForLog(req.ctx, req.lastCommit) + r.commitCh <- req + }(req) + waitCommitTask.End() + return + } + + waitCommitTask.End() + req.tm.Add("queue") + + defer trace.StartRegion(req.ctx, "commitCycle").End() + + var err error + + // write log first + if err = r.writeWAL(req.ctx, req.log); err != nil { + return + } + + req.tm.Add("write_wal") + + // do commit, not wrapping underlying handler commit error + _, err = r.doCommit(req.ctx, req.data, false) + + req.tm.Add("db_write") + + // mark last commit + atomic.StoreUint64(&r.lastCommit, req.log.Index) + + req.result.Set(&commitResult{ + err: err, + }) + + return +} + +func (r *Runtime) getPrepareLog(ctx context.Context, l *kt.Log) (lastCommitIndex uint64, pl *kt.Log, err error) { + defer trace.StartRegion(ctx, "getPrepareLog").End() + + var prepareIndex uint64 + + // decode prepare index + if prepareIndex, err = r.bytesToUint64(l.Data); err != 
nil { + err = errors.Wrap(err, "log does not contain valid prepare index") + return + } + + if pl, err = r.waitForLog(ctx, prepareIndex); err != nil { + err = errors.Wrap(err, "wait for prepare log failed") + return + } + + // decode commit index + if len(l.Data) >= 16 { + lastCommitIndex, _ = r.bytesToUint64(l.Data[8:]) + + if _, err = r.waitForLog(ctx, lastCommitIndex); err != nil { + err = errors.Wrap(err, "wait for last commit log failed") + return + } + } + + return +} + +func (r *Runtime) doCommitCycle(req *commitReq) { + r.peersLock.RLock() + defer r.peersLock.RUnlock() + + if r.role == proto.Leader { + defer trace.StartRegion(req.ctx, "commitCycle").End() + r.leaderDoCommit(req) + } else { + r.followerDoCommit(req) + } +} diff --git a/kayak/fetch.go b/kayak/fetch.go new file mode 100644 index 000000000..9f6069918 --- /dev/null +++ b/kayak/fetch.go @@ -0,0 +1,99 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package kayak + +import ( + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + "github.com/CovenantSQL/CovenantSQL/utils/log" +) + +func (r *Runtime) markMissingLog(index uint64) { + log.WithFields(log.Fields{ + "index": index, + "instance": r.instanceID, + }).Debug("mark log missing, start fetch") + rawItem, _ := r.waitLogMap.LoadOrStore(index, newWaitItem(index)) + item := rawItem.(*waitItem) + + select { + case <-r.stopCh: + case r.missingLogCh <- item: + } +} + +func (r *Runtime) missingLogCycle() { + for { + var waitItem *waitItem + + select { + case <-r.stopCh: + return + case waitItem = <-r.missingLogCh: + } + + // execute + func() { + r.peersLock.RLock() + defer r.peersLock.RUnlock() + + if waitItem == nil { + return + } + + waitItem.waitLock.Lock() + defer waitItem.waitLock.Unlock() + + var ( + req = &kt.FetchRequest{ + Instance: r.instanceID, + Index: waitItem.index, + } + resp = &kt.FetchResponse{} + err error + ) + + // check existence + if _, err = r.wal.Get(waitItem.index); err == nil { + // already exists + log.WithFields(log.Fields{ + "index": waitItem.index, + "instance": r.instanceID, + }).Debug("log already exists") + r.triggerLogAwaits(waitItem.index) + return + } + + if err = r.getCaller(r.peers.Leader).Call(r.fetchRPCMethod, req, resp); err != nil { + log.WithFields(log.Fields{ + "index": waitItem.index, + "instance": r.instanceID, + }).WithError(err).Debug("fetch log failed") + return + } + + // call follower apply + if resp.Log != nil { + if err = r.FollowerApply(resp.Log); err != nil { + log.WithFields(log.Fields{ + "index": waitItem.index, + "instance": r.instanceID, + }).WithError(err).Debug("apply log failed") + } + } + }() + } +} diff --git a/kayak/log.go b/kayak/log.go new file mode 100644 index 000000000..72c68f865 --- /dev/null +++ b/kayak/log.go @@ -0,0 +1,123 @@ +/* + * Copyright 2019 The CovenantSQL Authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kayak + +import ( + "context" + "io" + "log" + + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + "github.com/CovenantSQL/CovenantSQL/utils/trace" + "github.com/pkg/errors" +) + +func (r *Runtime) newLog(ctx context.Context, logType kt.LogType, data []byte) (l *kt.Log, err error) { + defer trace.StartRegion(ctx, "newWAL").End() + + // allocate index + r.nextIndexLock.Lock() + i := r.nextIndex + r.nextIndex++ + r.nextIndexLock.Unlock() + l = &kt.Log{ + LogHeader: kt.LogHeader{ + Index: i, + Type: logType, + Producer: r.nodeID, + }, + Data: data, + } + + // error write will be a fatal error, cause to node to fail fast + if err = r.wal.Write(l); err != nil { + log.Fatalf("WRITE LOG FAILED: %v", err) + } + + return +} + +func (r *Runtime) writeWAL(ctx context.Context, l *kt.Log) (err error) { + defer trace.StartRegion(ctx, "writeWal").End() + + if err = r.wal.Write(l); err != nil { + err = errors.Wrap(err, "write follower log failed") + } + + return +} + +func (r *Runtime) readLogs() (err error) { + // load logs, only called during init + var l *kt.Log + + for { + if l, err = r.wal.Read(); err != nil && err != io.EOF { + err = errors.Wrap(err, "load previous logs in wal failed") + return + } else if err == io.EOF { + err = nil + break + } + + switch l.Type { + case kt.LogPrepare: + // record in pending prepares + r.pendingPrepares[l.Index] = true + case kt.LogCommit: + // record last commit + 
var lastCommit uint64 + var prepareLog *kt.Log + if lastCommit, prepareLog, err = r.getPrepareLog(context.Background(), l); err != nil { + err = errors.Wrap(err, "previous prepare does not exists, node need full recovery") + return + } + if lastCommit != r.lastCommit { + err = errors.Wrapf(err, + "last commit record in wal mismatched (expected: %v, actual: %v)", r.lastCommit, lastCommit) + return + } + if !r.pendingPrepares[prepareLog.Index] { + err = errors.Wrap(kt.ErrInvalidLog, "previous prepare already committed/rollback") + return + } + r.lastCommit = l.Index + // resolve previous prepared + delete(r.pendingPrepares, prepareLog.Index) + case kt.LogRollback: + var prepareLog *kt.Log + if _, prepareLog, err = r.getPrepareLog(context.Background(), l); err != nil { + err = errors.Wrap(err, "previous prepare does not exists, node need full recovery") + return + } + if !r.pendingPrepares[prepareLog.Index] { + err = errors.Wrap(kt.ErrInvalidLog, "previous prepare already committed/rollback") + return + } + // resolve previous prepared + delete(r.pendingPrepares, prepareLog.Index) + default: + err = errors.Wrapf(kt.ErrInvalidLog, "invalid log type: %v", l.Type) + return + } + + // record nextIndex + r.updateNextIndex(context.Background(), l) + } + + return +} diff --git a/kayak/processes.go b/kayak/processes.go new file mode 100644 index 000000000..85796224a --- /dev/null +++ b/kayak/processes.go @@ -0,0 +1,224 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kayak + +import ( + "context" + + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/CovenantSQL/CovenantSQL/utils/timer" + "github.com/CovenantSQL/CovenantSQL/utils/trace" + "github.com/pkg/errors" +) + +func (r *Runtime) doLeaderPrepare(ctx context.Context, tm *timer.Timer, req interface{}) (prepareLog *kt.Log, err error) { + defer trace.StartRegion(ctx, "doLeaderPrepare").End() + + // check prepare in leader + if err = r.doCheck(ctx, req); err != nil { + err = errors.Wrap(err, "leader verify log") + return + } + + tm.Add("leader_check") + + // encode request + var encBuf []byte + if encBuf, err = r.doEncodePayload(ctx, req); err != nil { + err = errors.Wrap(err, "encode kayak payload failed") + return + } + + tm.Add("leader_encode_payload") + + // create prepare request + if prepareLog, err = r.leaderLogPrepare(ctx, tm, encBuf); err != nil { + // serve error, leader could not write logs, change leader in block producer + // TODO(): CHANGE LEADER + return + } + + // Leader pending map handling. 
+ r.markPendingPrepare(ctx, prepareLog.Index) + + tm.Add("leader_prepare") + + // send prepare to all nodes + prepareTracker := r.applyRPC(prepareLog, r.minPreparedFollowers) + prepareCtx, prepareCtxCancelFunc := context.WithTimeout(ctx, r.prepareTimeout) + defer prepareCtxCancelFunc() + prepareErrors, prepareDone, _ := prepareTracker.get(prepareCtx) + if !prepareDone { + // timeout, rollback + err = kt.ErrPrepareTimeout + return + } + + tm.Add("follower_prepare") + + // collect errors + err = r.errorSummary(prepareErrors) + + return +} + +func (r *Runtime) doLeaderCommit(ctx context.Context, tm *timer.Timer, prepareLog *kt.Log, req interface{}) ( + result interface{}, logIndex uint64, err error) { + defer trace.StartRegion(ctx, "doLeaderCommit").End() + var commitResult *commitResult + if commitResult, err = r.leaderCommitResult(ctx, tm, req, prepareLog).Get(ctx); err != nil { + return + } + + result = commitResult.result + logIndex = commitResult.index + err = commitResult.err + + if commitResult.rpc != nil { + commitResult.rpc.get(ctx) + } + + tm.Add("wait_follower_commit") + + return +} + +func (r *Runtime) doLeaderRollback(ctx context.Context, tm *timer.Timer, prepareLog *kt.Log) { + defer trace.StartRegion(ctx, "doLeaderRollback").End() + // rollback local + var rollbackLog *kt.Log + var logErr error + if rollbackLog, logErr = r.leaderLogRollback(ctx, tm, prepareLog.Index); logErr != nil { + // serve error, construct rollback log failed, internal error + // TODO(): CHANGE LEADER + return + } + + defer trace.StartRegion(ctx, "followerRollback").End() + + // async send rollback to all nodes + r.applyRPC(rollbackLog, 0) + + tm.Add("follower_rollback") +} + +func (r *Runtime) leaderLogPrepare(ctx context.Context, tm *timer.Timer, data []byte) (*kt.Log, error) { + defer trace.StartRegion(ctx, "leaderLogPrepare").End() + defer tm.Add("leader_log_prepare") + // just write new log + return r.newLog(ctx, kt.LogPrepare, data) +} + +func (r *Runtime) 
leaderLogRollback(ctx context.Context, tm *timer.Timer, i uint64) (*kt.Log, error) { + defer trace.StartRegion(ctx, "leaderLogRollback").End() + defer tm.Add("leader_log_rollback") + // just write new log + return r.newLog(ctx, kt.LogRollback, r.uint64ToBytes(i)) +} + +func (r *Runtime) followerPrepare(ctx context.Context, tm *timer.Timer, l *kt.Log) (err error) { + defer func() { + log.WithField("r", l.Index).WithFields(tm.ToLogFields()).Debug("kayak follower prepare stat") + }() + + // decode + var req interface{} + if req, err = r.doDecodePayload(ctx, l.Data); err != nil { + return + } + tm.Add("decode") + + if err = r.doCheck(ctx, req); err != nil { + return + } + tm.Add("check") + + // write log + if err = r.writeWAL(ctx, l); err != nil { + return + + } + tm.Add("write_wal") + + r.markPendingPrepare(ctx, l.Index) + tm.Add("mark") + + return +} + +func (r *Runtime) followerRollback(ctx context.Context, tm *timer.Timer, l *kt.Log) (err error) { + var prepareLog *kt.Log + if _, prepareLog, err = r.getPrepareLog(ctx, l); err != nil || prepareLog == nil { + err = errors.Wrap(err, "get original request in rollback failed") + return + } + tm.Add("get_prepare") + + // check if prepare already processed + if r.checkIfPrepareFinished(ctx, prepareLog.Index) { + err = errors.Wrap(kt.ErrInvalidLog, "prepare request already processed") + return + } + tm.Add("check_prepare") + + // write wal + if err = r.writeWAL(ctx, l); err != nil { + return + } + tm.Add("write_wal") + + r.markPrepareFinished(ctx, l.Index) + tm.Add("mark") + + return +} + +func (r *Runtime) followerCommit(ctx context.Context, tm *timer.Timer, l *kt.Log) (err error) { + var ( + prepareLog *kt.Log + lastCommit uint64 + cResult *commitResult + ) + + defer func() { + log.WithField("r", l.Index).WithFields(tm.ToLogFields()).Debug("kayak follower commit stat") + }() + + if lastCommit, prepareLog, err = r.getPrepareLog(ctx, l); err != nil { + err = errors.Wrap(err, "get original request in commit failed") + 
return + } + tm.Add("get_prepare") + + // check if prepare already processed + if r.checkIfPrepareFinished(ctx, prepareLog.Index) { + err = errors.Wrap(kt.ErrInvalidLog, "prepare request already processed") + return + } + tm.Add("check_prepare") + + cResult, err = r.followerCommitResult(ctx, tm, l, prepareLog, lastCommit).Get(ctx) + if cResult != nil { + err = cResult.err + } + + r.markPrepareFinished(ctx, l.Index) + tm.Add("mark") + + return +} diff --git a/kayak/rpc.go b/kayak/rpc.go new file mode 100644 index 000000000..07a532426 --- /dev/null +++ b/kayak/rpc.go @@ -0,0 +1,56 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package kayak + +import ( + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/pkg/errors" +) + +func (r *Runtime) errorSummary(errs map[proto.NodeID]error) error { + failNodes := make(map[proto.NodeID]error) + + for s, err := range errs { + if err != nil { + failNodes[s] = err + } + } + + if len(failNodes) == 0 { + return nil + } + + return errors.Wrapf(kt.ErrPrepareFailed, "fail on nodes: %v", failNodes) +} + +/// rpc related +func (r *Runtime) applyRPC(l *kt.Log, minCount int) (tracker *rpcTracker) { + req := &kt.ApplyRequest{ + Instance: r.instanceID, + Log: l, + } + + tracker = newTracker(r, req, minCount) + tracker.send() + + // TODO(): track this rpc + + // TODO(): log remote errors + + return +} diff --git a/kayak/runtime.go b/kayak/runtime.go index ed8476bd4..f6bed6e5c 100644 --- a/kayak/runtime.go +++ b/kayak/runtime.go @@ -18,9 +18,6 @@ package kayak import ( "context" - "encoding/binary" - "fmt" - "io" "math" "sync" "sync/atomic" @@ -28,16 +25,19 @@ import ( kt "github.com/CovenantSQL/CovenantSQL/kayak/types" "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/CovenantSQL/CovenantSQL/utils/timer" + "github.com/CovenantSQL/CovenantSQL/utils/trace" "github.com/pkg/errors" ) const ( // commit channel window size commitWindow = 0 - // prepare window - trackerWindow = 10 + // missing log window + missingLogWindow = 10 + // missing log concurrency + missingLogConcurrency = 10 ) // Runtime defines the main kayak Runtime. @@ -82,10 +82,10 @@ type Runtime struct { callerMap sync.Map // map[proto.NodeID]Caller // service name for mux service. serviceName string - // rpc method for coordination requests. - rpcMethod string - // tracks the outgoing rpc requests. - rpcTrackCh chan *rpcTracker + // rpc method for apply requests. + applyRPCMethod string + // rpc method for fetch requests. 
+ fetchRPCMethod string //// Parameters // prepare threshold defines the minimum node count requirement for prepare operation. @@ -96,8 +96,13 @@ type Runtime struct { prepareTimeout time.Duration // commit timeout defines the max allowed time for commit operation. commitTimeout time.Duration + // log wait timeout to fetch missing logs. + logWaitTimeout time.Duration // channel for awaiting commits. commitCh chan *commitReq + // channel for missing log indexes. + missingLogCh chan *waitItem + waitLogMap sync.Map // map[uint64]*waitItem /// Sub-routines management. started uint32 @@ -112,18 +117,50 @@ type commitReq struct { index uint64 lastCommit uint64 log *kt.Log - result chan *commitResult + result *commitFuture + tm *timer.Timer } -// followerCommitResult defines the commit operation result. +// commitResult defines the commit operation result. type commitResult struct { - start time.Time - dbCost time.Duration + index uint64 result interface{} err error rpc *rpcTracker } +type commitFuture struct { + ch chan *commitResult +} + +func newCommitFuture() *commitFuture { + return &commitFuture{ + ch: make(chan *commitResult, 1), + } +} + +func (f *commitFuture) Get(ctx context.Context) (cr *commitResult, err error) { + if f == nil || f.ch == nil { + err = errors.Wrap(ctx.Err(), "enqueue commit timeout") + return + } + + select { + case <-ctx.Done(): + err = errors.Wrap(ctx.Err(), "get commit result timeout") + return + case cr = <-f.ch: + return + } +} + +func (f *commitFuture) Set(cr *commitResult) { + select { + case f.ch <- cr: + default: + } +} + // NewRuntime creates new kayak Runtime. 
func NewRuntime(cfg *kt.RuntimeConfig) (rt *Runtime, err error) { if cfg == nil { @@ -190,16 +227,18 @@ func NewRuntime(cfg *kt.RuntimeConfig) (rt *Runtime, err error) { minCommitFollowers: minCommitFollowers, // rpc related - serviceName: cfg.ServiceName, - rpcMethod: fmt.Sprintf("%v.%v", cfg.ServiceName, cfg.MethodName), - rpcTrackCh: make(chan *rpcTracker, trackerWindow), + serviceName: cfg.ServiceName, + applyRPCMethod: cfg.ServiceName + "." + cfg.ApplyMethodName, + fetchRPCMethod: cfg.ServiceName + "." + cfg.FetchMethodName, // commits related prepareThreshold: cfg.PrepareThreshold, prepareTimeout: cfg.PrepareTimeout, commitThreshold: cfg.CommitThreshold, commitTimeout: cfg.CommitTimeout, + logWaitTimeout: cfg.LogWaitTimeout, commitCh: make(chan *commitReq, commitWindow), + missingLogCh: make(chan *waitItem, missingLogWindow), // stop coordinator stopCh: make(chan struct{}), @@ -221,8 +260,10 @@ func (r *Runtime) Start() (err error) { // start commit cycle r.goFunc(r.commitCycle) - // start rpc tracker collector - // TODO(): + // start missing log worker + for i := 0; i != missingLogConcurrency; i++ { + r.goFunc(r.missingLogCycle) + } return } @@ -245,164 +286,83 @@ func (r *Runtime) Shutdown() (err error) { // Apply defines entry for Leader node. 
func (r *Runtime) Apply(ctx context.Context, req interface{}) (result interface{}, logIndex uint64, err error) { - var commitFuture <-chan *commitResult - var cResult *commitResult + if atomic.LoadUint32(&r.started) != 1 { + err = kt.ErrStopped + return + } + + ctx, task := trace.NewTask(ctx, "Kayak.Apply") + defer task.End() - var tmStart, tmLeaderPrepare, tmFollowerPrepare, tmCommitEnqueue, tmLeaderRollback, - tmRollback, tmCommitDequeue, tmLeaderCommit, tmCommit time.Time - var dbCost time.Duration + tm := timer.NewTimer() defer func() { - fields := log.Fields{ - "r": logIndex, - } - if !tmLeaderPrepare.Before(tmStart) { - fields["lp"] = tmLeaderPrepare.Sub(tmStart).Nanoseconds() - } - if !tmFollowerPrepare.Before(tmLeaderPrepare) { - fields["fp"] = tmFollowerPrepare.Sub(tmLeaderPrepare).Nanoseconds() - } - if !tmLeaderRollback.Before(tmFollowerPrepare) { - fields["lr"] = tmLeaderRollback.Sub(tmFollowerPrepare).Nanoseconds() - } - if !tmRollback.Before(tmLeaderRollback) { - fields["fr"] = tmRollback.Sub(tmLeaderRollback).Nanoseconds() - } - if !tmCommitEnqueue.Before(tmFollowerPrepare) { - fields["eq"] = tmCommitEnqueue.Sub(tmFollowerPrepare).Nanoseconds() - } - if !tmCommitDequeue.Before(tmCommitEnqueue) { - fields["dq"] = tmCommitDequeue.Sub(tmCommitEnqueue).Nanoseconds() - } - if !tmLeaderCommit.Before(tmCommitDequeue) { - fields["lc"] = tmLeaderCommit.Sub(tmCommitDequeue).Nanoseconds() - } - if !tmCommit.Before(tmLeaderCommit) { - fields["fc"] = tmCommit.Sub(tmLeaderCommit).Nanoseconds() - } - if dbCost > 0 { - fields["dc"] = dbCost.Nanoseconds() - } - if !tmCommit.Before(tmStart) { - fields["t"] = tmCommit.Sub(tmStart).Nanoseconds() - } else if !tmRollback.Before(tmStart) { - fields["t"] = tmRollback.Sub(tmStart).Nanoseconds() - } - log.WithFields(fields).WithError(err).Debug("kayak leader apply") + log.WithField("r", logIndex). + WithFields(tm.ToLogFields()). + WithError(err). 
+ Debug("kayak leader apply") }() r.peersLock.RLock() defer r.peersLock.RUnlock() + tm.Add("peers_lock") + if r.role != proto.Leader { // not leader err = kt.ErrNotLeader return } - tmStart = time.Now() + // prepare + prepareLog, err := r.doLeaderPrepare(ctx, tm, req) - // check prepare in leader - if err = r.doCheck(req); err != nil { - err = errors.Wrap(err, "leader verify log") - return + if prepareLog != nil { + defer r.markPrepareFinished(ctx, prepareLog.Index) } - // encode request - var encBuf []byte - if encBuf, err = r.sh.EncodePayload(req); err != nil { - err = errors.Wrap(err, "encode kayak payload failed") - return - } - - // create prepare request - var prepareLog *kt.Log - if prepareLog, err = r.leaderLogPrepare(encBuf); err != nil { - // serve error, leader could not write logs, change leader in block producer - // TODO(): CHANGE LEADER - return - } - - // Leader pending map handling. - r.markPendingPrepare(prepareLog.Index) - defer r.markPrepareFinished(prepareLog.Index) - - tmLeaderPrepare = time.Now() - - // send prepare to all nodes - prepareTracker := r.rpc(prepareLog, r.minPreparedFollowers) - prepareCtx, prepareCtxCancelFunc := context.WithTimeout(ctx, r.prepareTimeout) - defer prepareCtxCancelFunc() - prepareErrors, prepareDone, _ := prepareTracker.get(prepareCtx) - if !prepareDone { - // timeout, rollback - err = kt.ErrPrepareTimeout - goto ROLLBACK + if err == nil { + // commit + return r.doLeaderCommit(ctx, tm, prepareLog, req) } - // collect errors - if err = r.errorSummary(prepareErrors); err != nil { - goto ROLLBACK + // rollback + if prepareLog != nil { + r.doLeaderRollback(ctx, tm, prepareLog) } - tmFollowerPrepare = time.Now() - - commitFuture = r.leaderCommitResult(ctx, req, prepareLog) - - tmCommitEnqueue = time.Now() + return +} - if commitFuture == nil { - logIndex = prepareLog.Index - err = errors.Wrap(ctx.Err(), "enqueue commit timeout") - goto ROLLBACK +// Fetch defines entry for missing log fetch. 
+func (r *Runtime) Fetch(ctx context.Context, index uint64) (l *kt.Log, err error) { + if atomic.LoadUint32(&r.started) != 1 { + err = kt.ErrStopped + return } - cResult = <-commitFuture - if cResult != nil { - logIndex = prepareLog.Index - result = cResult.result - err = cResult.err - - tmCommitDequeue = cResult.start - dbCost = cResult.dbCost - tmLeaderCommit = time.Now() + tm := timer.NewTimer() - // wait until context deadline or commit done - if cResult.rpc != nil { - cResult.rpc.get(ctx) - } - } else { - log.Fatal("IMPOSSIBLE BRANCH") - select { - case <-ctx.Done(): - err = errors.Wrap(ctx.Err(), "process commit timeout") - goto ROLLBACK - default: - } - } + defer func() { + log.WithField("l", index). + WithFields(tm.ToLogFields()). + WithError(err). + Debug("kayak log fetch") + }() - tmCommit = time.Now() + r.peersLock.RLock() + defer r.peersLock.RUnlock() - return + tm.Add("peers_lock") -ROLLBACK: - // rollback local - var rollbackLog *kt.Log - var logErr error - if rollbackLog, logErr = r.leaderLogRollback(prepareLog.Index); logErr != nil { - // serve error, construct rollback log failed, internal error - // TODO(): CHANGE LEADER + if r.role != proto.Leader { + // not leader + err = kt.ErrNotLeader return } - tmLeaderRollback = time.Now() - - // async send rollback to all nodes - r.rpc(rollbackLog, 0) - - tmRollback = time.Now() - - return + // wal get + return r.wal.Get(index) } // FollowerApply defines entry for follower node. 
@@ -411,20 +371,32 @@ func (r *Runtime) FollowerApply(l *kt.Log) (err error) { err = errors.Wrap(kt.ErrInvalidLog, "log is nil") return } + if atomic.LoadUint32(&r.started) != 1 { + err = kt.ErrStopped + return + } - var tmStart, tmEnd time.Time + ctx, task := trace.NewTask(context.Background(), "Kayak.FollowerApply."+l.Type.String()) + defer task.End() + + tm := timer.NewTimer() defer func() { - log.WithFields(log.Fields{ - "t": l.Type.String(), - "i": l.Index, - "c": tmEnd.Sub(tmStart).Nanoseconds(), - }).WithError(err).Debug("kayak follower apply") + log. + WithFields(log.Fields{ + "t": l.Type.String(), + "i": l.Index, + }). + WithFields(tm.ToLogFields()). + WithError(err). + Debug("kayak follower apply") }() r.peersLock.RLock() defer r.peersLock.RUnlock() + tm.Add("peers_lock") + if r.role == proto.Leader { // not follower err = kt.ErrNotFollower @@ -434,21 +406,16 @@ func (r *Runtime) FollowerApply(l *kt.Log) (err error) { // verify log structure switch l.Type { case kt.LogPrepare: - err = r.followerPrepare(l) + err = r.followerPrepare(ctx, tm, l) case kt.LogRollback: - err = r.followerRollback(l) + err = r.followerRollback(ctx, tm, l) case kt.LogCommit: - err = r.followerCommit(l) - case kt.LogBarrier: - // support barrier for log truncation and peer update - fallthrough - case kt.LogNoop: - // do nothing - err = r.followerNoop(l) + err = r.followerCommit(ctx, tm, l) } if err == nil { - r.updateNextIndex(l) + r.updateNextIndex(ctx, l) + r.triggerLogAwaits(l.Index) } return @@ -462,385 +429,9 @@ func (r *Runtime) UpdatePeers(peers *proto.Peers) (err error) { return } -func (r *Runtime) leaderLogPrepare(data []byte) (*kt.Log, error) { - // just write new log - return r.newLog(kt.LogPrepare, data) -} - -func (r *Runtime) leaderLogRollback(i uint64) (*kt.Log, error) { - // just write new log - return r.newLog(kt.LogRollback, r.uint64ToBytes(i)) -} - -func (r *Runtime) doCheck(req interface{}) (err error) { - if err = r.sh.Check(req); err != nil { - err = 
errors.Wrap(err, "verify log") - return - } - - return -} - -func (r *Runtime) followerPrepare(l *kt.Log) (err error) { - // decode - var req interface{} - if req, err = r.sh.DecodePayload(l.Data); err != nil { - err = errors.Wrap(err, "decode kayak payload failed") - return - } - - if err = r.doCheck(req); err != nil { - return - } - - // write log - if err = r.wal.Write(l); err != nil { - err = errors.Wrap(err, "write follower prepare log failed") - return - } - - r.markPendingPrepare(l.Index) - - return -} - -func (r *Runtime) followerRollback(l *kt.Log) (err error) { - var prepareLog *kt.Log - if _, prepareLog, err = r.getPrepareLog(l); err != nil || prepareLog == nil { - err = errors.Wrap(err, "get original request in rollback failed") - return - } - - // check if prepare already processed - if r.checkIfPrepareFinished(prepareLog.Index) { - err = errors.Wrap(kt.ErrInvalidLog, "prepare request already processed") - return - } - - // write wal - if err = r.wal.Write(l); err != nil { - err = errors.Wrap(err, "write follower rollback log failed") - } - - r.markPrepareFinished(l.Index) - - return -} - -func (r *Runtime) followerCommit(l *kt.Log) (err error) { - var prepareLog *kt.Log - var lastCommit uint64 - if lastCommit, prepareLog, err = r.getPrepareLog(l); err != nil { - err = errors.Wrap(err, "get original request in commit failed") - return - } - - // check if prepare already processed - if r.checkIfPrepareFinished(prepareLog.Index) { - err = errors.Wrap(kt.ErrInvalidLog, "prepare request already processed") - return - } - - cResult := <-r.followerCommitResult(context.Background(), l, prepareLog, lastCommit) - if cResult != nil { - err = cResult.err - } - - r.markPrepareFinished(l.Index) - - return -} - -func (r *Runtime) leaderCommitResult(ctx context.Context, reqPayload interface{}, prepareLog *kt.Log) (res chan *commitResult) { - // decode log and send to commit channel to process - res = make(chan *commitResult, 1) - - if prepareLog == nil { - res <- 
&commitResult{ - err: errors.Wrap(kt.ErrInvalidLog, "nil prepare log in commit"), - } - return - } - - // decode prepare log - req := &commitReq{ - ctx: ctx, - data: reqPayload, - index: prepareLog.Index, - result: res, - } - - select { - case <-ctx.Done(): - res = nil - case r.commitCh <- req: - } - - return -} - -func (r *Runtime) followerCommitResult(ctx context.Context, commitLog *kt.Log, prepareLog *kt.Log, lastCommit uint64) (res chan *commitResult) { - // decode log and send to commit channel to process - res = make(chan *commitResult, 1) +func (r *Runtime) updateNextIndex(ctx context.Context, l *kt.Log) { + defer trace.StartRegion(ctx, "updateNextIndex").End() - if prepareLog == nil { - res <- &commitResult{ - err: errors.Wrap(kt.ErrInvalidLog, "nil prepare log in commit"), - } - return - } - - myLastCommit := atomic.LoadUint64(&r.lastCommit) - - // check committed index - if lastCommit < myLastCommit { - // leader pushed a early index before commit - log.WithFields(log.Fields{ - "head": myLastCommit, - "supplied": lastCommit, - }).Warning("invalid last commit log") - res <- &commitResult{ - err: errors.Wrap(kt.ErrInvalidLog, "invalid last commit log index"), - } - return - } - - // decode prepare log - var logReq interface{} - var err error - if logReq, err = r.sh.DecodePayload(prepareLog.Data); err != nil { - res <- &commitResult{ - err: errors.Wrap(err, "decode log payload failed"), - } - return - } - - req := &commitReq{ - ctx: ctx, - data: logReq, - index: prepareLog.Index, - lastCommit: lastCommit, - result: res, - log: commitLog, - } - - select { - case <-ctx.Done(): - case r.commitCh <- req: - } - - return -} - -func (r *Runtime) commitCycle() { - // TODO(): panic recovery - for { - var cReq *commitReq - - select { - case <-r.stopCh: - return - case cReq = <-r.commitCh: - } - - if cReq != nil { - r.doCommit(cReq) - } - } -} - -func (r *Runtime) doCommit(req *commitReq) { - r.peersLock.RLock() - defer r.peersLock.RUnlock() - - resp := &commitResult{ 
- start: time.Now(), - } - - if r.role == proto.Leader { - resp.dbCost, resp.rpc, resp.result, resp.err = r.leaderDoCommit(req) - req.result <- resp - } else { - r.followerDoCommit(req) - } -} - -func (r *Runtime) leaderDoCommit(req *commitReq) (dbCost time.Duration, tracker *rpcTracker, result interface{}, err error) { - if req.log != nil { - // mis-use follower commit for leader - log.Fatal("INVALID EXISTING LOG FOR LEADER COMMIT") - return - } - - // create leader log - var l *kt.Log - var logData []byte - - logData = append(logData, r.uint64ToBytes(req.index)...) - logData = append(logData, r.uint64ToBytes(atomic.LoadUint64(&r.lastCommit))...) - - if l, err = r.newLog(kt.LogCommit, logData); err != nil { - // serve error, leader could not write log - return - } - - // not wrapping underlying handler commit error - tmStartDB := time.Now() - result, err = r.sh.Commit(req.data) - dbCost = time.Now().Sub(tmStartDB) - - // mark last commit - atomic.StoreUint64(&r.lastCommit, l.Index) - - // send commit - tracker = r.rpc(l, r.minCommitFollowers) - - // TODO(): text log for rpc errors - - // TODO(): mark uncommitted nodes and remove from peers - - return -} - -func (r *Runtime) followerDoCommit(req *commitReq) (err error) { - if req.log == nil { - log.Fatal("NO LOG FOR FOLLOWER COMMIT") - return - } - - // check for last commit availability - myLastCommit := atomic.LoadUint64(&r.lastCommit) - if req.lastCommit != myLastCommit { - // TODO(): need counter for retries, infinite commit re-order would cause troubles - go func(req *commitReq) { - r.commitCh <- req - }(req) - return - } - - // write log first - if err = r.wal.Write(req.log); err != nil { - err = errors.Wrap(err, "write follower commit log failed") - return - } - - // do commit, not wrapping underlying handler commit error - _, err = r.sh.Commit(req.data) - - // mark last commit - atomic.StoreUint64(&r.lastCommit, req.log.Index) - - req.result <- &commitResult{err: err} - - return -} - -func (r *Runtime) 
getPrepareLog(l *kt.Log) (lastCommitIndex uint64, pl *kt.Log, err error) { - var prepareIndex uint64 - - // decode prepare index - if prepareIndex, err = r.bytesToUint64(l.Data); err != nil { - err = errors.Wrap(err, "log does not contain valid prepare index") - return - } - - // decode commit index - if len(l.Data) >= 16 { - lastCommitIndex, _ = r.bytesToUint64(l.Data[8:]) - } - - pl, err = r.wal.Get(prepareIndex) - - return -} - -func (r *Runtime) newLog(logType kt.LogType, data []byte) (l *kt.Log, err error) { - // allocate index - r.nextIndexLock.Lock() - i := r.nextIndex - r.nextIndex++ - r.nextIndexLock.Unlock() - l = &kt.Log{ - LogHeader: kt.LogHeader{ - Index: i, - Type: logType, - Producer: r.nodeID, - }, - Data: data, - } - - // error write will be a fatal error, cause to node to fail fast - if err = r.wal.Write(l); err != nil { - log.Fatalf("WRITE LOG FAILED: %v", err) - } - - return -} - -func (r *Runtime) readLogs() (err error) { - // load logs, only called during init - var l *kt.Log - - for { - if l, err = r.wal.Read(); err != nil && err != io.EOF { - err = errors.Wrap(err, "load previous logs in wal failed") - return - } else if err == io.EOF { - err = nil - break - } - - switch l.Type { - case kt.LogPrepare: - // record in pending prepares - r.pendingPrepares[l.Index] = true - case kt.LogCommit: - // record last commit - var lastCommit uint64 - var prepareLog *kt.Log - if lastCommit, prepareLog, err = r.getPrepareLog(l); err != nil { - err = errors.Wrap(err, "previous prepare does not exists, node need full recovery") - return - } - if lastCommit != r.lastCommit { - err = errors.Wrapf(err, - "last commit record in wal mismatched (expected: %v, actual: %v)", r.lastCommit, lastCommit) - return - } - if !r.pendingPrepares[prepareLog.Index] { - err = errors.Wrap(kt.ErrInvalidLog, "previous prepare already committed/rollback") - return - } - r.lastCommit = l.Index - // resolve previous prepared - delete(r.pendingPrepares, prepareLog.Index) - case 
kt.LogRollback: - var prepareLog *kt.Log - if _, prepareLog, err = r.getPrepareLog(l); err != nil { - err = errors.Wrap(err, "previous prepare does not exists, node need full recovery") - return - } - if !r.pendingPrepares[prepareLog.Index] { - err = errors.Wrap(kt.ErrInvalidLog, "previous prepare already committed/rollback") - return - } - // resolve previous prepared - delete(r.pendingPrepares, prepareLog.Index) - case kt.LogBarrier: - case kt.LogNoop: - default: - err = errors.Wrapf(kt.ErrInvalidLog, "invalid log type: %v", l.Type) - return - } - - // record nextIndex - r.updateNextIndex(l) - } - - return -} - -func (r *Runtime) updateNextIndex(l *kt.Log) { r.nextIndexLock.Lock() defer r.nextIndexLock.Unlock() @@ -849,89 +440,29 @@ func (r *Runtime) updateNextIndex(l *kt.Log) { } } -func (r *Runtime) checkIfPrepareFinished(index uint64) (finished bool) { +func (r *Runtime) checkIfPrepareFinished(ctx context.Context, index uint64) (finished bool) { + defer trace.StartRegion(ctx, "checkIfPrepareFinished").End() + r.pendingPreparesLock.RLock() defer r.pendingPreparesLock.RUnlock() return !r.pendingPrepares[index] } -func (r *Runtime) markPendingPrepare(index uint64) { +func (r *Runtime) markPendingPrepare(ctx context.Context, index uint64) { + defer trace.StartRegion(ctx, "markPendingPrepare").End() + r.pendingPreparesLock.Lock() defer r.pendingPreparesLock.Unlock() r.pendingPrepares[index] = true } -func (r *Runtime) markPrepareFinished(index uint64) { +func (r *Runtime) markPrepareFinished(ctx context.Context, index uint64) { + defer trace.StartRegion(ctx, "markPrepareFinished").End() + r.pendingPreparesLock.Lock() defer r.pendingPreparesLock.Unlock() delete(r.pendingPrepares, index) } - -func (r *Runtime) errorSummary(errs map[proto.NodeID]error) error { - failNodes := make(map[proto.NodeID]error) - - for s, err := range errs { - if err != nil { - failNodes[s] = err - } - } - - if len(failNodes) == 0 { - return nil - } - - return 
errors.Wrapf(kt.ErrPrepareFailed, "fail on nodes: %v", failNodes) -} - -/// rpc related -func (r *Runtime) rpc(l *kt.Log, minCount int) (tracker *rpcTracker) { - req := &kt.RPCRequest{ - Instance: r.instanceID, - Log: l, - } - - tracker = newTracker(r, req, minCount) - tracker.send() - - // TODO(): track this rpc - - // TODO(): log remote errors - - return -} - -func (r *Runtime) getCaller(id proto.NodeID) Caller { - var caller Caller = rpc.NewPersistentCaller(id) - rawCaller, _ := r.callerMap.LoadOrStore(id, caller) - return rawCaller.(Caller) -} - -func (r *Runtime) goFunc(f func()) { - r.wg.Add(1) - go func() { - defer r.wg.Done() - f() - }() -} - -/// utils -func (r *Runtime) uint64ToBytes(i uint64) (res []byte) { - res = make([]byte, 8) - binary.BigEndian.PutUint64(res, i) - return -} - -func (r *Runtime) bytesToUint64(b []byte) (uint64, error) { - if len(b) < 8 { - return 0, kt.ErrInvalidLog - } - return binary.BigEndian.Uint64(b), nil -} - -//// future extensions, barrier, noop log placeholder etc. -func (r *Runtime) followerNoop(l *kt.Log) (err error) { - return r.wal.Write(l) -} diff --git a/kayak/runtime_test.go b/kayak/runtime_test.go index d11b8e92b..2291cd49e 100644 --- a/kayak/runtime_test.go +++ b/kayak/runtime_test.go @@ -31,16 +31,19 @@ import ( "time" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/etls" "github.com/CovenantSQL/CovenantSQL/kayak" kt "github.com/CovenantSQL/CovenantSQL/kayak/types" kl "github.com/CovenantSQL/CovenantSQL/kayak/wal" "github.com/CovenantSQL/CovenantSQL/proto" + crpc "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/storage" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" mock_conn "github.com/jordwest/mock-conn" "github.com/pkg/errors" . 
"github.com/smartystreets/goconvey/convey" + "github.com/xtaci/smux" ) func init() { @@ -103,7 +106,7 @@ func (s *sqliteStorage) Check(data interface{}) (err error) { return nil } -func (s *sqliteStorage) Commit(data interface{}) (result interface{}, err error) { +func (s *sqliteStorage) Commit(data interface{}, isLeader bool) (result interface{}, err error) { var d *queryStructure var ok bool if d, ok = data.(*queryStructure); !ok { @@ -161,33 +164,65 @@ func newFakeService(rt *kayak.Runtime) (fs *fakeService) { return } -func (s *fakeService) Call(req *kt.RPCRequest, resp *interface{}) (err error) { +func (s *fakeService) Apply(req *kt.ApplyRequest, resp *interface{}) (err error) { // add some delay for timeout test - time.Sleep(time.Millisecond * 10) return s.rt.FollowerApply(req.Log) } +func (s *fakeService) Fetch(req *kt.FetchRequest, resp *kt.FetchResponse) (err error) { + var l *kt.Log + if l, err = s.rt.Fetch(req.GetContext(), req.Index); err != nil { + return + } + + resp.Log = l + return +} + func (s *fakeService) serveConn(c net.Conn) { - s.s.ServeCodec(utils.GetMsgPackServerCodec(c)) + var r proto.NodeID + s.s.ServeCodec(crpc.NewNodeAwareServerCodec(context.Background(), utils.GetMsgPackServerCodec(c), r.ToRawNodeID())) } type fakeCaller struct { m *fakeMux target proto.NodeID + s *smux.Session } -func newFakeCaller(m *fakeMux, nodeID proto.NodeID) *fakeCaller { - return &fakeCaller{ +func newFakeCaller(m *fakeMux, nodeID proto.NodeID) (c *fakeCaller) { + fakeConn := mock_conn.NewConn() + cipher1 := etls.NewCipher([]byte("123")) + cipher2 := etls.NewCipher([]byte("123")) + serverConn := etls.NewConn(fakeConn.Server, cipher1, nil) + clientConn := etls.NewConn(fakeConn.Client, cipher2, nil) + + muxSess, _ := smux.Server(serverConn, smux.DefaultConfig()) + go func() { + for { + s, err := muxSess.AcceptStream() + if err != nil { + break + } + + go c.m.get(c.target).serveConn(s) + } + }() + + muxClientSess, _ := smux.Client(clientConn, smux.DefaultConfig()) 
+ + c = &fakeCaller{ m: m, target: nodeID, + s: muxClientSess, } + + return } func (c *fakeCaller) Call(method string, req interface{}, resp interface{}) (err error) { - fakeConn := mock_conn.NewConn() - - go c.m.get(c.target).serveConn(fakeConn.Server) - client := rpc.NewClientWithCodec(utils.GetMsgPackClientCodec(fakeConn.Client)) + s, err := c.s.OpenStream() + client := rpc.NewClientWithCodec(utils.GetMsgPackClientCodec(s)) defer client.Close() return client.Call(method, req, resp) @@ -196,8 +231,9 @@ func (c *fakeCaller) Call(method string, req interface{}, resp interface{}) (err func TestRuntime(t *testing.T) { Convey("runtime test", t, func(c C) { lvl := log.GetLevel() - log.SetLevel(log.FatalLevel) + log.SetLevel(log.DebugLevel) defer log.SetLevel(lvl) + db1, err := newSQLiteStorage("test1.db") So(err, ShouldBeNil) defer func() { @@ -237,11 +273,12 @@ func TestRuntime(t *testing.T) { CommitThreshold: 1.0, PrepareTimeout: time.Second, CommitTimeout: 10 * time.Second, + LogWaitTimeout: 10 * time.Second, Peers: peers, Wal: wal1, NodeID: node1, ServiceName: "Test", - MethodName: "Call", + ApplyMethodName: "Apply", } rt1, err := kayak.NewRuntime(cfg1) So(err, ShouldBeNil) @@ -254,11 +291,12 @@ func TestRuntime(t *testing.T) { CommitThreshold: 1.0, PrepareTimeout: time.Second, CommitTimeout: 10 * time.Second, + LogWaitTimeout: 10 * time.Second, Peers: peers, Wal: wal2, NodeID: node2, ServiceName: "Test", - MethodName: "Call", + ApplyMethodName: "Apply", } rt2, err := kayak.NewRuntime(cfg2) So(err, ShouldBeNil) @@ -314,7 +352,7 @@ func TestRuntime(t *testing.T) { var count uint64 atomic.StoreUint64(&count, 1) - for i := 0; i != 100; i++ { + for i := 0; i != 2000; i++ { atomic.AddUint64(&count, 1) q := &queryStructure{ Queries: []storage.Query{ @@ -482,11 +520,12 @@ func TestRuntime(t *testing.T) { CommitThreshold: 1.0, PrepareTimeout: time.Second, CommitTimeout: 10 * time.Second, + LogWaitTimeout: 10 * time.Second, Peers: peers, Wal: w, NodeID: node1, ServiceName: 
"Test", - MethodName: "Call", + ApplyMethodName: "Apply", } rt, err := kayak.NewRuntime(cfg) So(err, ShouldBeNil) @@ -501,7 +540,7 @@ func TestRuntime(t *testing.T) { func BenchmarkRuntime(b *testing.B) { Convey("runtime test", b, func(c C) { - log.SetLevel(log.DebugLevel) + log.SetLevel(log.FatalLevel) f, err := os.OpenFile("test.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600) So(err, ShouldBeNil) log.SetOutput(f) @@ -543,14 +582,15 @@ func BenchmarkRuntime(b *testing.B) { cfg1 := &kt.RuntimeConfig{ Handler: db1, PrepareThreshold: 1.0, - CommitThreshold: 1.0, + CommitThreshold: 0.0, PrepareTimeout: time.Second, CommitTimeout: 10 * time.Second, + LogWaitTimeout: 10 * time.Second, Peers: peers, Wal: wal1, NodeID: node1, ServiceName: "Test", - MethodName: "Call", + ApplyMethodName: "Apply", } rt1, err := kayak.NewRuntime(cfg1) So(err, ShouldBeNil) @@ -560,14 +600,15 @@ func BenchmarkRuntime(b *testing.B) { cfg2 := &kt.RuntimeConfig{ Handler: db2, PrepareThreshold: 1.0, - CommitThreshold: 1.0, + CommitThreshold: 0.0, PrepareTimeout: time.Second, CommitTimeout: 10 * time.Second, + LogWaitTimeout: 10 * time.Second, Peers: peers, Wal: wal2, NodeID: node2, ServiceName: "Test", - MethodName: "Call", + ApplyMethodName: "Apply", } rt2, err := kayak.NewRuntime(cfg2) So(err, ShouldBeNil) @@ -656,7 +697,8 @@ func BenchmarkRuntime(b *testing.B) { }) So(d1, ShouldHaveLength, 1) So(d1[0], ShouldHaveLength, 1) - So(fmt.Sprint(d1[0][0]), ShouldEqual, fmt.Sprint(total)) + _ = total + //So(fmt.Sprint(d1[0][0]), ShouldEqual, fmt.Sprint(total)) //_, _, d2, _ := db2.Query(context.Background(), []storage.Query{ // {Pattern: "SELECT COUNT(1) FROM test"}, diff --git a/kayak/tracker.go b/kayak/tracker.go index 986a9198b..f94440636 100644 --- a/kayak/tracker.go +++ b/kayak/tracker.go @@ -21,7 +21,9 @@ import ( "sync" "sync/atomic" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils/trace" ) // rpcTracker 
defines the rpc call tracker @@ -63,7 +65,7 @@ func newTracker(r *Runtime, req interface{}, minCount int) (t *rpcTracker) { t = &rpcTracker{ r: r, nodes: nodes, - method: r.rpcMethod, + method: r.applyRPCMethod, req: req, minCount: minCount, errors: make(map[proto.NodeID]error, len(nodes)), @@ -114,6 +116,17 @@ func (t *rpcTracker) done() { } func (t *rpcTracker) get(ctx context.Context) (errors map[proto.NodeID]error, meets bool, finished bool) { + if trace.IsEnabled() { + // get request log type + traceType := "rpcCall" + + if rawReq, ok := t.req.(*kt.ApplyRequest); ok { + traceType += rawReq.Log.Type.String() + } + + defer trace.StartRegion(ctx, traceType).End() + } + for { select { case <-t.doneCh: diff --git a/kayak/tracker_test.go b/kayak/tracker_test.go index 6ffccce6b..f2e82acfe 100644 --- a/kayak/tracker_test.go +++ b/kayak/tracker_test.go @@ -44,7 +44,7 @@ func TestTracker(t *testing.T) { nodeID1 := proto.NodeID("000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5") nodeID2 := proto.NodeID("000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade") r := &Runtime{ - rpcMethod: "test", + applyRPCMethod: "test", followers: []proto.NodeID{ nodeID1, nodeID2, diff --git a/kayak/types/config.go b/kayak/types/config.go index 0407a5e4a..15170c957 100644 --- a/kayak/types/config.go +++ b/kayak/types/config.go @@ -44,6 +44,10 @@ type RuntimeConfig struct { InstanceID string // mux service name. ServiceName string - // mux service method. - MethodName string + // apply service method. + ApplyMethodName string + // fetch service method. + FetchMethodName string + // fetch timeout. + LogWaitTimeout time.Duration } diff --git a/kayak/types/errors.go b/kayak/types/errors.go index 2912207d7..2bd7b8686 100644 --- a/kayak/types/errors.go +++ b/kayak/types/errors.go @@ -33,4 +33,6 @@ var ( ErrNotInPeer = errors.New("node not in peer") // ErrInvalidConfig represents invalid kayak runtime config. 
ErrInvalidConfig = errors.New("invalid runtime config") + // ErrStopped represents runtime not started. + ErrStopped = errors.New("stopped") ) diff --git a/kayak/types/handler.go b/kayak/types/handler.go index c74b053e2..3be587072 100644 --- a/kayak/types/handler.go +++ b/kayak/types/handler.go @@ -21,5 +21,5 @@ type Handler interface { EncodePayload(req interface{}) (data []byte, err error) DecodePayload(data []byte) (req interface{}, err error) Check(request interface{}) error - Commit(request interface{}) (result interface{}, err error) + Commit(request interface{}, isLeader bool) (result interface{}, err error) } diff --git a/kayak/types/rpc.go b/kayak/types/rpc.go index 7b96f42aa..f6ee63a11 100644 --- a/kayak/types/rpc.go +++ b/kayak/types/rpc.go @@ -18,8 +18,22 @@ package types import "github.com/CovenantSQL/CovenantSQL/proto" -// RPCRequest defines the RPC request entity. -type RPCRequest struct { +// ApplyRequest defines the apply request entity. +type ApplyRequest struct { + proto.Envelope + Instance string + Log *Log +} + +// FetchRequest defines the fetch request entity. +type FetchRequest struct { + proto.Envelope + Instance string + Index uint64 +} + +// FetchResponse defines the fetch response entity. +type FetchResponse struct { proto.Envelope Instance string Log *Log diff --git a/kayak/utils.go b/kayak/utils.go new file mode 100644 index 000000000..71ffa6d18 --- /dev/null +++ b/kayak/utils.go @@ -0,0 +1,53 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kayak + +import ( + "encoding/binary" + + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/rpc" +) + +func (r *Runtime) getCaller(id proto.NodeID) Caller { + var caller Caller = rpc.NewPersistentCaller(id) + rawCaller, _ := r.callerMap.LoadOrStore(id, caller) + return rawCaller.(Caller) +} + +func (r *Runtime) goFunc(f func()) { + r.wg.Add(1) + go func() { + defer r.wg.Done() + f() + }() +} + +/// utils +func (r *Runtime) uint64ToBytes(i uint64) (res []byte) { + res = make([]byte, 8) + binary.BigEndian.PutUint64(res, i) + return +} + +func (r *Runtime) bytesToUint64(b []byte) (uint64, error) { + if len(b) < 8 { + return 0, kt.ErrInvalidLog + } + return binary.BigEndian.Uint64(b), nil +} diff --git a/kayak/waiter.go b/kayak/waiter.go new file mode 100644 index 000000000..675038806 --- /dev/null +++ b/kayak/waiter.go @@ -0,0 +1,88 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package kayak + +import ( + "context" + "sync" + "time" + + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + "github.com/CovenantSQL/CovenantSQL/utils/trace" +) + +type waitItem struct { + index uint64 + doneOnce sync.Once + ch chan struct{} + waitLock sync.Mutex +} + +func newWaitItem(index uint64) *waitItem { + return &waitItem{ + index: index, + ch: make(chan struct{}), + } +} + +func (r *Runtime) waitForLog(ctx context.Context, index uint64) (l *kt.Log, err error) { + defer trace.StartRegion(ctx, "waitForLog").End() + + for { + if l, err = r.wal.Get(index); err == nil { + // exists + return + } + + rawItem, _ := r.waitLogMap.LoadOrStore(index, newWaitItem(index)) + item := rawItem.(*waitItem) + + if item == nil { + err = kt.ErrInvalidLog + return + } + + select { + case <-item.ch: + r.waitLogMap.Delete(index) + case <-time.After(r.logWaitTimeout): + r.markMissingLog(index) + case <-ctx.Done(): + err = ctx.Err() + return + } + } +} + +func (r *Runtime) triggerLogAwaits(index uint64) { + rawItem, ok := r.waitLogMap.Load(index) + if !ok || rawItem == nil { + return + } + + item := rawItem.(*waitItem) + + if item == nil { + return + } + + item.doneOnce.Do(func() { + if item.ch != nil { + close(item.ch) + } + }) +} diff --git a/metric/collector.go b/metric/collector.go index 2ed5d01f1..5a574011c 100644 --- a/metric/collector.go +++ b/metric/collector.go @@ -54,7 +54,7 @@ func NewNodeCollector() (*NodeCollector, error) { collectors["cpu"], _ = NewCPUCollector() collectors["diskstats"], _ = NewDiskstatsCollector() collectors["filesystem"], _ = NewFilesystemCollector() - collectors["ntp"], _ = NewNtpCollector() + //collectors["ntp"], _ = NewNtpCollector() collectors["loadavg"], _ = NewLoadavgCollector() return &NodeCollector{Collectors: collectors}, nil diff --git a/metric/loadavg.go b/metric/loadavg.go index 8519a2e52..4e333dfd2 100644 --- a/metric/loadavg.go +++ b/metric/loadavg.go @@ -19,8 +19,9 @@ package metric import ( "fmt" - 
"github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/prometheus/client_golang/prometheus" + + "github.com/CovenantSQL/CovenantSQL/utils/log" ) type loadavgCollector struct { diff --git a/metric/metric.go b/metric/metric.go index 36a55eb29..76b21dbfd 100644 --- a/metric/metric.go +++ b/metric/metric.go @@ -24,23 +24,6 @@ import ( "github.com/prometheus/common/version" ) -const ( - // KB is 1024 Bytes - KB int64 = 1024 - // MB is 1024 KB - MB int64 = KB * 1024 - // GB is 1024 MB - GB int64 = MB * 1024 - // TB is 1024 GB - TB int64 = GB * 1024 - // PB is 1024 TB - PB int64 = TB * 1024 - // EB is 1024 PB - EB int64 = TB * 1024 - // ZB is 1024 EB - ZB int64 = TB * 1024 -) - func init() { prometheus.MustRegister(version.NewCollector("CovenantSQL")) } diff --git a/metric/metricweb.go b/metric/metricweb.go new file mode 100644 index 000000000..8bbc092d1 --- /dev/null +++ b/metric/metricweb.go @@ -0,0 +1,93 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package metric + +import ( + "expvar" + "net/http" + "runtime" + "time" + + "github.com/pkg/errors" + mw "github.com/zserge/metric" + + "github.com/CovenantSQL/CovenantSQL/utils" + "github.com/CovenantSQL/CovenantSQL/utils/log" +) + +func collect(cc *CollectClient) (err error) { + mfs, err := cc.Registry.Gather() + if err != nil { + err = errors.Wrap(err, "gathering node metrics failed") + return + } + mm := make(SimpleMetricMap, 0) + for _, mf := range mfs { + mm[*mf.Name] = mf + log.Debugf("gathered node: %v", mf) + } + crucialMetrics := mm.FilterCrucialMetrics() + for k, v := range crucialMetrics { + var val expvar.Var + if val = expvar.Get(k); val == nil { + expvar.Publish(k, mw.NewGauge("1h1m")) + val = expvar.Get(k) + } + val.(mw.Metric).Add(v) + } + + return +} + +// InitMetricWeb initializes the /debug/metrics web +func InitMetricWeb(metricWeb string) (err error) { + // Some Go internal metrics + expvar.Publish("go:numgoroutine", mw.NewGauge("1m1s", "5m5s", "1h1m")) + expvar.Publish("go:numcgocall", mw.NewGauge("1m1s", "5m5s", "1h1m")) + expvar.Publish("go:alloc", mw.NewGauge("1m1s", "5m5s", "1h1m")) + expvar.Publish("go:alloctotal", mw.NewGauge("1m1s", "5m5s", "1h1m")) + + // start period provide service transaction generator + // start prometheus collector + cc := NewCollectClient() + err = collect(cc) + if err != nil { + return + } + + go func() { + for range time.Tick(time.Minute) { + _ = collect(cc) + } + }() + + go func() { + for range time.Tick(5 * time.Second) { + m := &runtime.MemStats{} + runtime.ReadMemStats(m) + expvar.Get("go:numgoroutine").(mw.Metric).Add(float64(runtime.NumGoroutine())) + expvar.Get("go:numcgocall").(mw.Metric).Add(float64(runtime.NumCgoCall())) + expvar.Get("go:alloc").(mw.Metric).Add(float64(m.Alloc) / float64(utils.MB)) + expvar.Get("go:alloctotal").(mw.Metric).Add(float64(m.TotalAlloc) / float64(utils.MB)) + } + }() + http.Handle("/debug/metrics", mw.Handler(mw.Exposed)) + go func() { + _ = 
http.ListenAndServe(metricWeb, nil) + }() + return +} diff --git a/metric/metricweb_test.go b/metric/metricweb_test.go new file mode 100644 index 000000000..1213960ed --- /dev/null +++ b/metric/metricweb_test.go @@ -0,0 +1,31 @@ +package metric + +import ( + "fmt" + "net/http" + "testing" + "time" + + . "github.com/smartystreets/goconvey/convey" + + "github.com/CovenantSQL/CovenantSQL/utils" +) + +func TestInitMetricWeb(t *testing.T) { + Convey("init metric web", t, func() { + ports, err := utils.GetRandomPorts("127.0.0.1", 1025, 60000, 1) + So(err, ShouldBeNil) + addr := fmt.Sprintf("127.0.0.1:%d", ports[0]) + err = InitMetricWeb(addr) + So(err, ShouldBeNil) + time.Sleep(7 * time.Second) + resp, err := http.Get("http://" + addr + "/debug/metrics") + So(err, ShouldBeNil) + buf := make([]byte, 40960) + _, err = resp.Body.Read(buf) + So(err, ShouldBeNil) + So(string(buf), ShouldContainSubstring, "cpu_count") + So(string(buf), ShouldContainSubstring, "fs_avail") + So(string(buf), ShouldContainSubstring, "go:alloc") + }) +} diff --git a/metric/nodemetricmap.go b/metric/nodemetricmap.go new file mode 100644 index 000000000..5f0757918 --- /dev/null +++ b/metric/nodemetricmap.go @@ -0,0 +1,142 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package metric + +import ( + "sync" + + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils/log" + dto "github.com/prometheus/client_model/go" +) + +// SimpleMetricMap is map from metric name to MetricFamily. +type SimpleMetricMap map[string]*dto.MetricFamily + +// NodeCrucialMetricMap is map[NodeID][MetricName]Value +type NodeCrucialMetricMap map[proto.NodeID]map[string]float64 + +// FilterFunc is a function that knows how to filter a specific node +// that match the metric limits. if node picked return true else false. +type FilterFunc func(key proto.NodeID, value SimpleMetricMap) bool + +// NodeMetricMap is sync.Map version of map[proto.NodeID]SimpleMetricMap. +type NodeMetricMap struct { + sync.Map // map[proto.NodeID]SimpleMetricMap +} + +// FilterNode return node id slice make filterFunc return true. +func (nmm *NodeMetricMap) FilterNode(filterFunc FilterFunc) (ret []proto.NodeID) { + nodePicker := func(key, value interface{}) bool { + id, ok := key.(proto.NodeID) + if !ok { + return true // continue iteration + } + metrics, ok := value.(SimpleMetricMap) + if !ok { + return true // continue iteration + } + if filterFunc(id, metrics) { + ret = append(ret, id) + } + return true + } + nmm.Range(nodePicker) + return +} + +// GetMetrics returns nodes metrics. 
+func (nmm *NodeMetricMap) GetMetrics(nodes []proto.NodeID) (metrics map[proto.NodeID]SimpleMetricMap) { + metrics = make(map[proto.NodeID]SimpleMetricMap) + + for _, node := range nodes { + var ok bool + var rawNodeMetrics interface{} + + if rawNodeMetrics, ok = nmm.Load(node); !ok { + continue + } + + var nodeMetrics SimpleMetricMap + + if nodeMetrics, ok = rawNodeMetrics.(SimpleMetricMap); !ok { + continue + } + + metrics[node] = nodeMetrics + } + + return +} + +// FilterCrucialMetrics filters crucial metrics and also add cpu_count +func (mfm *SimpleMetricMap) FilterCrucialMetrics() (ret map[string]float64) { + crucialMetricNameMap := map[string]string{ + "node_memory_MemAvailable_bytes": "mem_avail", + "node_load1": "load1", + "node_load5": "load5", + "node_load15": "load15", + "node_ntp_offset_seconds": "ntp_offset", + "node_filesystem_free_bytes": "fs_avail", + "node_cpu_count": "cpu_count", + } + ret = make(map[string]float64) + for _, v := range *mfm { + if newName, ok := crucialMetricNameMap[*v.Name]; ok { + var metricVal float64 + switch v.GetType() { + case dto.MetricType_GAUGE: + metricVal = v.GetMetric()[0].GetGauge().GetValue() + case dto.MetricType_COUNTER: + metricVal = v.GetMetric()[0].GetCounter().GetValue() + case dto.MetricType_HISTOGRAM: + metricVal = v.GetMetric()[0].GetHistogram().GetBucket()[0].GetUpperBound() + case dto.MetricType_SUMMARY: + metricVal = v.GetMetric()[0].GetSummary().GetQuantile()[0].GetValue() + case dto.MetricType_UNTYPED: + metricVal = v.GetMetric()[0].GetUntyped().GetValue() + default: + continue + } + ret[newName] = metricVal + } + } + log.Debugf("crucial Metric added: %v", ret) + + return +} + +// GetCrucialMetrics gets NodeCrucialMetricMap from NodeMetricMap +func (nmm *NodeMetricMap) GetCrucialMetrics() (ret NodeCrucialMetricMap) { + ret = make(NodeCrucialMetricMap) + metricsPicker := func(key, value interface{}) bool { + nodeID, ok := key.(proto.NodeID) + if !ok { + return true // continue iteration + } + mfm, ok := 
value.(SimpleMetricMap) + if !ok { + return true // continue iteration + } + + ret[nodeID] = mfm.FilterCrucialMetrics() + return true // continue iteration + } + nmm.Range(metricsPicker) + + return +} diff --git a/metric/nodemetricmap_test.go b/metric/nodemetricmap_test.go new file mode 100644 index 000000000..bb3d78bd8 --- /dev/null +++ b/metric/nodemetricmap_test.go @@ -0,0 +1,97 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package metric + +import ( + "testing" + + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils" + "github.com/CovenantSQL/CovenantSQL/utils/log" + dto "github.com/prometheus/client_model/go" + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestCollectServer_FilterNode(t *testing.T) { + log.SetLevel(log.DebugLevel) + filterTrue := func(key proto.NodeID, value SimpleMetricMap) bool { + log.Debugf("key: %s, value: %#v", key, value) + return true + } + filterFalse := func(key proto.NodeID, value SimpleMetricMap) bool { + log.Debugf("key: %s, value: %#v", key, value) + return false + } + filterMem1MB := func(key proto.NodeID, value SimpleMetricMap) bool { + log.Debugf("key: %s, value: %#v", key, value) + var v *dto.MetricFamily + v, ok := value["node_memory_bytes_total"] + if !ok { + v, ok = value["node_memory_MemTotal_bytes"] + } + if ok && len(v.Metric) > 0 && + v.Metric[0].GetGauge() != nil && + v.Metric[0].GetGauge().Value != nil && + *v.Metric[0].GetGauge().Value > float64(1*utils.MB) { + log.Debugf("has memory: %fGB", *v.Metric[0].GetGauge().Value/float64(utils.GB)) + return true + } + + return false + } + Convey("filter node", t, func() { + cc := NewCollectClient() + mfs, _ := cc.Registry.Gather() + mm := make(SimpleMetricMap, 0) + for _, mf := range mfs { + mm[*mf.Name] = mf + log.Debugf("gathered node: %v", mf) + } + nmm := NodeMetricMap{} + nmm.Store(proto.NodeID("node1"), mm) + nmm.Store(proto.NodeID("node2"), nil) + nmm.Store(proto.NodeID("node3"), mm) + So(len(mm), ShouldEqual, len(mfs)) + So(len(mm), ShouldBeGreaterThan, 2) + + ids := nmm.FilterNode(filterTrue) + So(len(ids), ShouldEqual, 2) + + ids1 := nmm.FilterNode(filterMem1MB) + So(len(ids1), ShouldEqual, 2) + + ids2 := nmm.FilterNode(filterFalse) + So(len(ids2), ShouldEqual, 0) + }) + Convey("filter metrics", t, func() { + cc := NewCollectClient() + mfs, _ := cc.Registry.Gather() + mm := make(SimpleMetricMap, 0) + for _, mf := range mfs { + mm[*mf.Name] = mf + log.Debugf("gathered node: %v", mf) + } + nmm := NodeMetricMap{} + nmm.Store(proto.NodeID("node1"), mm) + nmm.Store(proto.NodeID("node2"), nil) + + cmm := nmm.GetCrucialMetrics() + So(len(cmm), ShouldEqual, 1) + 
So(len(cmm["node1"]), ShouldBeGreaterThanOrEqualTo, 6) + }) + +} diff --git a/metric/rpc.go b/metric/rpc.go new file mode 100644 index 000000000..30977a5bf --- /dev/null +++ b/metric/rpc.go @@ -0,0 +1,152 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package metric + +import ( + "bytes" + "errors" + "fmt" + + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/route" + "github.com/CovenantSQL/CovenantSQL/rpc" + "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/expfmt" +) + +// MetricServiceName is the RPC name +const MetricServiceName = "Metric" + +// CollectClient is the Metric Collect Client +type CollectClient struct { + Registry *prometheus.Registry +} + +// NewCollectClient returns a new CollectClient +func NewCollectClient() *CollectClient { + reg := StartMetricCollector() + if reg == nil { + log.Fatal("StartMetricCollector failed") + } + + return &CollectClient{ + Registry: reg, + } +} + +// CollectServer is the Metric receiver side +type CollectServer struct { + NodeMetric NodeMetricMap // map[proto.NodeID]SimpleMetricMap +} + +// NewCollectServer returns a new CollectServer +func NewCollectServer() *CollectServer { + return &CollectServer{ + NodeMetric: NodeMetricMap{}, + } +} + +// UploadMetrics RPC uploads metric info +func (cs *CollectServer) UploadMetrics(req 
*proto.UploadMetricsReq, resp *proto.UploadMetricsResp) (err error) { + reqNodeID := req.GetNodeID().ToNodeID() + if reqNodeID.IsEmpty() { + err = errors.New("empty node id") + log.Error(err) + return + } + if !route.IsPermitted(&req.Envelope, route.MetricUploadMetrics) { + err = fmt.Errorf("calling from node %s is not permitted", reqNodeID) + log.Error(err) + return + } + + mfm := make(SimpleMetricMap, len(req.MFBytes)) + log.Debugf("RPC received MFS len %d", len(req.MFBytes)) + for _, mf := range req.MFBytes[:] { + bufReader := bytes.NewReader(mf) + //mf := new(dto.MetricFamily) + //dec := expfmt.NewDecoder(bufReader, expfmt.FmtProtoCompact) + //err = dec.Decode(mf) + tp := expfmt.TextParser{} + mf, err := tp.TextToMetricFamilies(bufReader) + if err != nil { + log.Warnf("decode MetricFamily failed: %s", err) + continue + } + //log.Debugf("RPC received MF: %#v", mf) + for k, v := range mf { + mfm[k] = v + } + } + //log.Debugf("MetricFamily uploaded: %v, %v", reqNodeID, mfm) + if len(mfm) > 0 { + cs.NodeMetric.Store(reqNodeID, mfm) + } else { + err = errors.New("no valid metric received") + log.Error(err) + } + return +} + +// GatherMetricBytes gathers the registered metric info and encode it to [][]byte +func (cc *CollectClient) GatherMetricBytes() (mfb [][]byte, err error) { + mfs, err := cc.Registry.Gather() + if err != nil { + log.Errorf("gather metrics failed: %s", err) + return + } + mfb = make([][]byte, 0, len(mfs)) + for _, mf := range mfs[:] { + //log.Debugf("mf: %s", mf.String()) + buf := new(bytes.Buffer) + //enc := expfmt.NewEncoder(buf, expfmt.FmtProtoCompact) + //err = enc.Encode(mf) + _, err := expfmt.MetricFamilyToText(buf, mf) + if err != nil { + log.Warnf("encode MetricFamily failed: %s", err) + continue + } + mfb = append(mfb, buf.Bytes()) + } + if len(mfb) == 0 { + err = errors.New("no valid metric gathered") + } + + return +} + +// UploadMetrics calls RPC UploadMetrics to upload its metric info +func (cc *CollectClient) UploadMetrics(BPNodeID 
proto.NodeID) (err error) { + mfb, err := cc.GatherMetricBytes() + if err != nil { + log.Errorf("GatherMetricBytes failed: %s", err) + return + } + log.Debugf("calling BP: %s", BPNodeID) + reqType := MetricServiceName + ".UploadMetrics" + req := &proto.UploadMetricsReq{ + MFBytes: mfb, + } + resp := new(proto.UploadMetricsResp) + err = rpc.NewCaller().CallNode(BPNodeID, reqType, req, resp) + if err != nil { + log.Errorf("calling RPC %s failed: %s", reqType, err) + } + log.Debugf("resp %s: %v", reqType, resp) + return +} diff --git a/metric/rpc_test.go b/metric/rpc_test.go new file mode 100644 index 000000000..d90f185e8 --- /dev/null +++ b/metric/rpc_test.go @@ -0,0 +1,82 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package metric + +import ( + "os" + "testing" + "time" + + "github.com/CovenantSQL/CovenantSQL/consistent" + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/kms" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/route" + "github.com/CovenantSQL/CovenantSQL/rpc" + "github.com/CovenantSQL/CovenantSQL/utils/log" + . 
"github.com/smartystreets/goconvey/convey" +) + +const PubKeyStorePath = "./public.keystore" + +func TestCollectClient_UploadMetrics(t *testing.T) { + defer os.Remove(PubKeyStorePath) + log.SetLevel(log.DebugLevel) + addr := "127.0.0.1:0" + masterKey := []byte("abc") + + cc := NewCollectClient() + cs := NewCollectServer() + + server, err := rpc.NewServerWithService(rpc.ServiceMap{MetricServiceName: cs}) + if err != nil { + log.Fatal(err) + } + + route.NewDHTService(PubKeyStorePath, new(consistent.KMSStorage), false) + server.InitRPCServer(addr, "../keys/test.key", masterKey) + go server.Serve() + + publicKey, err := kms.GetLocalPublicKey() + nonce := asymmetric.GetPubKeyNonce(publicKey, 10, 100*time.Millisecond, nil) + serverNodeID := proto.NodeID(nonce.Hash.String()) + kms.SetPublicKey(serverNodeID, nonce.Nonce, publicKey) + kms.SetLocalNodeIDNonce(nonce.Hash.CloneBytes(), &nonce.Nonce) + route.SetNodeAddrCache(&proto.RawNodeID{Hash: nonce.Hash}, server.Listener.Addr().String()) + + Convey("get metric and upload by RPC", t, func() { + err = cc.UploadMetrics(serverNodeID) + v, ok := cs.NodeMetric.Load(serverNodeID) + So(ok, ShouldBeTrue) + //log.Debugf("NodeMetric:%#v", v) + + m, _ := v.(SimpleMetricMap) + mfb, err := cc.GatherMetricBytes() + So(err, ShouldBeNil) + So(len(m), ShouldEqual, len(mfb)) + So(len(m), ShouldBeGreaterThan, 2) + }) + + Convey("get metric and upload by simply called without node id", t, func() { + req := &proto.UploadMetricsReq{ + MFBytes: nil, + Envelope: proto.Envelope{}, + } + err = cs.UploadMetrics(req, &proto.UploadMetricsResp{}) + So(err, ShouldNotBeNil) + }) +} diff --git a/proto/nodeinfo.go b/proto/nodeinfo.go index 32b315d7d..c6ef33b49 100644 --- a/proto/nodeinfo.go +++ b/proto/nodeinfo.go @@ -63,6 +63,16 @@ func (z AccountAddress) MarshalJSON() ([]byte, error) { return ((hash.Hash)(z)).MarshalJSON() } +// MarshalYAML implements the yaml.Marshaler interface. 
+func (z AccountAddress) MarshalYAML() (interface{}, error) { + return ((hash.Hash)(z)).MarshalYAML() +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (z *AccountAddress) UnmarshalYAML(unmarshal func(interface{}) error) error { + return ((*hash.Hash)(z)).UnmarshalYAML(unmarshal) +} + // NodeKey is node key on consistent hash ring, generate from Hash(NodeID). type NodeKey RawNodeID @@ -83,8 +93,8 @@ func (z *AccountAddress) Msgsize() (s int) { } // String is a string variable. -func (z *AccountAddress) String() string { - return (*hash.Hash)(z).String() +func (z AccountAddress) String() string { + return (hash.Hash)(z).String() } // Less return true if k is less than y. diff --git a/proto/nodeinfo_gen.go b/proto/nodeinfo_gen.go index 8d729e099..ecda2a967 100644 --- a/proto/nodeinfo_gen.go +++ b/proto/nodeinfo_gen.go @@ -11,27 +11,26 @@ func (z *AddrAndGas) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) + o = append(o, 0x83) if oTemp, err := z.AccountAddress.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } + o = hsp.AppendUint64(o, z.GasAmount) // map header, size 1 - o = append(o, 0x83, 0x81, 0x81) + o = append(o, 0x81) if oTemp, err := z.RawNodeID.Hash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - o = hsp.AppendUint64(o, z.GasAmount) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *AddrAndGas) Msgsize() (s int) { - s = 1 + 15 + z.AccountAddress.Msgsize() + 10 + 1 + 5 + z.RawNodeID.Hash.Msgsize() + 10 + hsp.Uint64Size + s = 1 + 15 + z.AccountAddress.Msgsize() + 10 + hsp.Uint64Size + 10 + 1 + 5 + z.RawNodeID.Hash.Msgsize() return } @@ -40,7 +39,14 @@ func (z *Node) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 5 - o = append(o, 0x85, 
0x85) + o = append(o, 0x85) + o = hsp.AppendString(o, z.Addr) + o = hsp.AppendString(o, string(z.ID)) + if oTemp, err := z.Nonce.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } if z.PublicKey == nil { o = hsp.AppendNil(o) } else { @@ -50,30 +56,19 @@ func (z *Node) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x85) - o = hsp.AppendString(o, string(z.ID)) - o = append(o, 0x85) o = hsp.AppendInt(o, int(z.Role)) - o = append(o, 0x85) - if oTemp, err := z.Nonce.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x85) - o = hsp.AppendString(o, z.Addr) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Node) Msgsize() (s int) { - s = 1 + 10 + s = 1 + 5 + hsp.StringPrefixSize + len(z.Addr) + 3 + hsp.StringPrefixSize + len(string(z.ID)) + 6 + z.Nonce.Msgsize() + 10 if z.PublicKey == nil { s += hsp.NilSize } else { s += z.PublicKey.Msgsize() } - s += 3 + hsp.StringPrefixSize + len(string(z.ID)) + 5 + hsp.IntSize + 6 + z.Nonce.Msgsize() + 5 + hsp.StringPrefixSize + len(z.Addr) + s += 5 + hsp.IntSize return } @@ -96,7 +91,7 @@ func (z *NodeKey) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 1 - o = append(o, 0x81, 0x81) + o = append(o, 0x81) if oTemp, err := z.Hash.MarshalHash(); err != nil { return nil, err } else { @@ -116,7 +111,7 @@ func (z *RawNodeID) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 1 - o = append(o, 0x81, 0x81) + o = append(o, 0x81) if oTemp, err := z.Hash.MarshalHash(); err != nil { return nil, err } else { diff --git a/proto/nodeinfo_test.go b/proto/nodeinfo_test.go index fb0461fae..15ebf2895 100644 --- a/proto/nodeinfo_test.go +++ b/proto/nodeinfo_test.go @@ -96,7 +96,7 @@ func TestServerRoles_Contains(t *testing.T) { }) } -func 
unmarshalAndMarshal(str string) string { +func unmarshalAndMarshalServerRole(str string) string { var role ServerRole yaml.Unmarshal([]byte(str), &role) ret, _ := yaml.Marshal(role) @@ -104,16 +104,31 @@ func unmarshalAndMarshal(str string) string { return strings.TrimSpace(string(ret)) } +func unmarshalAndMarshalAccountAddress(str string) string { + var addr AccountAddress + yaml.Unmarshal([]byte(str), &addr) + ret, _ := yaml.Marshal(addr) + + return strings.TrimSpace(string(ret)) +} + +func TestAccountAddress_MarshalYAML(t *testing.T) { + Convey("marshal unmarshal yaml", t, func() { + So(unmarshalAndMarshalAccountAddress("6d5e7b36f5fa83d538539f31cf46682b0df3e0ecd192f2331dcf73e7e5ab5686"), + ShouldEqual, "6d5e7b36f5fa83d538539f31cf46682b0df3e0ecd192f2331dcf73e7e5ab5686") + }) +} + func TestServerRole_MarshalYAML(t *testing.T) { Convey("marshal unmarshal yaml", t, func() { var role ServerRole s, _ := role.MarshalYAML() So(s, ShouldResemble, "Unknown") - So(unmarshalAndMarshal("unknown"), ShouldEqual, "Unknown") - So(unmarshalAndMarshal("leader"), ShouldEqual, "Leader") - So(unmarshalAndMarshal("follower"), ShouldEqual, "Follower") - So(unmarshalAndMarshal("miner"), ShouldEqual, "Miner") - So(unmarshalAndMarshal("client"), ShouldEqual, "Client") + So(unmarshalAndMarshalServerRole("unknown"), ShouldEqual, "Unknown") + So(unmarshalAndMarshalServerRole("leader"), ShouldEqual, "Leader") + So(unmarshalAndMarshalServerRole("follower"), ShouldEqual, "Follower") + So(unmarshalAndMarshalServerRole("miner"), ShouldEqual, "Miner") + So(unmarshalAndMarshalServerRole("client"), ShouldEqual, "Client") }) } diff --git a/proto/proto_gen.go b/proto/proto_gen.go index d0aab0198..e0517ded7 100644 --- a/proto/proto_gen.go +++ b/proto/proto_gen.go @@ -25,7 +25,8 @@ func (z *Envelope) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 4 - o = append(o, 0x84, 0x84) + o = append(o, 0x84) + o = hsp.AppendInt64(o, int64(z.Expire)) if 
z.NodeID == nil { o = hsp.AppendNil(o) } else { @@ -35,24 +36,20 @@ func (z *Envelope) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x84) - o = hsp.AppendString(o, z.Version) - o = append(o, 0x84) o = hsp.AppendInt64(o, int64(z.TTL)) - o = append(o, 0x84) - o = hsp.AppendInt64(o, int64(z.Expire)) + o = hsp.AppendString(o, z.Version) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Envelope) Msgsize() (s int) { - s = 1 + 7 + s = 1 + 7 + hsp.Int64Size + 7 if z.NodeID == nil { s += hsp.NilSize } else { s += z.NodeID.Msgsize() } - s += 8 + hsp.StringPrefixSize + len(z.Version) + 4 + hsp.Int64Size + 7 + hsp.Int64Size + s += 4 + hsp.Int64Size + 8 + hsp.StringPrefixSize + len(z.Version) return } @@ -61,19 +58,18 @@ func (z *FindNeighborReq) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 4 - o = append(o, 0x84, 0x84) + o = append(o, 0x84) + o = hsp.AppendInt(o, z.Count) if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x84) if oTemp, err := z.ID.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x84) o = hsp.AppendArrayHeader(o, uint32(len(z.Roles))) for za0001 := range z.Roles { if oTemp, err := z.Roles[za0001].MarshalHash(); err != nil { @@ -82,18 +78,15 @@ func (z *FindNeighborReq) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x84) - o = hsp.AppendInt(o, z.Count) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *FindNeighborReq) Msgsize() (s int) { - s = 1 + 9 + z.Envelope.Msgsize() + 3 + z.ID.Msgsize() + 6 + hsp.ArrayHeaderSize + s = 1 + 6 + hsp.IntSize + 9 + z.Envelope.Msgsize() + 3 + z.ID.Msgsize() + 6 + hsp.ArrayHeaderSize for za0001 := range z.Roles { s += 
z.Roles[za0001].Msgsize() } - s += 6 + hsp.IntSize return } @@ -102,13 +95,13 @@ func (z *FindNeighborResp) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) + o = append(o, 0x83) if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) + o = hsp.AppendString(o, z.Msg) o = hsp.AppendArrayHeader(o, uint32(len(z.Nodes))) for za0001 := range z.Nodes { if oTemp, err := z.Nodes[za0001].MarshalHash(); err != nil { @@ -117,18 +110,15 @@ func (z *FindNeighborResp) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x83) - o = hsp.AppendString(o, z.Msg) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *FindNeighborResp) Msgsize() (s int) { - s = 1 + 9 + z.Envelope.Msgsize() + 6 + hsp.ArrayHeaderSize + s = 1 + 9 + z.Envelope.Msgsize() + 4 + hsp.StringPrefixSize + len(z.Msg) + 6 + hsp.ArrayHeaderSize for za0001 := range z.Nodes { s += z.Nodes[za0001].Msgsize() } - s += 4 + hsp.StringPrefixSize + len(z.Msg) return } @@ -137,13 +127,12 @@ func (z *FindNodeReq) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) if oTemp, err := z.ID.MarshalHash(); err != nil { return nil, err } else { @@ -163,7 +152,13 @@ func (z *FindNodeResp) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) + o = append(o, 0x83) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = hsp.AppendString(o, z.Msg) if z.Node == nil { o = hsp.AppendNil(o) } else { 
@@ -173,26 +168,17 @@ func (z *FindNodeResp) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x83) - if oTemp, err := z.Envelope.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x83) - o = hsp.AppendString(o, z.Msg) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *FindNodeResp) Msgsize() (s int) { - s = 1 + 5 + s = 1 + 9 + z.Envelope.Msgsize() + 4 + hsp.StringPrefixSize + len(z.Msg) + 5 if z.Node == nil { s += hsp.NilSize } else { s += z.Node.Msgsize() } - s += 9 + z.Envelope.Msgsize() + 4 + hsp.StringPrefixSize + len(z.Msg) return } @@ -201,13 +187,12 @@ func (z *PingReq) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) if oTemp, err := z.Node.MarshalHash(); err != nil { return nil, err } else { @@ -227,13 +212,12 @@ func (z *PingResp) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) o = hsp.AppendString(o, z.Msg) return } @@ -249,13 +233,12 @@ func (z *UploadMetricsReq) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) o = hsp.AppendArrayHeader(o, uint32(len(z.MFBytes))) for za0001 := range z.MFBytes { o = hsp.AppendBytes(o, z.MFBytes[za0001]) @@ -277,13 +260,12 @@ func (z 
*UploadMetricsResp) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) o = hsp.AppendString(o, z.Msg) return } diff --git a/proto/servers_gen.go b/proto/servers_gen.go index 8041564e7..bf852302d 100644 --- a/proto/servers_gen.go +++ b/proto/servers_gen.go @@ -11,14 +11,13 @@ func (z *Peers) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) - if oTemp, err := z.PeersHeader.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.PeersHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -28,7 +27,7 @@ func (z *Peers) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Peers) Msgsize() (s int) { - s = 1 + 12 + z.PeersHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 12 + z.PeersHeader.Msgsize() return } @@ -37,13 +36,12 @@ func (z *PeersHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 4 - o = append(o, 0x84, 0x84) + o = append(o, 0x84) if oTemp, err := z.Leader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x84) o = hsp.AppendArrayHeader(o, uint32(len(z.Servers))) for za0001 := range z.Servers { if oTemp, err := z.Servers[za0001].MarshalHash(); err != nil { @@ -52,10 +50,8 @@ func (z 
*PeersHeader) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x84) - o = hsp.AppendUint64(o, z.Version) - o = append(o, 0x84) o = hsp.AppendUint64(o, z.Term) + o = hsp.AppendUint64(o, z.Version) return } @@ -65,6 +61,6 @@ func (z *PeersHeader) Msgsize() (s int) { for za0001 := range z.Servers { s += z.Servers[za0001].Msgsize() } - s += 8 + hsp.Uint64Size + 5 + hsp.Uint64Size + s += 5 + hsp.Uint64Size + 8 + hsp.Uint64Size return } diff --git a/route/acl.go b/route/acl.go index f62cb403e..b21d6470c 100644 --- a/route/acl.go +++ b/route/acl.go @@ -41,6 +41,9 @@ ACLs: Miner -> Miner, Kayak.Call(): ACL: Open to Miner Leader. + Miner -> BP, Metric.UploadMetrics(): + ACL: Open to Registered Miner + BP -> BP, Exchange NodeInfo, Kayak.Call(): ACL: Open to BP @@ -66,6 +69,8 @@ const ( DHTFindNode // KayakCall is used by BP for data consistency KayakCall + // MetricUploadMetrics uploads node metrics + MetricUploadMetrics // DBSQuery is used by client to read/write database DBSQuery // DBSAck is used by client to send acknowledge to the query response @@ -90,10 +95,6 @@ const ( SQLCSignBilling // SQLCLaunchBilling is used by blockproducer to trigger the billing process in sqlchain SQLCLaunchBilling - // SQLCSubscribeTransactions is used by sqlchain to handle observer subscription request - SQLCSubscribeTransactions - // SQLCCancelSubscription is used by sqlchain to handle observer subscription cancellation request - SQLCCancelSubscription // OBSAdviseNewBlock is used by sqlchain to push new block to observers OBSAdviseNewBlock // MCCAdviseNewBlock is used by block producer to push block to adjacent nodes @@ -115,10 +116,12 @@ const ( MCCNextAccountNonce // MCCAddTx is used by block producer main chain to upload transaction MCCAddTx - // MCCQuerySQLChainProfile is used by nodes to to query SQLChainProfile. + // MCCQuerySQLChainProfile is used by nodes to query SQLChainProfile. 
MCCQuerySQLChainProfile // MCCQueryAccountTokenBalance is used by block producer to provide account token balance MCCQueryAccountTokenBalance + // MCCQueryTxState is used by client to query transaction state. + MCCQueryTxState // DHTRPCName defines the block producer dh-rpc service name DHTRPCName = "DHT" // BlockProducerRPCName defines main chain rpc name @@ -140,6 +143,8 @@ func (s RemoteFunc) String() string { return "DHT.FindNeighbor" case DHTFindNode: return "DHT.FindNode" + case MetricUploadMetrics: + return "Metric.UploadMetrics" case KayakCall: return "Kayak.Call" case DBSQuery: @@ -166,10 +171,6 @@ func (s RemoteFunc) String() string { return "SQLC.SignBilling" case SQLCLaunchBilling: return "SQLC.LaunchBilling" - case SQLCSubscribeTransactions: - return "SQLC.SubscribeTransactions" - case SQLCCancelSubscription: - return "SQLC.CancelSubscription" case OBSAdviseNewBlock: return "OBS.AdviseNewBlock" case MCCAdviseNewBlock: @@ -194,6 +195,8 @@ func (s RemoteFunc) String() string { return "MCC.QuerySQLChainProfile" case MCCQueryAccountTokenBalance: return "MCC.QueryAccountTokenBalance" + case MCCQueryTxState: + return "MCC.QueryTxState" } return "Unknown" } @@ -217,7 +220,7 @@ func IsPermitted(callerEnvelope *proto.Envelope, funcName RemoteFunc) (ok bool) // non BP switch funcName { // DHT related - case DHTPing, DHTFindNode, DHTFindNeighbor: + case DHTPing, DHTFindNode, DHTFindNeighbor, MetricUploadMetrics: return true // Kayak related case KayakCall: diff --git a/rpc/client.go b/rpc/client.go index b3bc19165..efdca3bd0 100644 --- a/rpc/client.go +++ b/rpc/client.go @@ -27,7 +27,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/utils" - "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/pkg/errors" mux "github.com/xtaci/smux" ) @@ -45,14 +44,14 @@ type Client struct { } var ( - // YamuxConfig holds the default Yamux config - YamuxConfig *mux.Config + // 
MuxConfig holds the default mux config + MuxConfig *mux.Config // DefaultDialer holds the default dialer of SessionPool DefaultDialer func(nodeID proto.NodeID) (conn net.Conn, err error) ) func init() { - YamuxConfig = mux.DefaultConfig() + MuxConfig = mux.DefaultConfig() DefaultDialer = dialToNode } @@ -61,7 +60,7 @@ func init() { func dial(network, address string, remoteNodeID *proto.RawNodeID, cipher *etls.Cipher, isAnonymous bool) (c *etls.CryptoConn, err error) { conn, err := net.Dial(network, address) if err != nil { - log.WithField("addr", address).WithError(err).Error("connect to node failed") + err = errors.Wrapf(err, "connect to node %s failed", address) return } writeBuf := make([]byte, ETLSHeaderSize) @@ -76,12 +75,12 @@ func dial(network, address string, remoteNodeID *proto.RawNodeID, cipher *etls.C var nonce *cpuminer.Uint256 nodeIDBytes, err = kms.GetLocalNodeIDBytes() if err != nil { - log.WithError(err).Error("get local node id failed") + err = errors.Wrap(err, "get local node id failed") return } nonce, err = kms.GetLocalNonce() if err != nil { - log.WithError(err).Error("get local nonce failed") + err = errors.Wrap(err, "get local nonce failed") return } copy(writeBuf[2:2+hash.HashSize], nodeIDBytes) @@ -89,7 +88,7 @@ func dial(network, address string, remoteNodeID *proto.RawNodeID, cipher *etls.C } wrote, err := conn.Write(writeBuf) if err != nil { - log.WithError(err).Error("write node id and nonce failed") + err = errors.Wrap(err, "write node id and nonce failed") return } @@ -109,21 +108,20 @@ func DialToNode(nodeID proto.NodeID, pool *SessionPool, isAnonymous bool) (conn var sess *mux.Session ETLSConn, err = dialToNodeEx(nodeID, isAnonymous) if err != nil { - log.WithField("target", nodeID).WithError(err).Error("dialToNode failed") return } - sess, err = mux.Client(ETLSConn, YamuxConfig) + sess, err = mux.Client(ETLSConn, MuxConfig) if err != nil { - log.WithField("target", nodeID).WithError(err).Error("init yamux client failed") + err = 
errors.Wrapf(err, "init yamux client to %s failed", nodeID) return } conn, err = sess.OpenStream() if err != nil { - log.WithField("target", nodeID).WithError(err).Error("open new session failed") + err = errors.Wrapf(err, "open new session to %s failed", nodeID) } return } - log.WithField("poolSize", pool.Len()).Debug("session pool size") + //log.WithField("poolSize", pool.Len()).Debug("session pool size") conn, err = pool.Get(nodeID) return } @@ -153,23 +151,19 @@ func dialToNodeEx(nodeID proto.NodeID, isAnonymous bool) (conn net.Conn, err err */ symmetricKey, err := GetSharedSecretWith(rawNodeID, isAnonymous) if err != nil { - log.WithField("target", rawNodeID.String()).WithError(err).Error("get shared secret failed") return } nodeAddr, err := GetNodeAddr(rawNodeID) if err != nil { - log.WithField("target", rawNodeID.String()).WithError(err).Error("resolve node failed") + err = errors.Wrapf(err, "resolve %s failed", rawNodeID.String()) return } cipher := etls.NewCipher(symmetricKey) conn, err = dial("tcp", nodeAddr, rawNodeID, cipher, isAnonymous) if err != nil { - log.WithFields(log.Fields{ - "target": rawNodeID.String(), - "addr": nodeAddr, - }).WithError(err).Error("connect failed") + err = errors.Wrapf(err, "connect %s %s failed", rawNodeID.String(), nodeAddr) return } @@ -197,15 +191,15 @@ func InitClientConn(conn net.Conn) (client *Client, err error) { muxConn, ok := conn.(*mux.Stream) if !ok { var sess *mux.Session - sess, err = mux.Client(conn, YamuxConfig) + sess, err = mux.Client(conn, MuxConfig) if err != nil { - log.WithError(err).Error("init yamux client failed") + err = errors.Wrap(err, "init mux client failed") return } muxConn, err = sess.OpenStream() if err != nil { - log.WithError(err).Error("open stream failed") + err = errors.Wrap(err, "open stream failed") return } } @@ -218,6 +212,6 @@ func InitClientConn(conn net.Conn) (client *Client, err error) { // Close the client RPC connection func (c *Client) Close() { - log.WithField("addr", 
c.RemoteAddr).Debug("closing client") - c.Client.Close() + //log.WithField("addr", c.RemoteAddr).Debug("closing client") + _ = c.Client.Close() } diff --git a/rpc/jsonrpc/handler.go b/rpc/jsonrpc/handler.go new file mode 100644 index 000000000..d57a3c43f --- /dev/null +++ b/rpc/jsonrpc/handler.go @@ -0,0 +1,84 @@ +package jsonrpc + +import ( + "context" + "fmt" + "reflect" + + "github.com/sourcegraph/jsonrpc2" +) + +var ( + defaultHandler = NewHandler() +) + +// HandlerFunc is a function adapter to Handler. +type HandlerFunc func(context.Context, *jsonrpc2.Conn, *jsonrpc2.Request) (interface{}, error) + +// RegisterMethod register a method to the default handler. +func RegisterMethod(method string, handlerFunc HandlerFunc, paramsType interface{}) { + defaultHandler.RegisterMethod(method, handlerFunc, paramsType) +} + +// Handler is a handler handling JSON-RPC protocol. +type Handler struct { + methods map[string]HandlerFunc +} + +// NewHandler creates a new JSONRPCHandler. +func NewHandler() *Handler { + return &Handler{ + methods: make(map[string]HandlerFunc), + } +} + +// RegisterMethod register a method. +func (h *Handler) RegisterMethod(method string, handlerFunc HandlerFunc, paramsType interface{}) { + if _, ok := h.methods[method]; ok { + panic(fmt.Sprintf("method %q already registered", method)) + } + + if paramsType == nil { + h.methods[method] = handlerFunc + return + } + + // Pre-process rpc parameters with a middleware + typ := reflect.TypeOf(paramsType) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + + h.methods[method] = processParams(handlerFunc, typ) +} + +// Handle implements jsonrpc2.Handler. +func (h *Handler) Handle(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) { + jsonrpc2.HandlerWithError(h.handle).Handle(ctx, conn, req) +} + +// handle is a function to be used by jsonrpc2.Handler. 
+func (h *Handler) handle(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) ( + result interface{}, err error, +) { + defer func() { + if p := recover(); p != nil { + switch p := p.(type) { + case error: + err = p + default: + err = fmt.Errorf("%v", p) + } + } + }() + + fn := h.methods[req.Method] + if fn == nil { + return nil, &jsonrpc2.Error{Code: jsonrpc2.CodeMethodNotFound} + } else if req.Params == nil { + // pre-check req.Params not be nil + return nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams} + } + + return fn(ctx, conn, req) +} diff --git a/rpc/jsonrpc/middleware.go b/rpc/jsonrpc/middleware.go new file mode 100644 index 000000000..378cadfa8 --- /dev/null +++ b/rpc/jsonrpc/middleware.go @@ -0,0 +1,54 @@ +package jsonrpc + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + + "github.com/sourcegraph/jsonrpc2" +) + +// Validator is designed for params checking. +type Validator interface { + Validate() error +} + +// middleware: unmarshal req.Params(JSON array) to pre-defined structures (Object) +func processParams(h HandlerFunc, paramsType reflect.Type) HandlerFunc { + return func(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) ( + result interface{}, err error, + ) { + paramsNew := reflect.New(paramsType) + paramsElem := paramsNew.Elem() + paramsArray := make([]interface{}, paramsElem.NumField()) + for i := 0; i < paramsElem.NumField(); i++ { + paramsArray[i] = paramsElem.Field(i).Addr().Interface() + } + + // Unmarshal JSON array to object + // e.g. 
"[0,10]" --> struct { From: 0, To: 10 } + if err := json.Unmarshal(*req.Params, ¶msArray); err != nil { + return nil, err + } + + if len(paramsArray) != paramsElem.NumField() { + return nil, fmt.Errorf("unexpected parameters, expected %d but got %d", + paramsElem.NumField(), len(paramsArray)) + } + + // parameters validator + params := paramsNew.Interface() + if t, ok := params.(Validator); ok { + if err := t.Validate(); err != nil { + return nil, &jsonrpc2.Error{ + Code: jsonrpc2.CodeInvalidParams, + Message: err.Error(), + } + } + } + + ctx = context.WithValue(ctx, interface{}("_params"), params) + return h(ctx, conn, req) + } +} diff --git a/rpc/jsonrpc/websocket.go b/rpc/jsonrpc/websocket.go new file mode 100644 index 000000000..ae02a62cf --- /dev/null +++ b/rpc/jsonrpc/websocket.go @@ -0,0 +1,70 @@ +package jsonrpc + +import ( + "context" + "net" + "net/http" + "time" + + "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/gorilla/websocket" + "github.com/pkg/errors" + "github.com/sourcegraph/jsonrpc2" + wsstream "github.com/sourcegraph/jsonrpc2/websocket" +) + +// WebsocketServer is a websocket server providing JSON-RPC API service. +type WebsocketServer struct { + http.Server + RPCHandler jsonrpc2.Handler +} + +// Serve accepts incoming connections and serve each. 
+func (ws *WebsocketServer) Serve() error { + var ( + mux = http.NewServeMux() + upgrader = websocket.Upgrader{CheckOrigin: func(r *http.Request) bool { return true }} + handler = ws.RPCHandler + ) + + if handler == nil { + handler = defaultHandler + } + + mux.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) { + conn, err := upgrader.Upgrade(rw, r, nil) + if err != nil { + log.WithError(err).Error("jsonrpc: upgrade http connection to websocket failed") + http.Error(rw, errors.WithMessage(err, "could not upgrade to websocket").Error(), http.StatusBadRequest) + return + } + defer conn.Close() + + // TODO: add metric for the connections + <-jsonrpc2.NewConn( + context.Background(), + wsstream.NewObjectStream(conn), + handler, + ).DisconnectNotify() + }) + + addr := ws.Addr + listener, err := net.Listen("tcp", addr) + if err != nil { + return errors.Wrapf(err, "couldn't bind to address %q", addr) + } + + ws.Handler = mux + return ws.Server.Serve(listener) +} + +// Stop stops the server and returns a channel indicating server is stopped. 
+func (ws *WebsocketServer) Stop() { + log.Warn("jsonrpc: shutdown server") + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + if err := ws.Server.Shutdown(ctx); err != nil { + log.WithError(err).Error("jsonrpc: shutdown server") + } + cancel() + log.Warn("jsonrpc: server stopped") +} diff --git a/rpc/leak_test.go b/rpc/leak_test.go index 7bca030c1..9e93bc9f1 100644 --- a/rpc/leak_test.go +++ b/rpc/leak_test.go @@ -31,7 +31,7 @@ import ( ) func TestSessionPool_SessionBroken(t *testing.T) { - log.SetLevel(log.DebugLevel) + log.SetLevel(log.FatalLevel) var err error conf.GConf, err = conf.LoadConfig(FJ(testWorkingDir, "./leak/client.yaml")) @@ -109,7 +109,7 @@ func TestSessionPool_SessionBroken(t *testing.T) { } pool := GetSessionPoolInstance() - sess, _ := pool.getSessionFromPool(leaderNodeID) + sess, _ := pool.getSession(leaderNodeID) log.Debugf("session for %s, %#v", leaderNodeID, sess) sess.Close() diff --git a/rpc/pool.go b/rpc/pool.go index c27f1d2e5..fa19853a0 100644 --- a/rpc/pool.go +++ b/rpc/pool.go @@ -20,16 +20,15 @@ import ( "net" "sync" + "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/pkg/errors" mux "github.com/xtaci/smux" ) // SessPool is the session pool interface type SessPool interface { Get(proto.NodeID) (net.Conn, error) - Set(proto.NodeID, net.Conn) bool - Remove(proto.NodeID) Close() Len() int } @@ -42,16 +41,18 @@ type SessionMap map[proto.NodeID]*Session // Session is the Session type of SessionPool type Session struct { - ID proto.NodeID - Sess *mux.Session - conn net.Conn + sync.RWMutex + nodeDialer NodeDialer + target proto.NodeID + sess []*mux.Session + offset int } // SessionPool is the struct type of session pool type SessionPool struct { + sync.RWMutex sessions SessionMap nodeDialer NodeDialer - sync.RWMutex } var ( @@ -61,7 +62,72 @@ var ( // Close closes the session func (s *Session) Close() { - s.Sess.Close() 
+ s.Lock() + defer s.Unlock() + for _, s := range s.sess { + _ = s.Close() + } + s.sess = s.sess[:0] +} + +// Get returns new connection from session. +func (s *Session) Get() (conn net.Conn, err error) { + s.Lock() + defer s.Unlock() + s.offset++ + s.offset %= conf.MaxRPCPoolPhysicalConnection + + var ( + sess *mux.Session + stream *mux.Stream + sessions []*mux.Session + ) + + for { + if len(s.sess) <= s.offset { + // open new session + sess, err = s.newSession() + if err != nil { + return + } + s.sess = append(s.sess, sess) + s.offset = len(s.sess) - 1 + } else { + sess = s.sess[s.offset] + } + + // open connection + stream, err = sess.OpenStream() + if err != nil { + // invalidate session + sessions = nil + sessions = append(sessions, s.sess[0:s.offset]...) + sessions = append(sessions, s.sess[s.offset+1:]...) + s.sess = sessions + continue + } + + conn = stream + return + } +} + +// Len returns physical connection count. +func (s *Session) Len() int { + s.RLock() + defer s.RUnlock() + return len(s.sess) +} + +func (s *Session) newSession() (sess *mux.Session, err error) { + var conn net.Conn + conn, err = s.nodeDialer(s.target) + if err != nil { + err = errors.Wrap(err, "dialing new session connection failed") + return + } + + return mux.Client(conn, MuxConfig) } // newSessionPool creates a new SessionPool @@ -80,104 +146,42 @@ func GetSessionPoolInstance() *SessionPool { return instance } -// toSession wraps net.Conn to mux.Session -func toSession(id proto.NodeID, conn net.Conn) (sess *Session, err error) { - // create mux session - newSess, err := mux.Client(conn, YamuxConfig) - if err != nil { - //log.Errorf("dial to new node %s failed: %s", id, err) // no log in lock - return - } - // Store it - sess = &Session{ - ID: id, - Sess: newSess, - conn: conn, - } - return -} - -// LoadOrStore returns the existing Session for the node id if present. Otherwise, it stores and -// returns the given Session. 
The loaded result is true if the Session was loaded, false if stored. -func (p *SessionPool) LoadOrStore(id proto.NodeID, newSess *Session) (sess *Session, loaded bool) { +func (p *SessionPool) getSession(id proto.NodeID) (sess *Session, loaded bool) { // NO Blocking operation in this function p.Lock() + defer p.Unlock() sess, exist := p.sessions[id] if exist { - p.Unlock() - log.WithField("node", id).Debug("load session for target node") + //log.WithField("node", id).Debug("load session for target node") loaded = true } else { - p.sessions[id] = newSess - p.Unlock() - sess = newSess + // new session + sess = &Session{ + nodeDialer: p.nodeDialer, + target: id, + } + p.sessions[id] = sess } return } -func (p *SessionPool) getSessionFromPool(id proto.NodeID) (sess *Session, ok bool) { - sess, ok = p.sessions[id] - return -} - // Get returns existing session to the node, if not exist try best to create one func (p *SessionPool) Get(id proto.NodeID) (conn net.Conn, err error) { - // first try to get one session from pool - p.Lock() - cachedConn, ok := p.getSessionFromPool(id) - p.Unlock() - if ok { - conn, err = cachedConn.Sess.OpenStream() - if err == nil { - log.WithField("node", id).Debug("reusing session") - return - } - log.WithField("target", id).WithError(err).Error("open session failed") - p.Remove(id) - } - - log.WithField("target", id).Debug("dialing new session") - // Can't find existing Session, try to dial one - newConn, err := p.nodeDialer(id) - if err != nil { - log.WithField("node", id).WithError(err).Error("dial new session failed") - return - } - newSess, err := toSession(id, newConn) - if err != nil { - newConn.Close() - log.WithField("node", id).WithError(err).Error("dial new session failed") - return - } - sess, loaded := p.LoadOrStore(id, newSess) - if loaded { - newSess.Close() - } - return sess.Sess.OpenStream() -} - -// Set tries to set a new connection to the pool, typically from Accept() -// if there is an existing one, just do nothing -func 
(p *SessionPool) Set(id proto.NodeID, conn net.Conn) (exist bool) { - sess, err := toSession(id, conn) - if err != nil { - return - } - _, exist = p.LoadOrStore(id, sess) - return + var sess *Session + sess, _ = p.getSession(id) + return sess.Get() } // Remove the node sessions in the pool func (p *SessionPool) Remove(id proto.NodeID) { p.Lock() - sess, ok := p.getSessionFromPool(id) - if ok { - delete(p.sessions, id) - p.Unlock() + defer p.Unlock() + sess, exist := p.sessions[id] + if exist { sess.Close() - } else { - p.Unlock() + delete(p.sessions, id) } + return } // Close closes all sessions in the pool @@ -191,8 +195,12 @@ func (p *SessionPool) Close() { } // Len returns the session counts in the pool -func (p *SessionPool) Len() int { +func (p *SessionPool) Len() (total int) { p.RLock() defer p.RUnlock() - return len(p.sessions) + + for _, s := range p.sessions { + total += s.Len() + } + return } diff --git a/rpc/pool_test.go b/rpc/pool_test.go index 6cac13b3b..7a0e5b124 100644 --- a/rpc/pool_test.go +++ b/rpc/pool_test.go @@ -22,6 +22,7 @@ import ( "sync" "testing" + "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" @@ -32,7 +33,7 @@ import ( const ( localAddr = "127.0.0.1:4444" localAddr2 = "127.0.0.1:4445" - concurrency = 4 + concurrency = conf.MaxRPCPoolPhysicalConnection + 1 packetCount = 100 ) @@ -48,35 +49,37 @@ func server(c C, localAddr string, n int) error { // Accept a TCP connection listener, err := net.Listen("tcp", localAddr) go func() { - conn, err := listener.Accept() - c.So(err, ShouldBeNil) - - // Setup server side of mux - log.Println("creating server session") - session, err := mux.Server(conn, nil) - c.So(err, ShouldBeNil) - for i := 0; i < concurrency; i++ { - go func(i int, c2 C) { - // Accept a stream - //c2.So(err, ShouldBeNil) - // Stream implements net.Conn - // Listen for a message - //c2.So(string(buf1), 
ShouldEqual, "ping") - log.Println("accepting stream") - stream, err := session.AcceptStream() - if err == nil { - buf1 := make([]byte, 4) - for i := 0; i < n; { - n, err := stream.Read(buf1) - if n == 4 && err == nil { - i++ - c2.So(string(buf1), ShouldEqual, "ping") + go func() { + conn, err := listener.Accept() + c.So(err, ShouldBeNil) + + // Setup server side of mux + log.Println("creating server session") + session, err := mux.Server(conn, nil) + c.So(err, ShouldBeNil) + + for i := 0; i < concurrency; i++ { + // Accept a stream + //c.So(err, ShouldBeNil) + // Stream implements net.Conn + // Listen for a message + //c.So(string(buf1), ShouldEqual, "ping") + log.Println("accepting stream") + stream, err := session.AcceptStream() + if err == nil { + buf1 := make([]byte, 4) + for i := 0; i < n; { + n, err := stream.Read(buf1) + if n == 4 && err == nil { + i++ + c.So(string(buf1), ShouldEqual, "ping") + } } + log.Debugf("buf#%d read done", i) } - log.Debugf("buf#%d read done", i) } - }(i, c) + }() } }() return err @@ -84,7 +87,7 @@ func server(c C, localAddr string, n int) error { func BenchmarkSessionPool_Get(b *testing.B) { Convey("session pool", b, func(c C) { - log.SetLevel(log.DebugLevel) + log.SetLevel(log.FatalLevel) p := newSessionPool(func(nodeID proto.NodeID) (net.Conn, error) { log.Debugf("creating new connection to %s", nodeID) return net.Dial("tcp", string(nodeID)) @@ -116,7 +119,7 @@ func BenchmarkSessionPool_Get(b *testing.B) { func TestNewSessionPool(t *testing.T) { Convey("session pool", t, func(c C) { - log.SetLevel(log.DebugLevel) + log.SetLevel(log.FatalLevel) p := newSessionPool(func(nodeID proto.NodeID) (net.Conn, error) { log.Debugf("creating new connection to %s", nodeID) return net.Dial("tcp", string(nodeID)) @@ -149,15 +152,12 @@ func TestNewSessionPool(t *testing.T) { } wg.Wait() - So(p.Len(), ShouldEqual, 1) + So(p.Len(), ShouldEqual, conf.MaxRPCPoolPhysicalConnection) server(c, localAddr2, packetCount) - conn, _ := net.Dial("tcp", 
localAddr2) - exists := p.Set(proto.NodeID(localAddr2), conn) - So(exists, ShouldBeFalse) - exists = p.Set(proto.NodeID(localAddr2), conn) - So(exists, ShouldBeTrue) - So(p.Len(), ShouldEqual, 2) + _, err := p.Get(proto.NodeID(localAddr2)) + So(err, ShouldBeNil) + So(p.Len(), ShouldEqual, conf.MaxRPCPoolPhysicalConnection+1) wg2 := &sync.WaitGroup{} wg2.Add(concurrency) @@ -182,14 +182,13 @@ func TestNewSessionPool(t *testing.T) { } wg2.Wait() - So(p.Len(), ShouldEqual, 2) + So(p.Len(), ShouldEqual, conf.MaxRPCPoolPhysicalConnection*2) p.Remove(proto.NodeID(localAddr2)) - So(p.Len(), ShouldEqual, 1) + So(p.Len(), ShouldEqual, conf.MaxRPCPoolPhysicalConnection) p.Close() So(p.Len(), ShouldEqual, 0) - }) Convey("session pool get instance", t, func(c C) { diff --git a/rpc/rpcutil.go b/rpc/rpcutil.go index 96e2aca11..3588ca5a5 100644 --- a/rpc/rpcutil.go +++ b/rpc/rpcutil.go @@ -18,19 +18,23 @@ package rpc import ( "context" - "errors" + "expvar" "io" "math/rand" "net" "net/rpc" "strings" "sync" + "time" "github.com/CovenantSQL/CovenantSQL/crypto/kms" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/pkg/errors" mux "github.com/xtaci/smux" + mw "github.com/zserge/metric" ) var ( @@ -41,6 +45,8 @@ var ( currentBP proto.NodeID // currentBPLock represents the chief block producer access lock. currentBPLock sync.Mutex + // callRPCExpvarLock is the lock of RPC Call Publish lock + callRPCExpvarLock sync.Mutex ) // PersistentCaller is a wrapper for session pooling and RPC calling. 
@@ -66,17 +72,16 @@ func (c *PersistentCaller) initClient(isAnonymous bool) (err error) { c.Lock() defer c.Unlock() if c.client == nil { - log.Debug("init new rpc client") var conn net.Conn conn, err = DialToNode(c.TargetID, c.pool, isAnonymous) if err != nil { - log.WithField("target", c.TargetID).WithError(err).Error("dial to node failed") + err = errors.Wrap(err, "dial to node failed") return } //conn.SetDeadline(time.Time{}) c.client, err = InitClientConn(conn) if err != nil { - log.WithError(err).Error("init RPC client failed") + err = errors.Wrap(err, "init RPC client failed") return } } @@ -85,9 +90,14 @@ func (c *PersistentCaller) initClient(isAnonymous bool) (err error) { // Call invokes the named function, waits for it to complete, and returns its error status. func (c *PersistentCaller) Call(method string, args interface{}, reply interface{}) (err error) { + startTime := time.Now() + defer func() { + recordRPCCost(startTime, method, err) + }() + err = c.initClient(method == route.DHTPing.String()) if err != nil { - log.WithError(err).Error("init PersistentCaller client failed") + err = errors.Wrap(err, "init PersistentCaller client failed") return } err = c.client.Call(method, args, reply) @@ -101,10 +111,11 @@ func (c *PersistentCaller) Call(method string, args interface{}, reply interface // if got EOF, retry once reconnectErr := c.ResetClient(method) if reconnectErr != nil { - log.WithField("rpc", method).WithError(reconnectErr).Error("reconnect failed") + err = errors.Wrap(reconnectErr, "reconnect failed") } } - log.WithField("rpc", method).WithError(err).Error("call RPC failed") + err = errors.Wrapf(err, "call %s failed", method) + return } return } @@ -124,7 +135,7 @@ func (c *PersistentCaller) CloseStream() { if c.client.Conn != nil { stream, ok := c.client.Conn.(*mux.Stream) if ok { - stream.Close() + _ = stream.Close() } } c.client.Close() @@ -155,12 +166,51 @@ func (c *Caller) CallNode( return c.CallNodeWithContext(context.Background(), node, 
method, args, reply) } +func recordRPCCost(startTime time.Time, method string, err error) { + var ( + name, nameC string + val, valC expvar.Var + ) + costTime := time.Since(startTime) + if err == nil { + name = "t_succ:" + method + nameC = "c_succ:" + method + } else { + name = "t_fail:" + method + nameC = "c_fail:" + method + } + // Optimistically, val will not be nil except the first Call of method + // expvar uses sync.Map + // So, we try it first without lock + val = expvar.Get(name) + valC = expvar.Get(nameC) + if val == nil || valC == nil { + callRPCExpvarLock.Lock() + val = expvar.Get(name) + if val == nil { + expvar.Publish(name, mw.NewHistogram("10s1s", "1m5s", "1h1m")) + expvar.Publish(nameC, mw.NewCounter("10s1s", "1h1m")) + } + callRPCExpvarLock.Unlock() + val = expvar.Get(name) + valC = expvar.Get(nameC) + } + val.(mw.Metric).Add(costTime.Seconds()) + valC.(mw.Metric).Add(1) + return +} + // CallNodeWithContext invokes the named function, waits for it to complete or context timeout, and returns its error status. 
func (c *Caller) CallNodeWithContext( ctx context.Context, node proto.NodeID, method string, args interface{}, reply interface{}) (err error) { + startTime := time.Now() + defer func() { + recordRPCCost(startTime, method, err) + }() + conn, err := DialToNode(node, c.pool, method == route.DHTPing.String()) if err != nil { - log.WithField("node", node).WithError(err).Error("dial to node failed") + err = errors.Wrapf(err, "dial to node %s failed", node) return } @@ -175,7 +225,7 @@ func (c *Caller) CallNodeWithContext( client, err := InitClientConn(conn) if err != nil { - log.WithError(err).Error("init RPC client failed") + err = errors.Wrap(err, "init RPC client failed") return } @@ -198,11 +248,11 @@ func (c *Caller) CallNodeWithContext( func GetNodeAddr(id *proto.RawNodeID) (addr string, err error) { addr, err = route.GetNodeAddrCache(id) if err != nil { - log.WithField("target", id.String()).WithError(err).Info("get node addr from cache failed") + //log.WithField("target", id.String()).WithError(err).Debug("get node addr from cache failed") if err == route.ErrUnknownNodeID { BPs := route.GetBPs() if len(BPs) == 0 { - log.Error("no available BP") + err = errors.New("no available BP") return } client := NewCaller() @@ -215,10 +265,7 @@ func GetNodeAddr(id *proto.RawNodeID) (addr string, err error) { method := "DHT.FindNode" err = client.CallNode(bp, method, reqFN, respFN) if err != nil { - log.WithFields(log.Fields{ - "bpNode": bp, - "rpc": method, - }).WithError(err).Error("call dht rpc failed") + err = errors.Wrapf(err, "call dht rpc %s to %s failed", method, bp) return } route.SetNodeAddrCache(id, respFN.Node.Addr) @@ -232,11 +279,11 @@ func GetNodeAddr(id *proto.RawNodeID) (addr string, err error) { func GetNodeInfo(id *proto.RawNodeID) (nodeInfo *proto.Node, err error) { nodeInfo, err = kms.GetNodeInfo(proto.NodeID(id.String())) if err != nil { - log.WithField("target", id.String()).WithError(err).Info("get node info from KMS failed") - if err == 
kms.ErrKeyNotFound { + //log.WithField("target", id.String()).WithError(err).Info("get node info from KMS failed") + if errors.Cause(err) == kms.ErrKeyNotFound { BPs := route.GetBPs() if len(BPs) == 0 { - log.Error("no available BP") + err = errors.New("no available BP") return } client := NewCaller() @@ -248,10 +295,7 @@ func GetNodeInfo(id *proto.RawNodeID) (nodeInfo *proto.Node, err error) { method := "DHT.FindNode" err = client.CallNode(bp, method, reqFN, respFN) if err != nil { - log.WithFields(log.Fields{ - "bpNode": bp, - "rpc": method, - }).WithError(err).Error("call dht rpc failed") + err = errors.Wrapf(err, "call dht rpc %s to %s failed", method, bp) return } nodeInfo = respFN.Node @@ -279,11 +323,9 @@ func PingBP(node *proto.Node, BPNodeID proto.NodeID) (err error) { resp := new(proto.PingResp) err = client.CallNode(BPNodeID, "DHT.Ping", req, resp) if err != nil { - log.WithError(err).Error("call DHT.Ping failed") + err = errors.Wrap(err, "call DHT.Ping failed") return } - log.Debugf("PingBP resp: %#v", resp) - return } @@ -317,8 +359,7 @@ func GetCurrentBP() (bpNodeID proto.NodeID, err error) { ID: localNodeID, Roles: []proto.ServerRole{ proto.Leader, - // only leader is capable of allocating database in current implementation - //proto.Follower, + proto.Follower, }, Count: 1, } @@ -328,16 +369,16 @@ func GetCurrentBP() (bpNodeID proto.NodeID, err error) { } if len(res.Nodes) <= 0 { - log.Error("get no hash nearest block producer nodes") // node not found - err = ErrNoChiefBlockProducerAvailable + err = errors.Wrap(ErrNoChiefBlockProducerAvailable, + "get no hash nearest block producer nodes") return } if res.Nodes[0].Role != proto.Leader && res.Nodes[0].Role != proto.Follower { - log.Error("no suitable nodes with proper block producer role") // not block producer - err = ErrNoChiefBlockProducerAvailable + err = errors.Wrap(ErrNoChiefBlockProducerAvailable, + "no suitable nodes with proper block producer role") return } @@ -362,3 +403,54 @@ func 
RequestBP(method string, req interface{}, resp interface{}) (err error) { } return NewCaller().CallNode(bp, method, req, resp) } + +// RegisterNodeToBP registers the current node to bp network. +func RegisterNodeToBP(timeout time.Duration) (err error) { + // get local node id + localNodeID, err := kms.GetLocalNodeID() + if err != nil { + err = errors.Wrap(err, "register node to BP") + return + } + + // get local node info + localNodeInfo, err := kms.GetNodeInfo(localNodeID) + if err != nil { + err = errors.Wrap(err, "register node to BP") + return + } + + log.WithField("node", localNodeInfo).Debug("construct local node info") + + pingWaitCh := make(chan proto.NodeID) + bpNodeIDs := route.GetBPs() + for _, bpNodeID := range bpNodeIDs { + go func(ch chan proto.NodeID, id proto.NodeID) { + for { + err := PingBP(localNodeInfo, id) + if err == nil { + log.Infof("ping BP succeed: %v", localNodeInfo) + ch <- id + return + } + if strings.Contains(err.Error(), kt.ErrNotLeader.Error()) { + log.Debug("stop ping non leader BP node") + return + } + + log.Warnf("ping BP failed: %v", err) + time.Sleep(3 * time.Second) + } + }(pingWaitCh, bpNodeID) + } + + select { + case bp := <-pingWaitCh: + close(pingWaitCh) + log.WithField("BP", bp).Infof("ping BP succeed") + case <-time.After(timeout): + return errors.New("ping BP timeout") + } + + return +} diff --git a/rpc/rpcutil_test.go b/rpc/rpcutil_test.go index 00c51c87f..088b14dca 100644 --- a/rpc/rpcutil_test.go +++ b/rpc/rpcutil_test.go @@ -18,6 +18,7 @@ package rpc import ( "context" + "fmt" "os" "path/filepath" "runtime" @@ -42,7 +43,7 @@ const ( ) func TestCaller_CallNode(t *testing.T) { - log.SetLevel(log.DebugLevel) + log.SetLevel(log.FatalLevel) os.Remove(PubKeyStorePath) defer os.Remove(PubKeyStorePath) os.Remove(publicKeyStore) @@ -162,7 +163,7 @@ func TestCaller_CallNode(t *testing.T) { } func TestNewPersistentCaller(t *testing.T) { - log.SetLevel(log.DebugLevel) + log.SetLevel(log.FatalLevel) os.Remove(PubKeyStorePath) 
defer os.Remove(PubKeyStorePath) os.Remove(publicKeyStore) @@ -227,8 +228,13 @@ func TestNewPersistentCaller(t *testing.T) { err = client.Call("DHT.FindNeighbor", req, resp) if err == nil || !strings.Contains(err.Error(), "not permitted") { + if err != nil { + t.Errorf("unexpected error %s", err.Error()) + } else { + t.Errorf("unexpected resp %v", resp) + } t.Fatal("anonymous ETLS connection used by " + - "RPC other than DHTPing shuold not permitted") + "RPC other than DHTPing should not permitted") } // close anonymous ETLS connection, and create new one @@ -269,11 +275,11 @@ func TestNewPersistentCaller(t *testing.T) { client2.CloseStream() wg.Wait() - sess, ok := client2.pool.getSessionFromPool(conf.GConf.BP.NodeID) + sess, ok := client2.pool.getSession(conf.GConf.BP.NodeID) if !ok { t.Fatalf("can not find session for %s", conf.GConf.BP.NodeID) } - sess.conn.Close() + sess.Close() client3 := NewPersistentCaller(conf.GConf.BP.NodeID) err = client3.Call("DHT.FindNeighbor", reqF2, respF2) @@ -284,6 +290,82 @@ func TestNewPersistentCaller(t *testing.T) { } +func BenchmarkPersistentCaller_CallKayakLog(b *testing.B) { + log.SetLevel(log.FatalLevel) + os.Remove(PubKeyStorePath) + defer os.Remove(PubKeyStorePath) + os.Remove(publicKeyStore) + defer os.Remove(publicKeyStore) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) + defer cancel() + err := utils.WaitForPorts(ctx, "127.0.0.1", []int{ + 2230, + }, time.Millisecond*200) + + if err != nil { + log.Fatalf("wait for port ready timeout: %v", err) + } + + _, testFile, _, _ := runtime.Caller(0) + confFile := filepath.Join(filepath.Dir(testFile), "../test/node_standalone/config.yaml") + privateKeyPath := filepath.Join(filepath.Dir(testFile), "../test/node_standalone/private.key") + + conf.GConf, _ = conf.LoadConfig(confFile) + log.Debugf("GConf: %#v", conf.GConf) + // reset the once + route.Once = sync.Once{} + route.InitKMS(publicKeyStore) + + addr := conf.GConf.ListenAddr + _, err = 
route.NewDHTService(PubKeyStorePath, new(consistent.KMSStorage), true) + + server, err := NewServerWithService(ServiceMap{"Test": &fakeService{}}) + if err != nil { + b.Fatal(err) + } + + _ = server.InitRPCServer(addr, privateKeyPath, []byte{}) + go server.Serve() + + client := NewPersistentCaller(conf.GConf.BP.NodeID) + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + req := &FakeRequest{} + req.Log.Data = []byte(strings.Repeat("1", 500)) + err = client.Call("Test.Call", req, nil) + if err != nil { + b.Error(err) + } + } + }) + b.StopTimer() + time.Sleep(5 * time.Second) + server.Stop() + GetSessionPoolInstance().Close() +} + +type fakeService struct{} + +type FakeRequest struct { + proto.Envelope + Instance string + Log struct { + Index uint64 // log index + Version uint64 // log version + Type uint8 // log type + Producer proto.NodeID // producer node + DataLength uint64 // data length + Data []byte + } +} + +func (s *fakeService) Call(req *FakeRequest, resp *interface{}) (err error) { + time.Sleep(time.Microsecond * 600) + return +} + func BenchmarkPersistentCaller_Call(b *testing.B) { log.SetLevel(log.InfoLevel) os.Remove(PubKeyStorePath) @@ -351,7 +433,6 @@ func BenchmarkPersistentCaller_Call(b *testing.B) { }) b.Run("benchmark Persistent Call parallel Nil", func(b *testing.B) { - b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { @@ -363,6 +444,18 @@ func BenchmarkPersistentCaller_Call(b *testing.B) { }) }) + b.Run("benchmark Persistent Call parallel 1k", func(b *testing.B) { + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + err := client.Call("DHT.Nil", strings.Repeat("a", 1000), nil) + if err != nil { + b.Error(err) + } + } + }) + }) + req := &proto.FindNeighborReq{ ID: "1234567812345678123456781234567812345678123456781234567812345678", Count: 10, @@ -426,3 +519,28 @@ func BenchmarkPersistentCaller_Call(b *testing.B) { server.Stop() } + +func TestRecordRPCCost(t *testing.T) { + 
Convey("Bug: bad critical section for multiple values", t, func(c C) { + var ( + start = time.Now() + rounds = 1000 + concurrent = 10 + wg = &sync.WaitGroup{} + body = func(i int) { + defer func() { + c.So(recover(), ShouldBeNil) + wg.Done() + }() + recordRPCCost(start, fmt.Sprintf("M%d", i), nil) + } + ) + for i := 0; i < rounds; i++ { + for j := 0; j < concurrent; j++ { + wg.Add(1) + go body(i) + } + wg.Wait() + } + }) +} diff --git a/rpc/server.go b/rpc/server.go index 30d1291e7..409e10d76 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -65,13 +65,13 @@ func (s *Server) InitRPCServer( err = kms.InitLocalKeyPair(privateKeyPath, masterKey) if err != nil { - log.WithError(err).Error("init local key pair failed") + err = errors.Wrap(err, "init local key pair failed") return } l, err := etls.NewCryptoListener("tcp", addr, handleCipher) if err != nil { - log.WithError(err).Error("create crypto listener failed") + err = errors.Wrap(err, "create crypto listener failed") return } @@ -111,6 +111,7 @@ serverLoop: if err != nil { continue } + log.WithField("remote", conn.RemoteAddr().String()).Info("accept") go s.handleConn(conn) } } @@ -122,15 +123,21 @@ func (s *Server) handleConn(conn net.Conn) { // remote remoteNodeID connection awareness var remoteNodeID *proto.RawNodeID + var err error if c, ok := conn.(*etls.CryptoConn); ok { + conn, err = s.Listener.(*etls.CryptoListener).CHandler(c.Conn) + if err != nil { + err = errors.Wrap(err, "handle ETLS handler failed") + return + } // set node id - remoteNodeID = c.NodeID + remoteNodeID = conn.(*etls.CryptoConn).NodeID } - sess, err := mux.Server(conn, YamuxConfig) + sess, err := mux.Server(conn, MuxConfig) if err != nil { - log.Error(err) + err = errors.Wrap(err, "create mux server failed") return } defer sess.Close() @@ -145,9 +152,9 @@ sessionLoop: muxConn, err := sess.AcceptStream() if err != nil { if err == io.EOF { - log.WithField("remote", remoteNodeID).Debug("session connection closed") + //log.WithField("remote", 
remoteNodeID).Debug("session connection closed") } else { - log.WithField("remote", remoteNodeID).WithError(err).Error("session accept failed") + err = errors.Wrapf(err, "session accept failed, remote: %s", remoteNodeID) } break sessionLoop } @@ -179,10 +186,16 @@ func handleCipher(conn net.Conn) (cryptoConn *etls.CryptoConn, err error) { // NodeID + Uint256 Nonce headerBuf := make([]byte, ETLSHeaderSize) rCount, err := conn.Read(headerBuf) - if err != nil || rCount != ETLSHeaderSize { - log.WithError(err).Error("read node header error") + if err != nil { + err = errors.Wrap(err, "read node header error") return } + + if rCount != ETLSHeaderSize { + err = errors.New("invalid ETLS header size") + return + } + if headerBuf[0] != etls.ETLSMagicBytes[0] || headerBuf[1] != etls.ETLSMagicBytes[1] { err = errors.New("bad ETLS header") return @@ -199,7 +212,7 @@ func handleCipher(conn net.Conn) (cryptoConn *etls.CryptoConn, err error) { rawNodeID.IsEqual(&kms.AnonymousRawNodeID.Hash), ) if err != nil { - log.WithField("target", rawNodeID.String()).WithError(err).Error("get shared secret") + err = errors.Wrapf(err, "get shared secret, target: %s", rawNodeID.String()) return } cipher := etls.NewCipher(symmetricKey) diff --git a/rpc/server_test.go b/rpc/server_test.go index 04b92bcc8..f4a4b5ea6 100644 --- a/rpc/server_test.go +++ b/rpc/server_test.go @@ -79,7 +79,7 @@ func (s *TestService) IncCounterSimpleArgs(step int, ret *int) error { } func TestIncCounter(t *testing.T) { - log.SetLevel(log.DebugLevel) + log.SetLevel(log.FatalLevel) addr := "127.0.0.1:0" l, err := net.Listen("tcp", addr) if err != nil { @@ -120,7 +120,7 @@ func TestIncCounter(t *testing.T) { } func TestIncCounterSimpleArgs(t *testing.T) { - log.SetLevel(log.DebugLevel) + log.SetLevel(log.FatalLevel) addr := "127.0.0.1:0" l, err := net.Listen("tcp", addr) if err != nil { @@ -149,7 +149,7 @@ func TestIncCounterSimpleArgs(t *testing.T) { func TestEncryptIncCounterSimpleArgs(t *testing.T) { defer 
os.Remove(PubKeyStorePath) - log.SetLevel(log.DebugLevel) + log.SetLevel(log.FatalLevel) addr := "127.0.0.1:0" masterKey := []byte("abc") server, err := NewServerWithService(ServiceMap{"Test": NewTestService()}) @@ -185,10 +185,56 @@ func TestEncryptIncCounterSimpleArgs(t *testing.T) { server.Stop() } +func TestETLSBug(t *testing.T) { + defer os.Remove(PubKeyStorePath) + log.SetLevel(log.FatalLevel) + addr := "127.0.0.1:0" + masterKey := []byte("abc") + server, err := NewServerWithService(ServiceMap{"Test": NewTestService()}) + if err != nil { + log.Fatal(err) + } + + route.NewDHTService(PubKeyStorePath, new(consistent.KMSStorage), true) + server.InitRPCServer(addr, "../keys/test.key", masterKey) + go server.Serve() + defer server.Stop() + + // This should not block listener + var rawConn net.Conn + rawConn, err = net.Dial("tcp", server.Listener.Addr().String()) + if err != nil { + log.Fatal(err) + } + defer rawConn.Close() + + publicKey, err := kms.GetLocalPublicKey() + nonce := asymmetric.GetPubKeyNonce(publicKey, 10, 100*time.Millisecond, nil) + serverNodeID := proto.NodeID(nonce.Hash.String()) + kms.SetPublicKey(serverNodeID, nonce.Nonce, publicKey) + kms.SetLocalNodeIDNonce(nonce.Hash.CloneBytes(), &nonce.Nonce) + route.SetNodeAddrCache(&proto.RawNodeID{Hash: nonce.Hash}, server.Listener.Addr().String()) + + cryptoConn, err := DialToNode(serverNodeID, nil, false) + cryptoConn.SetDeadline(time.Now().Add(3 * time.Second)) + client, err := InitClientConn(cryptoConn) + if err != nil { + log.Fatal(err) + } + defer client.Close() + + repSimple := new(int) + err = client.Call("Test.IncCounterSimpleArgs", 10, repSimple) + if err != nil { + log.Fatal(err) + } + CheckNum(*repSimple, 10, t) +} + func TestEncPingFindNeighbor(t *testing.T) { os.Remove(PubKeyStorePath) defer os.Remove(PubKeyStorePath) - log.SetLevel(log.DebugLevel) + log.SetLevel(log.FatalLevel) addr := "127.0.0.1:0" masterKey := []byte("abc") dht, err := route.NewDHTService(PubKeyStorePath, 
new(consistent.KMSStorage), true) @@ -269,7 +315,7 @@ func TestEncPingFindNeighbor(t *testing.T) { } func TestServer_Close(t *testing.T) { - log.SetLevel(log.DebugLevel) + log.SetLevel(log.FatalLevel) addr := "127.0.0.1:0" l, err := net.Listen("tcp", addr) if err != nil { diff --git a/rpc/sharedsecret.go b/rpc/sharedsecret.go index 86d150464..c1a5be739 100644 --- a/rpc/sharedsecret.go +++ b/rpc/sharedsecret.go @@ -17,15 +17,15 @@ package rpc import ( - "fmt" "sync" + "github.com/pkg/errors" + "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" - "github.com/CovenantSQL/CovenantSQL/utils/log" ) var symmetricKeyCache sync.Map @@ -34,9 +34,9 @@ var symmetricKeyCache sync.Map func GetSharedSecretWith(nodeID *proto.RawNodeID, isAnonymous bool) (symmetricKey []byte, err error) { if isAnonymous { symmetricKey = []byte(`!&\\!qEyey*\cbLc,aKl`) - log.Debug("using anonymous ETLS") + //log.Debug("using anonymous ETLS") } else { - symmetricKeyI, ok := symmetricKeyCache.Load(nodeID) + symmetricKeyI, ok := symmetricKeyCache.Load(nodeID.String()) if ok { symmetricKey, _ = symmetricKeyI.([]byte) } else { @@ -46,7 +46,7 @@ func GetSharedSecretWith(nodeID *proto.RawNodeID, isAnonymous bool) (symmetricKe } else if conf.RoleTag[0] == conf.BlockProducerBuildTag[0] { remotePublicKey, err = kms.GetPublicKey(proto.NodeID(nodeID.String())) if err != nil { - log.WithField("node", nodeID).WithError(err).Error("get public key locally failed") + err = errors.Wrapf(err, "get public key locally failed, node: %s", nodeID) return } } else { @@ -54,7 +54,7 @@ func GetSharedSecretWith(nodeID *proto.RawNodeID, isAnonymous bool) (symmetricKe var nodeInfo *proto.Node nodeInfo, err = GetNodeInfo(nodeID) if err != nil { - log.WithField("node", nodeID).WithError(err).Error("get public key failed") + err = errors.Wrapf(err, "get 
public key failed, node: %s", nodeID) return } remotePublicKey = nodeInfo.PublicKey @@ -63,17 +63,17 @@ func GetSharedSecretWith(nodeID *proto.RawNodeID, isAnonymous bool) (symmetricKe var localPrivateKey *asymmetric.PrivateKey localPrivateKey, err = kms.GetLocalPrivateKey() if err != nil { - log.WithError(err).Error("get local private key failed") + err = errors.Wrap(err, "get local private key failed") return } symmetricKey = asymmetric.GenECDHSharedSecret(localPrivateKey, remotePublicKey) - symmetricKeyCache.Store(nodeID, symmetricKey) - log.WithFields(log.Fields{ - "node": nodeID.String(), - "remotePub": fmt.Sprintf("%#x", remotePublicKey.Serialize()), - "sessionKey": fmt.Sprintf("%#x", symmetricKey), - }).Debug("generated shared secret") + symmetricKeyCache.Store(nodeID.String(), symmetricKey) + //log.WithFields(log.Fields{ + // "node": nodeID.String(), + // "remotePub": fmt.Sprintf("%#x", remotePublicKey.Serialize()), + // "sessionKey": fmt.Sprintf("%#x", symmetricKey), + //}).Debug("generated shared secret") } //log.Debugf("ECDH for %s Public Key: %x, Private Key: %x Session Key: %x", // nodeID.ToNodeID(), remotePublicKey.Serialize(), localPrivateKey.Serialize(), symmetricKey) diff --git a/sqlchain/ackindex.go b/sqlchain/ackindex.go index 1e596db45..ded9e5965 100644 --- a/sqlchain/ackindex.go +++ b/sqlchain/ackindex.go @@ -29,18 +29,15 @@ var ( // Global atomic counters for stats multiIndexCount int32 responseCount int32 - ackTrackerCount int32 + ackCount int32 ) -type ackTracker struct { - resp *types.SignedResponseHeader - ack *types.SignedAckHeader -} - type multiAckIndex struct { sync.RWMutex - ri map[types.QueryKey]*types.SignedResponseHeader // ri is the index of queries without acks - qi map[types.QueryKey]*ackTracker // qi is the index of query trackers + // respIndex is the index of query responses without acks + respIndex map[types.QueryKey]*types.SignedResponseHeader + // ackIndex is the index of acknowledged queries + ackIndex 
map[types.QueryKey]*types.SignedAckHeader } func (i *multiAckIndex) addResponse(resp *types.SignedResponseHeader) (err error) { @@ -48,14 +45,14 @@ func (i *multiAckIndex) addResponse(resp *types.SignedResponseHeader) (err error log.Debugf("adding key %s <-- resp %s", &key, resp.Hash()) i.Lock() defer i.Unlock() - if oresp, ok := i.ri[key]; ok { + if oresp, ok := i.respIndex[key]; ok { if oresp.Hash() != resp.Hash() { err = errors.Wrapf(ErrResponseSeqNotMatch, "add key %s <-- resp %s", &key, resp.Hash()) return } return } - i.ri[key] = resp + i.respIndex[key] = resp atomic.AddInt32(&responseCount, 1) return } @@ -64,44 +61,44 @@ func (i *multiAckIndex) register(ack *types.SignedAckHeader) (err error) { var ( resp *types.SignedResponseHeader ok bool - key = ack.SignedRequestHeader().GetQueryKey() + key = ack.GetQueryKey() ) log.Debugf("registering key %s <-- ack %s", &key, ack.Hash()) i.Lock() defer i.Unlock() - if resp, ok = i.ri[key]; !ok { + if resp, ok = i.respIndex[key]; !ok { err = errors.Wrapf(ErrQueryNotFound, "register key %s <-- ack %s", &key, ack.Hash()) return } - delete(i.ri, key) - i.qi[key] = &ackTracker{ - resp: resp, - ack: ack, + if resp.Hash() != ack.GetResponseHash() { + err = errors.Wrapf(ErrResponseSeqNotMatch, "register key %s <-- ack %s", &key, ack.Hash()) } + delete(i.respIndex, key) + i.ackIndex[key] = ack atomic.AddInt32(&responseCount, -1) - atomic.AddInt32(&ackTrackerCount, 1) + atomic.AddInt32(&ackCount, 1) return } func (i *multiAckIndex) remove(ack *types.SignedAckHeader) (err error) { - var key = ack.SignedRequestHeader().GetQueryKey() + var key = ack.GetQueryKey() log.Debugf("removing key %s -x- ack %s", &key, ack.Hash()) i.Lock() defer i.Unlock() - if _, ok := i.ri[key]; ok { - delete(i.ri, key) + if _, ok := i.respIndex[key]; ok { + delete(i.respIndex, key) atomic.AddInt32(&responseCount, -1) return } - if oack, ok := i.qi[key]; ok { - if oack.ack.Hash() != ack.Hash() { + if oack, ok := i.ackIndex[key]; ok { + if oack.Hash() != 
ack.Hash() { err = errors.Wrapf( ErrMultipleAckOfSeqNo, "remove key %s -x- ack %s", &key, ack.Hash()) return } - delete(i.qi, key) - atomic.AddInt32(&ackTrackerCount, -1) + delete(i.ackIndex, key) + atomic.AddInt32(&ackCount, -1) return } err = errors.Wrapf(ErrQueryNotFound, "remove key %s -x- ack %s", &key, ack.Hash()) @@ -111,8 +108,8 @@ func (i *multiAckIndex) remove(ack *types.SignedAckHeader) (err error) { func (i *multiAckIndex) acks() (ret []*types.SignedAckHeader) { i.RLock() defer i.RUnlock() - for _, v := range i.qi { - ret = append(ret, v.ack) + for _, v := range i.ackIndex { + ret = append(ret, v) } return } @@ -121,10 +118,10 @@ func (i *multiAckIndex) expire() { i.RLock() defer i.RUnlock() // TODO(leventeliu): need further processing. - for _, v := range i.ri { + for _, v := range i.respIndex { log.WithFields(log.Fields{ - "request_hash": v.Request.Hash(), - "request_time": v.Request.Timestamp, + "request_hash": v.GetRequestHash(), + "request_time": v.GetRequestTimestamp(), "request_type": v.Request.QueryType, "request_node": v.Request.NodeID, "response_hash": v.Hash(), @@ -132,18 +129,18 @@ func (i *multiAckIndex) expire() { "response_time": v.Timestamp, }).Warn("query expires without acknowledgement") } - for _, v := range i.qi { + for _, v := range i.ackIndex { log.WithFields(log.Fields{ - "request_hash": v.resp.Request.Hash(), - "request_time": v.resp.Request.Timestamp, - "request_type": v.resp.Request.QueryType, - "request_node": v.resp.Request.NodeID, - "response_hash": v.ack.Response.Hash(), - "response_node": v.ack.Response.NodeID, - "response_time": v.ack.Response.Timestamp, - "ack_hash": v.ack.Hash(), - "ack_node": v.ack.NodeID, - "ack_time": v.ack.Timestamp, + "request_hash": v.GetRequestHash(), + "request_time": v.GetRequestTimestamp(), + "request_type": v.Response.Request.QueryType, + "request_node": v.Response.Request.NodeID, + "response_hash": v.GetResponseHash(), + "response_node": v.Response.NodeID, + "response_time": 
v.GetResponseTimestamp(), + "ack_hash": v.Hash(), + "ack_node": v.NodeID, + "ack_time": v.Timestamp, }).Warn("query expires without block producing") } } @@ -171,8 +168,8 @@ func (i *ackIndex) load(h int32) (mi *multiAckIndex, err error) { } if mi, ok = i.hi[h]; !ok { mi = &multiAckIndex{ - ri: make(map[types.QueryKey]*types.SignedResponseHeader), - qi: make(map[types.QueryKey]*ackTracker), + respIndex: make(map[types.QueryKey]*types.SignedResponseHeader), + ackIndex: make(map[types.QueryKey]*types.SignedAckHeader), } i.hi[h] = mi atomic.AddInt32(&multiIndexCount, 1) @@ -194,8 +191,8 @@ func (i *ackIndex) advance(h int32) { // Record expired and not acknowledged queries for _, v := range dl { v.expire() - atomic.AddInt32(&responseCount, int32(-len(v.ri))) - atomic.AddInt32(&ackTrackerCount, int32(-len(v.qi))) + atomic.AddInt32(&responseCount, int32(-len(v.respIndex))) + atomic.AddInt32(&ackCount, int32(-len(v.ackIndex))) } atomic.AddInt32(&multiIndexCount, int32(-len(dl))) } diff --git a/sqlchain/ackindex_test.go b/sqlchain/ackindex_test.go index 77ccd4d18..53698613d 100644 --- a/sqlchain/ackindex_test.go +++ b/sqlchain/ackindex_test.go @@ -32,19 +32,17 @@ func TestAckIndex(t *testing.T) { ai = newAckIndex() resp = &types.SignedResponseHeader{ ResponseHeader: types.ResponseHeader{ - Request: types.SignedRequestHeader{ - RequestHeader: types.RequestHeader{ - NodeID: proto.NodeID( - "0000000000000000000000000000000000000000000000000000000000000000"), - ConnectionID: 0, - SeqNo: 0, - }, + Request: types.RequestHeader{ + NodeID: proto.NodeID( + "0000000000000000000000000000000000000000000000000000000000000000"), + ConnectionID: 0, + SeqNo: 0, }, }, } ack = &types.SignedAckHeader{ AckHeader: types.AckHeader{ - Response: *resp, + Response: resp.ResponseHeader, }, } ) diff --git a/sqlchain/chain.go b/sqlchain/chain.go index cd314ef99..2ee56f765 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -19,6 +19,7 @@ package sqlchain import ( "bytes" "context" + 
"database/sql" "encoding/binary" "fmt" "os" @@ -162,7 +163,7 @@ func NewChainWithContext(ctx context.Context, c *Config) (chain *Chain, err erro return } - log.Debugf("create new chain bdb %s", bdbFile) + log.WithField("db", c.DatabaseID).Debugf("create new chain bdb %s", bdbFile) // Open LevelDB for ack/request/response tdbFile := c.ChainFilePrefix + "-ack-req-resp.ldb" @@ -172,19 +173,13 @@ func NewChainWithContext(ctx context.Context, c *Config) (chain *Chain, err erro return } - log.Debugf("create new chain tdb %s", tdbFile) + log.WithField("db", c.DatabaseID).Debugf("create new chain tdb %s", tdbFile) - // Open x.State - var ( - strg xi.Storage - state *x.State - ) + // Open storage + var strg xi.Storage if strg, err = xs.NewSqlite(c.DataFile); err != nil { return } - if state, err = x.NewState(c.Server, strg); err != nil { - return - } // Cache local private key var ( @@ -197,7 +192,7 @@ func NewChainWithContext(ctx context.Context, c *Config) (chain *Chain, err erro } addr, err = crypto.PubKeyHash(pk.PubKey()) if err != nil { - log.WithError(err).Warning("failed to generate addr in NewChain") + log.WithError(err).WithField("db", c.DatabaseID).Warning("failed to generate addr in NewChain") return } @@ -207,7 +202,7 @@ func NewChainWithContext(ctx context.Context, c *Config) (chain *Chain, err erro tdb: tdb, bi: newBlockIndex(), ai: newAckIndex(), - st: state, + st: x.NewState(sql.IsolationLevel(c.IsolationLevel), c.Server, strg), cl: rpc.NewCaller(), rt: newRunTime(ctx, c), ctx: ctx, @@ -261,16 +256,10 @@ func LoadChainWithContext(ctx context.Context, c *Config) (chain *Chain, err err } // Open x.State - var ( - strg xi.Storage - xstate *x.State - ) + var strg xi.Storage if strg, err = xs.NewSqlite(c.DataFile); err != nil { return } - if xstate, err = x.NewState(c.Server, strg); err != nil { - return - } // Cache local private key var ( @@ -283,7 +272,7 @@ func LoadChainWithContext(ctx context.Context, c *Config) (chain *Chain, err err } addr, err = 
crypto.PubKeyHash(pk.PubKey()) if err != nil { - log.WithError(err).Warning("failed to generate addr in LoadChain") + log.WithError(err).WithField("db", c.DatabaseID).Warning("failed to generate addr in LoadChain") return } @@ -293,7 +282,7 @@ func LoadChainWithContext(ctx context.Context, c *Config) (chain *Chain, err err tdb: tdb, bi: newBlockIndex(), ai: newAckIndex(), - st: xstate, + st: x.NewState(sql.IsolationLevel(c.IsolationLevel), c.Server, strg), cl: rpc.NewCaller(), rt: newRunTime(ctx, c), ctx: ctx, @@ -328,6 +317,7 @@ func LoadChainWithContext(ctx context.Context, c *Config) (chain *Chain, err err log.WithFields(log.Fields{ "peer": chain.rt.getPeerInfoString(), "state": st, + "db": c.DatabaseID, }).Debug("loading state from database") // Read blocks and rebuild memory index @@ -355,6 +345,7 @@ func LoadChainWithContext(ctx context.Context, c *Config) (chain *Chain, err err log.WithFields(log.Fields{ "peer": chain.rt.getPeerInfoString(), "block": block.BlockHash().String(), + "db": c.DatabaseID, }).Debug("loading block from database") if last == nil { @@ -413,6 +404,7 @@ func LoadChainWithContext(ctx context.Context, c *Config) (chain *Chain, err err log.WithFields(log.Fields{ "height": h, "header": resp.Hash().String(), + "db": c.DatabaseID, }).Debug("loaded new resp header") } if err = respIter.Error(); err != nil { @@ -434,6 +426,7 @@ func LoadChainWithContext(ctx context.Context, c *Config) (chain *Chain, err err log.WithFields(log.Fields{ "height": h, "header": ack.Hash().String(), + "db": c.DatabaseID, }).Debug("loaded new ack header") } if err = respIter.Error(); err != nil { @@ -488,11 +481,12 @@ func (c *Chain) pushBlock(b *types.Block) (err error) { // Keep track of the queries from the new block var ierr error for i, v := range b.QueryTxs { - if ierr = c.addResponse(v.Response); ierr != nil { + if ierr = c.AddResponse(v.Response); ierr != nil { log.WithFields(log.Fields{ "index": i, "producer": b.Producer(), "block_hash": b.BlockHash(), + 
"db": c.databaseID, }).WithError(ierr).Warn("failed to add response to ackIndex") } } @@ -502,6 +496,7 @@ func (c *Chain) pushBlock(b *types.Block) (err error) { "index": i, "producer": b.Producer(), "block_hash": b.BlockHash(), + "db": c.databaseID, }).WithError(ierr).Warn("failed to remove Ack from ackIndex") } } @@ -524,6 +519,7 @@ func (c *Chain) pushBlock(b *types.Block) (err error) { return "|" }(), st.Head.String()[:8]), "headHeight": c.rt.getHead().Height, + "db": c.databaseID, }).Info("pushed new block") } @@ -532,8 +528,8 @@ func (c *Chain) pushBlock(b *types.Block) (err error) { // pushAckedQuery pushes a acknowledged, signed and verified query into the chain. func (c *Chain) pushAckedQuery(ack *types.SignedAckHeader) (err error) { - log.Debugf("push ack %s", ack.Hash().String()) - h := c.rt.getHeightFromTime(ack.SignedResponseHeader().Timestamp) + log.WithField("db", c.databaseID).Debugf("push ack %s", ack.Hash().String()) + h := c.rt.getHeightFromTime(ack.GetResponseTimestamp()) k := heightToKey(h) var enc *bytes.Buffer @@ -543,13 +539,13 @@ func (c *Chain) pushAckedQuery(ack *types.SignedAckHeader) (err error) { tdbKey := utils.ConcatAll(metaAckIndex[:], k, ack.Hash().AsBytes()) - if err = c.tdb.Put(tdbKey, enc.Bytes(), nil); err != nil { - err = errors.Wrapf(err, "put ack %d %s", h, ack.Hash().String()) + if err = c.register(ack); err != nil { + err = errors.Wrapf(err, "register ack %v at height %d", ack.Hash(), h) return } - if err = c.register(ack); err != nil { - err = errors.Wrapf(err, "register ack %v at height %d", ack.Hash(), h) + if err = c.tdb.Put(tdbKey, enc.Bytes(), nil); err != nil { + err = errors.Wrapf(err, "put ack %d %s", h, ack.Hash().String()) return } @@ -585,6 +581,10 @@ func (c *Chain) produceBlock(now time.Time) (err error) { // TODO(leventeliu): maybe block waiting at a ready channel instead? 
for !v.Ready() { time.Sleep(1 * time.Millisecond) + if c.rt.ctx.Err() != nil { + err = c.rt.ctx.Err() + return + } } block.QueryTxs[i] = &types.QueryAsTx{ // TODO(leventeliu): add acks for billing. @@ -609,6 +609,7 @@ func (c *Chain) produceBlock(now time.Time) (err error) { "curr_turn": c.rt.getNextTurn(), "using_timestamp": now.Format(time.RFC3339Nano), "block_hash": block.BlockHash().String(), + "db": c.databaseID, }).Debug("produced new block") // Advise new block to the other peers var ( @@ -648,6 +649,7 @@ func (c *Chain) produceBlock(now time.Time) (err error) { "curr_turn": c.rt.getNextTurn(), "using_timestamp": now.Format(time.RFC3339Nano), "block_hash": block.BlockHash().String(), + "db": c.databaseID, }).WithError(err).Error("failed to advise new block") } }(s) @@ -689,6 +691,7 @@ func (c *Chain) syncHead() { "curr_turn": c.rt.getNextTurn(), "head_height": c.rt.getHead().Height, "head_block": c.rt.getHead().Head.String(), + "db": c.databaseID, }).WithError(err).Debug( "Failed to fetch block from peer") } else { @@ -706,6 +709,7 @@ func (c *Chain) syncHead() { "curr_turn": c.rt.getNextTurn(), "head_height": c.rt.getHead().Height, "head_block": c.rt.getHead().Head.String(), + "db": c.databaseID, }).Debug( "Fetch block from remote peer successfully") succ = true @@ -721,6 +725,7 @@ func (c *Chain) syncHead() { "curr_turn": c.rt.getNextTurn(), "head_height": c.rt.getHead().Height, "head_block": c.rt.getHead().Head.String(), + "db": c.databaseID, }).Debug( "Cannot get block from any peer") } @@ -746,6 +751,7 @@ func (c *Chain) runCurrentTurn(now time.Time) { "head_height": c.rt.getHead().Height, "head_block": c.rt.getHead().Head.String(), "using_timestamp": now.Format(time.RFC3339Nano), + "db": c.databaseID, }).Debug("run current turn") if c.rt.getHead().Height < c.rt.getNextTurn()-1 { @@ -756,6 +762,7 @@ func (c *Chain) runCurrentTurn(now time.Time) { "head_height": c.rt.getHead().Height, "head_block": c.rt.getHead().Head.String(), "using_timestamp": 
now.Format(time.RFC3339Nano), + "db": c.databaseID, }).Error("A block will be skipped") } @@ -769,6 +776,7 @@ func (c *Chain) runCurrentTurn(now time.Time) { "time": c.rt.getChainTimeString(), "curr_turn": c.rt.getNextTurn(), "using_timestamp": now.Format(time.RFC3339Nano), + "db": c.databaseID, }).WithError(err).Error( "Failed to produce block") } @@ -792,6 +800,7 @@ func (c *Chain) mainCycle(ctx context.Context) { // "head_block": c.rt.getHead().Head.String(), // "using_timestamp": t.Format(time.RFC3339Nano), // "duration": d, + // "db": c.databaseID, //}).Debug("main cycle") time.Sleep(d) } else { @@ -806,6 +815,7 @@ func (c *Chain) sync() (err error) { log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), "time": c.rt.getChainTimeString(), + "db": c.databaseID, }).Debug("synchronizing chain state") for { @@ -857,6 +867,7 @@ func (c *Chain) processBlocks(ctx context.Context) { log.WithFields(log.Fields{ "height": h, "stashs": len(stash), + "db": c.databaseID, }).Debug("read new height from channel") if stash != nil { wg.Add(1) @@ -873,6 +884,7 @@ func (c *Chain) processBlocks(ctx context.Context) { "head_block": c.rt.getHead().Head.String(), "block_height": height, "block_hash": block.BlockHash().String(), + "db": c.databaseID, }).Debug("processing new block") if height > c.rt.getNextTurn()-1 { @@ -892,6 +904,7 @@ func (c *Chain) processBlocks(ctx context.Context) { "head_block": c.rt.getHead().Head.String(), "block_height": height, "block_hash": block.BlockHash().String(), + "db": c.databaseID, }).WithError(err).Error("Failed to check and push new block") } else { head := c.rt.getHead() @@ -899,7 +912,7 @@ func (c *Chain) processBlocks(ctx context.Context) { if currentCount%c.updatePeriod == 0 { ub, err := c.billing(head.node) if err != nil { - log.WithError(err).Error("billing failed") + log.WithError(err).WithField("db", c.databaseID).Error("billing failed") } // allocate nonce nonceReq := &types.NextAccountNonceReq{} @@ -907,20 +920,20 @@ func (c 
*Chain) processBlocks(ctx context.Context) { nonceReq.Addr = *c.addr if err = rpc.RequestBP(route.MCCNextAccountNonce.String(), nonceReq, nonceResp); err != nil { // allocate nonce failed - log.WithError(err).Warning("allocate nonce for transaction failed") + log.WithError(err).WithField("db", c.databaseID).Warning("allocate nonce for transaction failed") } ub.Nonce = nonceResp.Nonce if err = ub.Sign(c.pk); err != nil { - log.WithError(err).Warning("sign tx failed") + log.WithError(err).WithField("db", c.databaseID).Warning("sign tx failed") } - addTxReq := &types.AddTxReq{} + addTxReq := &types.AddTxReq{TTL: 1} addTxResp := &types.AddTxResp{} addTxReq.Tx = ub - log.Debugf("nonce in processBlocks: %d, addr: %s", + log.WithField("db", c.databaseID).Debugf("nonce in processBlocks: %d, addr: %s", addTxReq.Tx.GetAccountNonce(), addTxReq.Tx.GetAccountAddress()) if err = rpc.RequestBP(route.MCCAddTx.String(), addTxReq, addTxResp); err != nil { - log.WithError(err).Warning("send tx failed") + log.WithError(err).WithField("db", c.databaseID).Warning("send tx failed") } } } @@ -953,11 +966,13 @@ func (c *Chain) Stop() (err error) { log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), "time": c.rt.getChainTimeString(), + "db": c.databaseID, }).Debug("stopping chain") c.rt.stop(c.databaseID) log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), "time": c.rt.getChainTimeString(), + "db": c.databaseID, }).Debug("chain service and workers stopped") // Close LevelDB file var ierr error @@ -967,6 +982,7 @@ func (c *Chain) Stop() (err error) { log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), "time": c.rt.getChainTimeString(), + "db": c.databaseID, }).WithError(ierr).Debug("chain database closed") if ierr = c.tdb.Close(); ierr != nil && err == nil { err = ierr @@ -974,6 +990,7 @@ func (c *Chain) Stop() (err error) { log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), "time": c.rt.getChainTimeString(), + "db": c.databaseID, 
}).WithError(ierr).Debug("chain database closed") // Close state if ierr = c.st.Close(false); ierr != nil && err == nil { @@ -982,6 +999,7 @@ func (c *Chain) Stop() (err error) { log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), "time": c.rt.getChainTimeString(), + "db": c.databaseID, }).WithError(ierr).Debug("chain state storage closed") return } @@ -1031,6 +1049,7 @@ func (c *Chain) CheckAndPushNewBlock(block *types.Block) (err error) { "blockparent": block.ParentHash().String(), "headblock": head.Head.String(), "headheight": head.Height, + "db": c.databaseID, }).WithError(err).Debug("checking new block from other peer") if head.Height == height && head.Head.IsEqual(block.BlockHash()) { @@ -1050,7 +1069,6 @@ func (c *Chain) CheckAndPushNewBlock(block *types.Block) (err error) { if block.Producer() == c.rt.server { return c.pushBlock(block) } - // Check block producer index, found := peers.Find(block.Producer()) @@ -1064,6 +1082,7 @@ func (c *Chain) CheckAndPushNewBlock(block *types.Block) (err error) { "time": c.rt.getChainTimeString(), "expected": next, "actual": index, + "db": c.databaseID, }).WithError(err).Error( "Failed to check new block") return ErrInvalidProducer @@ -1085,7 +1104,7 @@ func (c *Chain) CheckAndPushNewBlock(block *types.Block) (err error) { // VerifyAndPushAckedQuery verifies a acknowledged and signed query, and pushed it if valid. func (c *Chain) VerifyAndPushAckedQuery(ack *types.SignedAckHeader) (err error) { // TODO(leventeliu): check ack. - if c.rt.queryTimeIsExpired(ack.SignedResponseHeader().Timestamp) { + if c.rt.queryTimeIsExpired(ack.GetResponseTimestamp()) { err = errors.Wrapf(ErrQueryExpired, "Verify ack query, min valid height %d, ack height %d", c.rt.getMinValidHeight(), c.rt.getHeightFromTime(ack.Timestamp)) return } @@ -1179,33 +1198,25 @@ func (c *Chain) replicationCycle(ctx context.Context) { } // Query queries req from local chain state and returns the query results in resp. 
-func (c *Chain) Query(req *types.Request) (resp *types.Response, err error) { - var ref *x.QueryTracker +func (c *Chain) Query( + req *types.Request, isLeader bool) (tracker *x.QueryTracker, resp *types.Response, err error, +) { // TODO(leventeliu): we're using an external context passed by request. Make sure that // cancelling will be propagated to this context before chain instance stops. - if ref, resp, err = c.st.QueryWithContext(req.GetContext(), req); err != nil { - return - } - if err = resp.Sign(c.pk); err != nil { - return - } - if err = c.addResponse(&resp.Header); err != nil { - return - } - ref.UpdateResp(resp) - return + return c.st.QueryWithContext(req.GetContext(), req, isLeader) } -func (c *Chain) addResponse(resp *types.SignedResponseHeader) (err error) { - return c.ai.addResponse(c.rt.getHeightFromTime(resp.Request.Timestamp), resp) +// AddResponse addes a response to the ackIndex, awaiting for acknowledgement. +func (c *Chain) AddResponse(resp *types.SignedResponseHeader) (err error) { + return c.ai.addResponse(c.rt.getHeightFromTime(resp.GetRequestTimestamp()), resp) } func (c *Chain) register(ack *types.SignedAckHeader) (err error) { - return c.ai.register(c.rt.getHeightFromTime(ack.SignedRequestHeader().Timestamp), ack) + return c.ai.register(c.rt.getHeightFromTime(ack.GetRequestTimestamp()), ack) } func (c *Chain) remove(ack *types.SignedAckHeader) (err error) { - return c.ai.remove(c.rt.getHeightFromTime(ack.SignedRequestHeader().Timestamp), ack) + return c.ai.remove(c.rt.getHeightFromTime(ack.GetRequestTimestamp()), ack) } func (c *Chain) pruneBlockCache() { @@ -1230,7 +1241,7 @@ func (c *Chain) stat() { var ( ic = atomic.LoadInt32(&multiIndexCount) rc = atomic.LoadInt32(&responseCount) - tc = atomic.LoadInt32(&ackTrackerCount) + tc = atomic.LoadInt32(&ackCount) bc = atomic.LoadInt32(&cachedBlockCount) ) // Print chain stats @@ -1240,13 +1251,14 @@ func (c *Chain) stat() { "response_header_count": rc, "query_tracker_count": tc, 
"cached_block_count": bc, + "db": c.databaseID, }).Info("chain mem stats") // Print xeno stats c.st.Stat(c.databaseID) } func (c *Chain) billing(node *blockNode) (ub *types.UpdateBilling, err error) { - log.Debugf("begin to billing from count %d", node.count) + log.WithField("db", c.databaseID).Debugf("begin to billing from count %d", node.count) var ( i, j uint64 minerAddr proto.AccountAddress @@ -1264,25 +1276,19 @@ func (c *Chain) billing(node *blockNode) (ub *types.UpdateBilling, err error) { } } for _, tx := range block.QueryTxs { - if minerAddr, err = crypto.PubKeyHash(tx.Response.Signee); err != nil { - log.WithError(err).Warning("billing fail: miner addr") - return - } + minerAddr = tx.Response.ResponseAccount if userAddr, err = crypto.PubKeyHash(tx.Request.Header.Signee); err != nil { - log.WithError(err).Warning("billing fail: miner addr") + log.WithError(err).WithField("db", c.databaseID).Warning("billing fail: miner addr") return } + if _, ok := minersMap[userAddr]; !ok { + minersMap[userAddr] = make(map[proto.AccountAddress]uint64) + } if tx.Request.Header.QueryType == types.ReadQuery { - if _, ok := minersMap[userAddr]; !ok { - minersMap[userAddr] = make(map[proto.AccountAddress]uint64) - } minersMap[userAddr][minerAddr] += tx.Response.RowCount usersMap[userAddr] += tx.Response.RowCount } else { - if _, ok := minersMap[userAddr]; !ok { - minersMap[userAddr] = make(map[proto.AccountAddress]uint64) - } minersMap[userAddr][minerAddr] += uint64(tx.Response.AffectedRows) usersMap[userAddr] += uint64(tx.Response.AffectedRows) } @@ -1290,13 +1296,16 @@ func (c *Chain) billing(node *blockNode) (ub *types.UpdateBilling, err error) { for _, req := range block.FailedReqs { if minerAddr, err = crypto.PubKeyHash(block.Signee()); err != nil { - log.WithError(err).Warning("billing fail: miner addr") + log.WithError(err).WithField("db", c.databaseID).Warning("billing fail: miner addr") return } if userAddr, err = crypto.PubKeyHash(req.Header.Signee); err != nil { - 
log.WithError(err).Warning("billing fail: user addr") + log.WithError(err).WithField("db", c.databaseID).Warning("billing fail: user addr") return } + if _, ok := minersMap[userAddr][minerAddr]; !ok { + minersMap[userAddr] = make(map[proto.AccountAddress]uint64) + } minersMap[userAddr][minerAddr] += uint64(len(req.Payload.Queries)) usersMap[userAddr] += uint64(len(req.Payload.Queries)) @@ -1311,7 +1320,7 @@ func (c *Chain) billing(node *blockNode) (ub *types.UpdateBilling, err error) { i = 0 j = 0 for userAddr, cost := range usersMap { - log.Debugf("user %s, cost %d", userAddr.String(), cost) + log.WithField("db", c.databaseID).Debugf("user %s, cost %d", userAddr.String(), cost) ub.Users[i] = &types.UserCost{ User: userAddr, Cost: cost, diff --git a/sqlchain/chain_test.go b/sqlchain/chain_test.go index 16ab4238b..fd6085e4d 100644 --- a/sqlchain/chain_test.go +++ b/sqlchain/chain_test.go @@ -345,7 +345,7 @@ func TestMultiChain(t *testing.T) { if err != nil { t.Errorf("error occurred: %v", err) - } else if err = c.addResponse(resp); err != nil { + } else if err = c.AddResponse(resp); err != nil { t.Errorf("error occurred: %v", err) } diff --git a/sqlchain/config.go b/sqlchain/config.go index 4de594ca8..3822870e7 100644 --- a/sqlchain/config.go +++ b/sqlchain/config.go @@ -51,4 +51,6 @@ type Config struct { TokenType types.TokenType GasPrice uint64 UpdatePeriod uint64 + + IsolationLevel int } diff --git a/sqlchain/mux.go b/sqlchain/mux.go index 836f038c9..234c2c7a9 100644 --- a/sqlchain/mux.go +++ b/sqlchain/mux.go @@ -103,34 +103,6 @@ type MuxFetchBlockResp struct { FetchBlockResp } -// MuxSubscribeTransactionsReq defines a request of the SubscribeTransactions RPC method. -type MuxSubscribeTransactionsReq struct { - proto.Envelope - proto.DatabaseID - SubscribeTransactionsReq -} - -// MuxSubscribeTransactionsResp defines a response of the SubscribeTransactions RPC method. 
-type MuxSubscribeTransactionsResp struct { - proto.Envelope - proto.DatabaseID - SubscribeTransactionsResp -} - -// MuxCancelSubscriptionReq defines a request of the CancelSubscription RPC method. -type MuxCancelSubscriptionReq struct { - proto.Envelope - proto.DatabaseID - CancelSubscriptionReq -} - -// MuxCancelSubscriptionResp defines a response of the CancelSubscription RPC method. -type MuxCancelSubscriptionResp struct { - proto.Envelope - proto.DatabaseID - CancelSubscriptionResp -} - // AdviseNewBlock is the RPC method to advise a new produced block to the target server. func (s *MuxService) AdviseNewBlock(req *MuxAdviseNewBlockReq, resp *MuxAdviseNewBlockResp) error { if v, ok := s.serviceMap.Load(req.DatabaseID); ok { @@ -176,27 +148,3 @@ func (s *MuxService) FetchBlock(req *MuxFetchBlockReq, resp *MuxFetchBlockResp) return ErrUnknownMuxRequest } - -// SubscribeTransactions is the RPC method to subscribe transactions from the target server. -func (s *MuxService) SubscribeTransactions(req *MuxSubscribeTransactionsReq, resp *MuxSubscribeTransactionsResp) (err error) { - if v, ok := s.serviceMap.Load(req.DatabaseID); ok { - resp.Envelope = req.Envelope - resp.DatabaseID = req.DatabaseID - req.SubscribeTransactionsReq.SubscriberID = req.GetNodeID().ToNodeID() - return v.(*ChainRPCService).SubscribeTransactions(&req.SubscribeTransactionsReq, &resp.SubscribeTransactionsResp) - } - - return ErrUnknownMuxRequest -} - -// CancelSubscription is the RPC method to cancel subscription from the target server. 
-func (s *MuxService) CancelSubscription(req *MuxCancelSubscriptionReq, resp *MuxCancelSubscriptionResp) (err error) { - if v, ok := s.serviceMap.Load(req.DatabaseID); ok { - resp.Envelope = req.Envelope - resp.DatabaseID = req.DatabaseID - req.CancelSubscriptionReq.SubscriberID = req.GetNodeID().ToNodeID() - return v.(*ChainRPCService).CancelSubscription(&req.CancelSubscriptionReq, &resp.CancelSubscriptionResp) - } - - return ErrUnknownMuxRequest -} diff --git a/sqlchain/rpc.go b/sqlchain/rpc.go index e9691d668..40b4a84b7 100644 --- a/sqlchain/rpc.go +++ b/sqlchain/rpc.go @@ -17,7 +17,6 @@ package sqlchain import ( - "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/types" ) @@ -64,24 +63,6 @@ type FetchBlockResp struct { Block *types.Block } -// SubscribeTransactionsReq defines a request of SubscribeTransaction RPC method. -type SubscribeTransactionsReq struct { - SubscriberID proto.NodeID - Height int32 -} - -// SubscribeTransactionsResp defines a response of SubscribeTransaction RPC method. -type SubscribeTransactionsResp struct { -} - -// CancelSubscriptionReq defines a request of CancelSubscription RPC method. -type CancelSubscriptionReq struct { - SubscriberID proto.NodeID -} - -// CancelSubscriptionResp defines a response of CancelSubscription RPC method. -type CancelSubscriptionResp struct{} - // AdviseNewBlock is the RPC method to advise a new produced block to the target server. func (s *ChainRPCService) AdviseNewBlock(req *AdviseNewBlockReq, resp *AdviseNewBlockResp) ( err error) { @@ -107,13 +88,3 @@ func (s *ChainRPCService) FetchBlock(req *FetchBlockReq, resp *FetchBlockResp) ( resp.Block, err = s.chain.FetchBlock(req.Height) return } - -// SubscribeTransactions is the RPC method to fetch subscribe new packed and confirmed transactions from the target server. 
-func (s *ChainRPCService) SubscribeTransactions(req *SubscribeTransactionsReq, _ *SubscribeTransactionsResp) error { - return s.chain.AddSubscription(req.SubscriberID, req.Height) -} - -// CancelSubscription is the RPC method to cancel subscription in the target server. -func (s *ChainRPCService) CancelSubscription(req *CancelSubscriptionReq, _ *CancelSubscriptionResp) error { - return s.chain.CancelSubscription(req.SubscriberID) -} diff --git a/sqlchain/xxx_test.go b/sqlchain/xxx_test.go index 80b5719cb..ba2c184af 100644 --- a/sqlchain/xxx_test.go +++ b/sqlchain/xxx_test.go @@ -146,9 +146,10 @@ func createRandomQueryResponse(cli, worker *nodeProfile) ( resp := &types.Response{ Header: types.SignedResponseHeader{ ResponseHeader: types.ResponseHeader{ - Request: *req, - NodeID: worker.NodeID, - Timestamp: createRandomTimeAfter(req.Timestamp, 100), + Request: req.RequestHeader, + RequestHash: req.Hash(), + NodeID: worker.NodeID, + Timestamp: createRandomTimeAfter(req.Timestamp, 100), }, }, Payload: types.ResponsePayload{ @@ -166,7 +167,7 @@ func createRandomQueryResponse(cli, worker *nodeProfile) ( } } - if err = resp.Sign(worker.PrivateKey); err != nil { + if err = resp.BuildHash(); err != nil { return } @@ -180,14 +181,15 @@ func createRandomQueryAckWithResponse(resp *types.SignedResponseHeader, cli *nod ack := &types.Ack{ Header: types.SignedAckHeader{ AckHeader: types.AckHeader{ - Response: *resp, - NodeID: cli.NodeID, - Timestamp: createRandomTimeAfter(resp.Timestamp, 100), + Response: resp.ResponseHeader, + ResponseHash: resp.Hash(), + NodeID: cli.NodeID, + Timestamp: createRandomTimeAfter(resp.Timestamp, 100), }, }, } - if err = ack.Sign(cli.PrivateKey, true); err != nil { + if err = ack.Sign(cli.PrivateKey); err != nil { return } diff --git a/test/GNTE/conf/gnte_0.2ms.yaml b/test/GNTE/conf/gnte_0.2ms.yaml new file mode 100644 index 000000000..d89f65dff --- /dev/null +++ b/test/GNTE/conf/gnte_0.2ms.yaml @@ -0,0 +1,79 @@ +# Only support 10.250.0.2 ~ 
10.250.254.254 +group: + - + name: bp + nodes: + - # bp10.250.1.2 + ip: 10.250.1.2/32 + cmd: "cd /scripts && ./bin/cqld -config ./node_0/config.yaml" + - # bp10.250.1.3 + ip: 10.250.1.3/32 + cmd: "cd /scripts && ./bin/cqld -config ./node_1/config.yaml" + - # bp10.250.1.4 + ip: 10.250.1.4/32 + cmd: "cd /scripts && ./bin/cqld -config ./node_2/config.yaml" + delay: "0.2ms" + rate: "1000mbit" + - + name: miner + nodes: + - # miner10.250.100.2 + ip: 10.250.100.2/32 + cmd: "cd /scripts && ./bin/cql-minerd -config ./node_miner_10.250.100.2/config.yaml" + - # miner10.250.100.3 + ip: 10.250.100.3/32 + cmd: "cd /scripts && ./bin/cql-minerd -config ./node_miner_10.250.100.3/config.yaml" + - # miner10.250.100.4 + ip: 10.250.100.4/32 + cmd: "cd /scripts && ./bin/cql-minerd -config ./node_miner_10.250.100.4/config.yaml" + - # miner10.250.100.5 + ip: 10.250.100.5/32 + cmd: "cd /scripts && ./bin/cql-minerd -config ./node_miner_10.250.100.5/config.yaml" + - # miner10.250.100.6 + ip: 10.250.100.6/32 + cmd: "cd /scripts && ./bin/cql-minerd -config ./node_miner_10.250.100.6/config.yaml" + - # miner10.250.100.7 + ip: 10.250.100.7/32 + cmd: "cd /scripts && ./bin/cql-minerd -config ./node_miner_10.250.100.7/config.yaml" + - # miner10.250.100.8 + ip: 10.250.100.8/32 + cmd: "cd /scripts && ./bin/cql-minerd -config ./node_miner_10.250.100.8/config.yaml" + - # miner10.250.100.9 + ip: 10.250.100.9/32 + cmd: "cd /scripts && ./bin/cql-minerd -config ./node_miner_10.250.100.9/config.yaml" + delay: "0.2ms" + rate: "1000mbit" + - + name: client + nodes: + - # node_c + ip: 10.250.0.2/32 + cmd: "ping -c3 g.cn" + - # node_adapter + ip: 10.250.0.254/32 + cmd: "cd /scripts && ./bin/cql-adapter -config ./node_c/config.yaml" + delay: "0.2ms" + rate: "1000mbit" + +network: + - + groups: + - bp + - miner + delay: "0.2ms" + rate: "1000mbit" + + - + groups: + - bp + - client + delay: "0.2ms" + rate: "1000mbit" + + - + groups: + - client + - miner + delay: "0.2ms" + rate: "1000mbit" + diff --git 
a/test/GNTE/run.sh b/test/GNTE/run.sh index 2d6be6b36..650874de9 100755 --- a/test/GNTE/run.sh +++ b/test/GNTE/run.sh @@ -1,8 +1,13 @@ #!/bin/bash -x -yaml=( - ./scripts/gnte_{0,5,20,100,200}ms.yaml -) +param=$1 +if [ "fast" == "$param" ]; then + yaml=./scripts/gnte_0ms.yaml +else + yaml=( + ./scripts/gnte_{0,0.2,5,20,100}ms.yaml + ) +fi TEST_WD=$(cd $(dirname $0)/; pwd) PROJECT_DIR=$(cd ${TEST_WD}/../../; pwd) @@ -44,7 +49,7 @@ do # Bench GNTE cd ${PROJECT_DIR}/cmd/cql-minerd/ - bash -x ./benchGNTE.sh + bash -x ./benchGNTE.sh $param echo "${gnte_yaml}" >> ${tmp_file} grep BenchmarkMinerGNTE gnte.log >> ${tmp_file} echo "" >> ${tmp_file} diff --git a/test/bench_testnet/node_c/config.yaml b/test/bench_testnet/node_c/config.yaml new file mode 100644 index 000000000..9bade087e --- /dev/null +++ b/test/bench_testnet/node_c/config.yaml @@ -0,0 +1,152 @@ +IsTestMode: true +StartupSyncHoles: true +WorkingRoot: ./ +PubKeyStoreFile: public.keystore +PrivateKeyFile: private.key +DHTFileName: dht.db +ListenAddr: 0.0.0.0:15151 +ThisNodeID: 00000086571eeee68e89a00635dda04149ea4048a2c7165738fc0fb8287e42a7 +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com + oJMRESz5E4gYzS/q6XDrvU1qMPYIjCWzJaOau8XNEZeqCYKD5ar0IRd8KqXXFJkqmVfRvMGPmM1x8fGAa2XhSA==: cloudflare.com +MinNodeIDDifficulty: 2 +DNSSeed: + EnforcedDNSSEC: false + DNSServers: + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 +BlockProducer: + PublicKey: 02c1db96f2ba7e1cb4e9822d12de0f63fb666feb828c7f509e81fab9bd7a34039c + NodeID: 00000000000589366268c274fdc11ec8bdb17e668d2f619555a2e9c1a29c91d8 + Nonce: + a: 14396347928 + b: 0 + c: 0 + d: 6148914694092305796 + ChainFileName: chain.db + BPGenesisInfo: + Version: 1 + Producer: "0000000000000000000000000000000000000000000000000000000000000001" + MerkleRoot: 
"0000000000000000000000000000000000000000000000000000000000000001" + ParentHash: "0000000000000000000000000000000000000000000000000000000000000001" + Timestamp: 2019-01-02T13:33:00Z + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + BaseAccounts: + - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 58aceaf4b730b54bf00c0fb3f7b14886de470767f313c2d108968cd8bf0794b7 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 +KnownNodes: +- ID: 00000000000589366268c274fdc11ec8bdb17e668d2f619555a2e9c1a29c91d8 + Role: Leader + Addr: bp00.cn.gridb.io:7777 + PublicKey: 02c1db96f2ba7e1cb4e9822d12de0f63fb666feb828c7f509e81fab9bd7a34039c + Nonce: + a: 14396347928 + b: 0 + c: 0 + d: 6148914694092305796 +- ID: 000000000013fd4b3180dd424d5a895bc57b798e5315087b7198c926d8893f98 + Role: Follower + Addr: bp01.cn.gridb.io:7777 + PublicKey: 02c1db96f2ba7e1cb4e9822d12de0f63fb666feb828c7f509e81fab9bd7a34039c + Nonce: + a: 789554103 + b: 0 + c: 0 + d: 8070450536379825883 +- ID: 00000000001771e2b2e12b6f9f85d58ef5261a4b98a2e80bba0c5ef7bd72c499 + Role: Follower + Addr: bp02.cn.gridb.io:7777 + PublicKey: 02c1db96f2ba7e1cb4e9822d12de0f63fb666feb828c7f509e81fab9bd7a34039c + Nonce: + a: 1822880492 + b: 0 + c: 0 + d: 8646911286604382906 +- ID: 000000000014a2f14e79aec0a27a2a669aab416c392d5577760d43ed8503020d + Role: Follower + Addr: 
bp03.cn.gridb.io:7777 + PublicKey: 02c1db96f2ba7e1cb4e9822d12de0f63fb666feb828c7f509e81fab9bd7a34039c + Nonce: + a: 2552803966 + b: 0 + c: 0 + d: 9079256850862786277 +- ID: 00000000003b2bd120a7d07f248b181fc794ba8b278f07f9a780e61eb77f6abb + Role: Follower + Addr: bp04.hk.gridb.io:7777 + PublicKey: 02c1db96f2ba7e1cb4e9822d12de0f63fb666feb828c7f509e81fab9bd7a34039c + Nonce: + a: 2449538793 + b: 0 + c: 0 + d: 8791026473473316840 +- ID: 0000000000293f7216362791b6b1c9772184d6976cb34310c42547735410186c + Role: Follower + Addr: bp05.cn.gridb.io:7777 + PublicKey: 02c1db96f2ba7e1cb4e9822d12de0f63fb666feb828c7f509e81fab9bd7a34039c + Nonce: + a: 746598970 + b: 0 + c: 0 + d: 10808639108098016056 +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Role: Miner + Addr: miner00.cn.gridb.io:7778 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Role: Miner + Addr: miner01.cn.gridb.io:7778 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Role: Client + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 +- ID: 00000086571eeee68e89a00635dda04149ea4048a2c7165738fc0fb8287e42a7 + Role: Client + Addr: 0.0.0.0:15151 + PublicKey: 039578c9edf700bf847eef8d24369a12c0aabbd56abefc9c5beb773fed969fe9f8 + Nonce: + a: 708150 + b: 0 + c: 0 + d: 1082761333 +QPS: 1000 +ChainBusPeriod: 0s +BillingBlockCount: 60 +BPPeriod: 10s +BPTick: 3s +SQLChainPeriod: 1m0s +SQLChainTick: 10s +SQLChainTTL: 10 +MinProviderDeposit: 1000000 diff --git a/test/bench_testnet/node_c/private.key b/test/bench_testnet/node_c/private.key new file mode 100644 index 000000000..558a164e7 --- /dev/null +++ 
b/test/bench_testnet/node_c/private.key @@ -0,0 +1 @@ +MdC1n849xkeyn5nRTp2rqbLz1bSj4KrTrPawLLHSPPikNLffhHY7xUntkKPbT67o4uupu7DEUTsR5P27HphpRsdYaebu8T \ No newline at end of file diff --git a/test/fuse/node_miner_0/config.yaml b/test/fuse/node_miner_0/config.yaml index 81ec69cfe..c40ddaad0 100644 --- a/test/fuse/node_miner_0/config.yaml +++ b/test/fuse/node_miner_0/config.yaml @@ -37,7 +37,7 @@ BlockProducer: Miner: IsTestMode: true RootDir: "./data" - MaxReqTimeGap: "2s" + MaxReqTimeGap: "60s" ProvideServiceInterval: "3s" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 diff --git a/test/fuse/node_miner_1/config.yaml b/test/fuse/node_miner_1/config.yaml index 84eefab7d..118567931 100644 --- a/test/fuse/node_miner_1/config.yaml +++ b/test/fuse/node_miner_1/config.yaml @@ -37,7 +37,7 @@ BlockProducer: Miner: IsTestMode: true RootDir: "./data" - MaxReqTimeGap: "2s" + MaxReqTimeGap: "60s" ProvideServiceInterval: "3s" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 diff --git a/test/fuse/node_miner_2/config.yaml b/test/fuse/node_miner_2/config.yaml index 34d8fd5ad..25a303475 100644 --- a/test/fuse/node_miner_2/config.yaml +++ b/test/fuse/node_miner_2/config.yaml @@ -37,7 +37,7 @@ BlockProducer: Miner: IsTestMode: true RootDir: "./data" - MaxReqTimeGap: "2s" + MaxReqTimeGap: "60s" ProvideServiceInterval: "3s" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 diff --git a/test/integration/node_miner_0/config.yaml b/test/integration/node_miner_0/config.yaml index 8016896ab..ceac395a8 100644 --- a/test/integration/node_miner_0/config.yaml +++ b/test/integration/node_miner_0/config.yaml @@ -51,7 +51,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - ProvideServiceInterval: "3s" + ProvideServiceInterval: "60s" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/integration/node_miner_1/config.yaml 
b/test/integration/node_miner_1/config.yaml index f4b53d9cb..41eb0305b 100644 --- a/test/integration/node_miner_1/config.yaml +++ b/test/integration/node_miner_1/config.yaml @@ -51,7 +51,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - ProvideServiceInterval: "3s" + ProvideServiceInterval: "60s" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/integration/node_miner_2/config.yaml b/test/integration/node_miner_2/config.yaml index aafdfefc9..51ec8f581 100644 --- a/test/integration/node_miner_2/config.yaml +++ b/test/integration/node_miner_2/config.yaml @@ -51,7 +51,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - ProvideServiceInterval: "3s" + ProvideServiceInterval: "60s" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/leak/client.yaml b/test/leak/client.yaml index 77bee3fca..ca8416194 100644 --- a/test/leak/client.yaml +++ b/test/leak/client.yaml @@ -17,10 +17,10 @@ MinNodeIDDifficulty: 2 DNSSeed: EnforcedDNSSEC: false DNSServers: - - 1.1.1.1 - - 202.46.34.74 - - 202.46.34.75 - - 202.46.34.76 + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 BlockProducer: PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" @@ -37,24 +37,23 @@ BlockProducer: Producer: 0000000000000000000000000000000000000000000000000000000000000001 MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 - Timestamp: 2018-08-13T21:59:59.12Z + Timestamp: "2019-01-10T12:49:07+08:00" KnownNodes: -- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 - Nonce: - a: 313283 - b: 0 - c: 0 - d: 0 - Addr: 127.0.0.1:2331 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Leader -- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d - Nonce: - a: 
22403 - b: 0 - c: 0 - d: 0 - Addr: "" - PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 - Role: Client - + - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:2331 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader + - ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client diff --git a/test/leak/leader.yaml b/test/leak/leader.yaml index 93ddef97b..fdd9b219e 100644 --- a/test/leak/leader.yaml +++ b/test/leak/leader.yaml @@ -22,10 +22,10 @@ MinNodeIDDifficulty: 2 DNSSeed: EnforcedDNSSEC: false DNSServers: - - 1.1.1.1 - - 202.46.34.74 - - 202.46.34.75 - - 202.46.34.76 + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 BlockProducer: PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" @@ -42,14 +42,14 @@ BlockProducer: Producer: 0000000000000000000000000000000000000000000000000000000000000001 MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 - Timestamp: 2018-08-13T21:59:59.12Z + Timestamp: "2019-01-10T12:49:07+08:00" KnownNodes: -- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 - Nonce: - a: 313283 - b: 0 - c: 0 - d: 0 - Addr: 127.0.0.1:2331 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Leader + - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:2331 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader diff --git a/test/node_0/config.yaml b/test/node_0/config.yaml index 52ce19b51..5993a318b 100644 --- a/test/node_0/config.yaml +++ 
b/test/node_0/config.yaml @@ -22,10 +22,10 @@ MinNodeIDDifficulty: 2 DNSSeed: EnforcedDNSSEC: false DNSServers: - - 1.1.1.1 - - 202.46.34.74 - - 202.46.34.75 - - 202.46.34.76 + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 BlockProducer: PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" @@ -42,68 +42,68 @@ BlockProducer: Producer: 0000000000000000000000000000000000000000000000000000000000000001 MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 - Timestamp: 2018-08-13T21:59:59.12Z + Timestamp: "2019-01-10T12:49:07+08:00" KnownNodes: -- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 - Nonce: - a: 313283 - b: 0 - c: 0 - d: 0 - Addr: 127.0.0.1:2122 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Leader -- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 - Nonce: - a: 478373 - b: 0 - c: 0 - d: 2305843009893772025 - Addr: 127.0.0.1:2121 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Follower -- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 - Nonce: - a: 259939 - b: 0 - c: 0 - d: 2305843012544226372 - Addr: 127.0.0.1:2120 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Follower -- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d - Nonce: - a: 22403 - b: 0 - c: 0 - d: 0 - Addr: "" - PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 - Role: Client -- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade - Nonce: - a: 567323 - b: 0 - c: 0 - d: 3104982049 - Addr: 127.0.0.1:2144 - PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 - Role: Miner -- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 - Nonce: - a: 240524 - b: 0 - c: 0 - d: 
2305843010430351476 - Addr: 127.0.0.1:2145 - PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 - Role: Miner -- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 - Nonce: - a: 606016 - b: 0 - c: 0 - d: 13835058056920509601 - Addr: 127.0.0.1:2146 - PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 - Role: Miner + - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:2122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader + - ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:2121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower + - ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:2120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower + - ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client + - ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:2144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner + - ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:2145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner + - ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:2146 
+ PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/observation/node_0/config.yaml b/test/observation/node_0/config.yaml index 21ad53261..e5c50bcd7 100644 --- a/test/observation/node_0/config.yaml +++ b/test/observation/node_0/config.yaml @@ -56,6 +56,9 @@ BlockProducer: - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd StableCoinBalance: 1000000000 CovenantCoinBalance: 1000000000 + - Address: e4e1628477a17c969f3f915f4bc7c059c3fbcbaf37855bc55a811465ea2480af + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/observation/node_1/config.yaml b/test/observation/node_1/config.yaml index 2ade86811..a301e204b 100644 --- a/test/observation/node_1/config.yaml +++ b/test/observation/node_1/config.yaml @@ -56,6 +56,9 @@ BlockProducer: - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd StableCoinBalance: 1000000000 CovenantCoinBalance: 1000000000 + - Address: e4e1628477a17c969f3f915f4bc7c059c3fbcbaf37855bc55a811465ea2480af + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/observation/node_2/config.yaml b/test/observation/node_2/config.yaml index 98f8b5bc0..3fd450a21 100644 --- a/test/observation/node_2/config.yaml +++ b/test/observation/node_2/config.yaml @@ -56,6 +56,9 @@ BlockProducer: - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd StableCoinBalance: 1000000000 CovenantCoinBalance: 1000000000 + - Address: e4e1628477a17c969f3f915f4bc7c059c3fbcbaf37855bc55a811465ea2480af + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/observation/node_observer/config.yaml 
b/test/observation/node_observer/config.yaml index fc516fca0..16d985851 100644 --- a/test/observation/node_observer/config.yaml +++ b/test/observation/node_observer/config.yaml @@ -4,7 +4,7 @@ PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" DHTFileName: "dht.db" ListenAddr: "127.0.0.1:4123" -ThisNodeID: "0000002100a44923021af2c91822e47998b0842cd450774c020257304acdce0b" +ThisNodeID: "00000045aecffbb1dc33a9846a2d4d1ca09593c3a316bb4ec635889ac3a8b0aa" QPS: 1000 BillingBlockCount: 3600 ChainBusPeriod: 1s @@ -71,14 +71,14 @@ KnownNodes: Addr: 127.0.0.1:4120 PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" Role: Follower -- ID: 0000002100a44923021af2c91822e47998b0842cd450774c020257304acdce0b +- ID: 00000045aecffbb1dc33a9846a2d4d1ca09593c3a316bb4ec635889ac3a8b0aa Nonce: - a: 819961 + a: 4399024610213 b: 0 c: 0 - d: 7322664668 + d: 0 Addr: 127.0.0.1:4123 - PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + PublicKey: 02505a09a833710b691a570c5de399f3633ec4752422ae80b75f0dc8a8acc48c62 Role: Client - ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade Nonce: diff --git a/test/observation/node_observer/private.key b/test/observation/node_observer/private.key index f563980c1..595bb0f5a 100644 Binary files a/test/observation/node_observer/private.key and b/test/observation/node_observer/private.key differ diff --git a/test/pool/client.yaml b/test/pool/client.yaml index 7bb71b3ee..208481519 100644 --- a/test/pool/client.yaml +++ b/test/pool/client.yaml @@ -17,10 +17,10 @@ MinNodeIDDifficulty: 2 DNSSeed: EnforcedDNSSEC: false DNSServers: - - 1.1.1.1 - - 202.46.34.74 - - 202.46.34.75 - - 202.46.34.76 + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 BlockProducer: PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" @@ -37,24 +37,23 @@ BlockProducer: Producer: 0000000000000000000000000000000000000000000000000000000000000001 MerkleRoot: 
0000000000000000000000000000000000000000000000000000000000000001 ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 - Timestamp: 2018-08-13T21:59:59.12Z + Timestamp: "2019-01-10T12:49:07+08:00" KnownNodes: -- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 - Nonce: - a: 313283 - b: 0 - c: 0 - d: 0 - Addr: 127.0.0.1:2530 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Leader -- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d - Nonce: - a: 22403 - b: 0 - c: 0 - d: 0 - Addr: "" - PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 - Role: Client - + - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:2530 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader + - ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client diff --git a/test/pool/leader.yaml b/test/pool/leader.yaml index 17403f548..e7fd47f13 100644 --- a/test/pool/leader.yaml +++ b/test/pool/leader.yaml @@ -17,10 +17,10 @@ MinNodeIDDifficulty: 2 DNSSeed: EnforcedDNSSEC: false DNSServers: - - 1.1.1.1 - - 202.46.34.74 - - 202.46.34.75 - - 202.46.34.76 + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 BlockProducer: PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" @@ -37,14 +37,14 @@ BlockProducer: Producer: 0000000000000000000000000000000000000000000000000000000000000001 MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 - Timestamp: 2018-08-13T21:59:59.12Z + Timestamp: "2019-01-10T12:49:07+08:00" KnownNodes: -- ID: 
00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 - Nonce: - a: 313283 - b: 0 - c: 0 - d: 0 - Addr: 127.0.0.1:2530 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Leader + - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:2530 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader diff --git a/test/service/fullnode_0/config.yaml b/test/service/fullnode_0/config.yaml new file mode 100644 index 000000000..a54e62c05 --- /dev/null +++ b/test/service/fullnode_0/config.yaml @@ -0,0 +1,77 @@ +IsTestMode: true +StartupSyncHoles: false +WorkingRoot: ./ +PubKeyStoreFile: public.keystore +PrivateKeyFile: private.key +DHTFileName: dht.db +ListenAddr: 172.254.1.11:4661 +ThisNodeID: 00000041772ecd779c68a3928d12675d9a65dce02f2ad6907f2cf53013f7e652 +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com + oJMRESz5E4gYzS/q6XDrvU1qMPYIjCWzJaOau8XNEZeqCYKD5ar0IRd8KqXXFJkqmVfRvMGPmM1x8fGAa2XhSA==: cloudflare.com +MinNodeIDDifficulty: 2 +DNSSeed: + EnforcedDNSSEC: false + DNSServers: + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 +BlockProducer: + PublicKey: '02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24' + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: 'chain.db' + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 
'2019-01-10T12:49:07+08:00' + BaseAccounts: + - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 +KnownNodes: + - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 172.254.1.2:4661 + PublicKey: '02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24' + Role: Leader + - ID: 00000041772ecd779c68a3928d12675d9a65dce02f2ad6907f2cf53013f7e652 + Role: Client + Addr: 172.254.1.11:4661 + PublicKey: 03ff62aa105dc94c2cea1e3e150a5fafbceb230868b7ed0b0f950915499dfeeadd + Nonce: + a: 3631427 + b: 3627950475 + c: 0 + d: 0 +QPS: 1000 +ChainBusPeriod: 0s +BillingBlockCount: 60 +BPPeriod: 3s +BPTick: 1s +SQLChainPeriod: 1m0s +SQLChainTick: 10s +SQLChainTTL: 10 +MinProviderDeposit: 1000000 diff --git a/test/service/fullnode_0/private.key b/test/service/fullnode_0/private.key new file mode 100644 index 000000000..900b35b9a --- /dev/null +++ b/test/service/fullnode_0/private.key @@ -0,0 +1 @@ +MaXCmBDcFoQiPL8svDi36Z7MHRHg681uVL7jYd2hDgYo5E8G3yk8n84tfajNkd3Ypbhuc2u12o8x8nrq53dM3g5r3sAq5A \ No newline at end of file diff --git a/test/service/node_0/config.yaml b/test/service/node_0/config.yaml index a990c8734..31391385e 100644 --- a/test/service/node_0/config.yaml +++ b/test/service/node_0/config.yaml @@ -22,10 +22,10 @@ MinNodeIDDifficulty: 2 DNSSeed: EnforcedDNSSEC: false DNSServers: - - 1.1.1.1 - - 202.46.34.74 - - 
202.46.34.75 - - 202.46.34.76 + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 BlockProducer: PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" @@ -42,7 +42,7 @@ BlockProducer: Producer: 0000000000000000000000000000000000000000000000000000000000000001 MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 - Timestamp: 2018-08-13T21:59:59.12Z + Timestamp: "2019-01-10T12:49:07+08:00" BaseAccounts: - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 StableCoinBalance: 10000000000000000000 @@ -57,66 +57,66 @@ BlockProducer: StableCoinBalance: 10000000000000000000 CovenantCoinBalance: 10000000000000000000 KnownNodes: -- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 - Nonce: - a: 313283 - b: 0 - c: 0 - d: 0 - Addr: 172.254.1.2:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Leader -- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 - Nonce: - a: 478373 - b: 0 - c: 0 - d: 2305843009893772025 - Addr: 172.254.1.3:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Follower -- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 - Nonce: - a: 259939 - b: 0 - c: 0 - d: 2305843012544226372 - Addr: 172.254.1.4:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Follower -- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d - Nonce: - a: 22403 - b: 0 - c: 0 - d: 0 - Addr: "" - PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 - Role: Client -- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade - Nonce: - a: 567323 - b: 0 - c: 0 - d: 3104982049 - Addr: 172.254.1.5:4661 - PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 - Role: Miner -- 
ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 - Nonce: - a: 240524 - b: 0 - c: 0 - d: 2305843010430351476 - Addr: 172.254.1.6:4661 - PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 - Role: Miner -- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 - Nonce: - a: 606016 - b: 0 - c: 0 - d: 13835058056920509601 - Addr: 172.254.1.7:4661 - PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 - Role: Miner + - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 172.254.1.2:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader + - ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 172.254.1.3:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower + - ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 172.254.1.4:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower + - ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client + - ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 172.254.1.5:4661 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner + - ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 172.254.1.6:4661 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner + - ID: 
000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 172.254.1.7:4661 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/service/node_1/config.yaml b/test/service/node_1/config.yaml index 029cce62a..5e2fa7460 100644 --- a/test/service/node_1/config.yaml +++ b/test/service/node_1/config.yaml @@ -22,10 +22,10 @@ MinNodeIDDifficulty: 2 DNSSeed: EnforcedDNSSEC: false DNSServers: - - 1.1.1.1 - - 202.46.34.74 - - 202.46.34.75 - - 202.46.34.76 + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 BlockProducer: PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" @@ -42,7 +42,7 @@ BlockProducer: Producer: 0000000000000000000000000000000000000000000000000000000000000001 MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 - Timestamp: 2018-08-13T21:59:59.12Z + Timestamp: "2019-01-10T12:49:07+08:00" BaseAccounts: - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 StableCoinBalance: 10000000000000000000 @@ -57,66 +57,66 @@ BlockProducer: StableCoinBalance: 10000000000000000000 CovenantCoinBalance: 10000000000000000000 KnownNodes: -- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 - Nonce: - a: 313283 - b: 0 - c: 0 - d: 0 - Addr: 172.254.1.2:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Leader -- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 - Nonce: - a: 478373 - b: 0 - c: 0 - d: 2305843009893772025 - Addr: 172.254.1.3:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Follower -- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 - Nonce: - a: 259939 - b: 0 - c: 0 - d: 2305843012544226372 - Addr: 172.254.1.4:4661 - 
PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Follower -- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d - Nonce: - a: 22403 - b: 0 - c: 0 - d: 0 - Addr: "" - PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 - Role: Client -- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade - Nonce: - a: 567323 - b: 0 - c: 0 - d: 3104982049 - Addr: 172.254.1.5:4661 - PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 - Role: Miner -- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 - Nonce: - a: 240524 - b: 0 - c: 0 - d: 2305843010430351476 - Addr: 172.254.1.6:4661 - PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 - Role: Miner -- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 - Nonce: - a: 606016 - b: 0 - c: 0 - d: 13835058056920509601 - Addr: 172.254.1.7:4661 - PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 - Role: Miner + - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 172.254.1.2:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader + - ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 172.254.1.3:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower + - ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 172.254.1.4:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower + - ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 
02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client + - ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 172.254.1.5:4661 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner + - ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 172.254.1.6:4661 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner + - ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 172.254.1.7:4661 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/service/node_2/config.yaml b/test/service/node_2/config.yaml index 056e1ce3b..413220a15 100644 --- a/test/service/node_2/config.yaml +++ b/test/service/node_2/config.yaml @@ -22,10 +22,10 @@ MinNodeIDDifficulty: 2 DNSSeed: EnforcedDNSSEC: false DNSServers: - - 1.1.1.1 - - 202.46.34.74 - - 202.46.34.75 - - 202.46.34.76 + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 BlockProducer: PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" @@ -42,7 +42,7 @@ BlockProducer: Producer: 0000000000000000000000000000000000000000000000000000000000000001 MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 - Timestamp: 2018-08-13T21:59:59.12Z + Timestamp: "2019-01-10T12:49:07+08:00" BaseAccounts: - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 StableCoinBalance: 10000000000000000000 @@ -57,66 +57,66 @@ BlockProducer: StableCoinBalance: 10000000000000000000 CovenantCoinBalance: 10000000000000000000 KnownNodes: -- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 - 
Nonce: - a: 313283 - b: 0 - c: 0 - d: 0 - Addr: 172.254.1.2:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Leader -- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 - Nonce: - a: 478373 - b: 0 - c: 0 - d: 2305843009893772025 - Addr: 172.254.1.3:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Follower -- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 - Nonce: - a: 259939 - b: 0 - c: 0 - d: 2305843012544226372 - Addr: 172.254.1.4:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Follower -- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d - Nonce: - a: 22403 - b: 0 - c: 0 - d: 0 - Addr: "" - PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 - Role: Client -- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade - Nonce: - a: 567323 - b: 0 - c: 0 - d: 3104982049 - Addr: 172.254.1.5:4661 - PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 - Role: Miner -- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 - Nonce: - a: 240524 - b: 0 - c: 0 - d: 2305843010430351476 - Addr: 172.254.1.6:4661 - PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 - Role: Miner -- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 - Nonce: - a: 606016 - b: 0 - c: 0 - d: 13835058056920509601 - Addr: 172.254.1.7:4661 - PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 - Role: Miner + - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 172.254.1.2:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader + - ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 
2305843009893772025 + Addr: 172.254.1.3:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower + - ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 172.254.1.4:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower + - ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client + - ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 172.254.1.5:4661 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner + - ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 172.254.1.6:4661 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner + - ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 172.254.1.7:4661 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/service/node_adapter/config.yaml b/test/service/node_adapter/config.yaml index 7e281ee34..98d362e5f 100644 --- a/test/service/node_adapter/config.yaml +++ b/test/service/node_adapter/config.yaml @@ -22,10 +22,10 @@ MinNodeIDDifficulty: 2 DNSSeed: EnforcedDNSSEC: false DNSServers: - - 1.1.1.1 - - 202.46.34.74 - - 202.46.34.75 - - 202.46.34.76 + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 BlockProducer: PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" @@ -42,7 +42,7 @@ BlockProducer: Producer: 0000000000000000000000000000000000000000000000000000000000000001 
MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 - Timestamp: 2018-08-13T21:59:59.12Z + Timestamp: "2019-01-10T12:49:07+08:00" BaseAccounts: - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 StableCoinBalance: 10000000000000000000 @@ -57,69 +57,69 @@ BlockProducer: StableCoinBalance: 10000000000000000000 CovenantCoinBalance: 10000000000000000000 KnownNodes: -- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 - Nonce: - a: 313283 - b: 0 - c: 0 - d: 0 - Addr: 172.254.1.2:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Leader -- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 - Nonce: - a: 478373 - b: 0 - c: 0 - d: 2305843009893772025 - Addr: 172.254.1.3:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Follower -- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 - Nonce: - a: 259939 - b: 0 - c: 0 - d: 2305843012544226372 - Addr: 172.254.1.4:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Follower -- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d - Nonce: - a: 22403 - b: 0 - c: 0 - d: 0 - Addr: "" - PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 - Role: Client -- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade - Nonce: - a: 567323 - b: 0 - c: 0 - d: 3104982049 - Addr: 172.254.1.5:4661 - PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 - Role: Miner -- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 - Nonce: - a: 240524 - b: 0 - c: 0 - d: 2305843010430351476 - Addr: 172.254.1.6:4661 - PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 - Role: Miner -- ID: 
000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 - Nonce: - a: 606016 - b: 0 - c: 0 - d: 13835058056920509601 - Addr: 172.254.1.7:4661 - PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 - Role: Miner + - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 172.254.1.2:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader + - ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 172.254.1.3:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower + - ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 172.254.1.4:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower + - ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client + - ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 172.254.1.5:4661 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner + - ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 172.254.1.6:4661 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner + - ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 172.254.1.7:4661 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner Adapter: ListenAddr: 0.0.0.0:4661 
CertificatePath: ./server.test.covenantsql.io.pem diff --git a/test/service/node_c/config.yaml b/test/service/node_c/config.yaml index 3e00d1d89..94b974476 100644 --- a/test/service/node_c/config.yaml +++ b/test/service/node_c/config.yaml @@ -22,10 +22,10 @@ MinNodeIDDifficulty: 2 DNSSeed: EnforcedDNSSEC: false DNSServers: - - 1.1.1.1 - - 202.46.34.74 - - 202.46.34.75 - - 202.46.34.76 + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 BlockProducer: PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" @@ -42,71 +42,71 @@ BlockProducer: Producer: 0000000000000000000000000000000000000000000000000000000000000001 MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 - Timestamp: 2018-08-13T21:59:59.12Z + Timestamp: "2019-01-10T12:49:07+08:00" KnownNodes: -- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 - Nonce: - a: 313283 - b: 0 - c: 0 - d: 0 - Addr: 127.0.0.1:11099 #172.254.1.2:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Leader -- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 - Nonce: - a: 478373 - b: 0 - c: 0 - d: 2305843009893772025 - Addr: 127.0.0.1:11100 #172.254.1.3:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Follower -- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 - Nonce: - a: 259939 - b: 0 - c: 0 - d: 2305843012544226372 - Addr: 127.0.0.1:11101 #172.254.1.4:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Follower -- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d - Nonce: - a: 22403 - b: 0 - c: 0 - d: 0 - Addr: "" - PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 - Role: Client -- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade - Nonce: 
- a: 567323 - b: 0 - c: 0 - d: 3104982049 - Addr: 127.0.0.1:11102 #172.254.1.5:4661 - PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 - Role: Miner -- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 - Nonce: - a: 240524 - b: 0 - c: 0 - d: 2305843010430351476 - Addr: 127.0.0.1:11103 #172.254.1.6:4661 - PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 - Role: Miner -- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 - Nonce: - a: 606016 - b: 0 - c: 0 - d: 13835058056920509601 - Addr: 127.0.0.1:11104 #172.254.1.7:4661 - PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 - Role: Miner + - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:11099 #172.254.1.2:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader + - ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:11100 #172.254.1.3:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower + - ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:11101 #172.254.1.4:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower + - ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client + - ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:11102 #172.254.1.5:4661 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner + 
- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:11103 #172.254.1.6:4661 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner + - ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:11104 #172.254.1.7:4661 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner Adapter: ListenAddr: 0.0.0.0:4661 CertificatePath: ./server.test.covenantsql.io.pem diff --git a/test/testnet_client/run.sh b/test/testnet_client/run.sh new file mode 100755 index 000000000..a17511d0c --- /dev/null +++ b/test/testnet_client/run.sh @@ -0,0 +1,38 @@ +#!/bin/bash -x + +set -e + +TEST_WD=$(cd $(dirname $0)/; pwd) +PROJECT_DIR=$(cd ${TEST_WD}/../../; pwd) + +echo ${PROJECT_DIR} + +# Build +# cd ${PROJECT_DIR} && make clean +# cd ${PROJECT_DIR} && make use_all_cores + +cd ${TEST_WD} +echo -ne "y\n" | ${PROJECT_DIR}/bin/cql-utils -tool confgen -skip-master-key +${PROJECT_DIR}/bin/cql-utils -tool addrgen -skip-master-key | tee wallet.txt + +#get wallet addr +wallet=$(awk '{print $3}' wallet.txt) + +#transfer some coin to above address +${PROJECT_DIR}/bin/cql -config ${PROJECT_DIR}/conf/testnet/config.yaml -transfer \ + '{"addr":"'${wallet}'", "amount":"100000000 Particle"}' -wait-tx-confirm + +${PROJECT_DIR}/bin/cql -get-balance + +${PROJECT_DIR}/bin/cql -create 2 -wait-tx-confirm | tee dsn.txt + +#get dsn +dsn=$(cat dsn.txt) + +${PROJECT_DIR}/bin/cql -dsn ${dsn} \ + -command 'create table test_for_new_account(column1 int);' + +${PROJECT_DIR}/bin/cql -dsn ${dsn} \ + -command 'show tables;' | tee result.log + +grep "1 row" result.log diff --git a/types/account.go b/types/account.go index 8f8b7295e..b0eac76b1 100644 --- a/types/account.go +++ b/types/account.go @@ -17,11 +17,16 @@ package types import ( + "encoding/json" + 
"strings" + "sync" + pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/proto" ) //go:generate hsp +//hsp:ignore PermStat // SQLChainRole defines roles of account in a SQLChain. type SQLChainRole byte @@ -35,56 +40,169 @@ const ( NumberOfRoles ) +// UserPermissionRole defines role of user permission including admin/write/read. +type UserPermissionRole int32 + // UserPermission defines permissions of a SQLChain user. -type UserPermission int32 +type UserPermission struct { + // User role to access database. + Role UserPermissionRole + // SQL pattern regulations for user queries + // only a fully matched (case-sensitive) sql query is permitted to execute. + Patterns []string + + // patterns map cache for matching + cachedPatternMapOnce sync.Once + cachedPatternMap map[string]bool +} const ( - // Void defines the initial permission. - Void UserPermission = iota - // Admin defines the admin user permission. - Admin + // Read defines the read user permission. + Read UserPermissionRole = 1 << iota // Write defines the writer user permission. Write - // Read defines the reader user permission. - Read - // NumberOfUserPermission defines the user permission number. - NumberOfUserPermission + // Super defines the super user permission. + Super + + // ReadOnly defines the reader user permission. + ReadOnly = Read + // WriteOnly defines the writer user permission. + WriteOnly = Write + // ReadWrite defines the reader && writer user permission. + ReadWrite = Read | Write + // Admin defines the privilege to full control the database. + Admin = Read | Write | Super + + // Void defines the initial permission. + Void UserPermissionRole = 0 ) -// CheckRead returns true if user owns read permission. -func (up *UserPermission) CheckRead() bool { - return *up >= Admin && *up < NumberOfUserPermission +// UnmarshalJSON implements the json.Unmarshler interface. 
+func (r *UserPermissionRole) UnmarshalJSON(data []byte) (err error) { + var s string + if err = json.Unmarshal(data, &s); err != nil { + return + } + r.FromString(s) + return +} + +// MarshalJSON implements the json.Marshaler interface. +func (r UserPermissionRole) MarshalJSON() ([]byte, error) { + return json.Marshal(r.String()) +} + +// String implements the fmt.Stringer interface. +func (r UserPermissionRole) String() string { + if r == Void { + return "Void" + } else if r == Admin { + return "Admin" + } + + var res []string + if r&Read != 0 { + res = append(res, "Read") + } + if r&Write != 0 { + res = append(res, "Write") + } + if r&Super != 0 { + res = append(res, "Super") + } + + return strings.Join(res, ",") } -// CheckWrite returns true if user owns write permission. -func (up *UserPermission) CheckWrite() bool { - return *up >= Admin && *up <= Write +// FromString converts string to UserPermissionRole. +func (r *UserPermissionRole) FromString(perm string) { + if perm == "Void" { + *r = Void + return + } else if perm == "Admin" { + *r = Admin + return + } + + *r = Void + + for _, p := range strings.Split(perm, ",") { + p = strings.TrimSpace(p) + switch p { + case "Read": + *r |= Read + case "Write": + *r |= Write + case "Super": + *r |= Super + } + } +} + +// UserPermissionFromRole constructs a new user permission instance from primitive user permission role enum. +func UserPermissionFromRole(role UserPermissionRole) *UserPermission { + return &UserPermission{ + Role: role, + } } -// CheckAdmin returns true if user owns admin permission. -func (up *UserPermission) CheckAdmin() bool { - return *up == Admin +// HasReadPermission returns true if user owns read permission. +func (up *UserPermission) HasReadPermission() bool { + if up == nil { + return false + } + return up.Role&Read != 0 } -// Valid returns true if the value is a meaning permission value. 
-func (up *UserPermission) Valid() bool { - return *up >= Admin && *up < NumberOfUserPermission +// HasWritePermission returns true if user owns write permission. +func (up *UserPermission) HasWritePermission() bool { + if up == nil { + return false + } + return up.Role&Write != 0 } -// FromString converts string to UserPermission. -func (up *UserPermission) FromString(perm string) { - switch perm { - case "Admin": - *up = Admin - case "Write": - *up = Write - case "Read": - *up = Read - case "Void": - *up = Void - default: - *up = NumberOfUserPermission +// HasSuperPermission returns true if user owns super permission. +func (up *UserPermission) HasSuperPermission() bool { + if up == nil { + return false } + return up.Role&Super != 0 +} + +// IsValid returns whether the permission object is valid or not. +func (up *UserPermission) IsValid() bool { + return up != nil && up.Role != 0 +} + +// HasDisallowedQueryPatterns returns the first query not covered by the permitted patterns, with status set to true if such a disallowed query exists. +func (up *UserPermission) HasDisallowedQueryPatterns(queries []Query) (query string, status bool) { + if up == nil { + status = true + return + } + if len(up.Patterns) == 0 { + status = false + return + } + + up.cachedPatternMapOnce.Do(func() { + up.cachedPatternMap = make(map[string]bool, len(up.Patterns)) + for _, p := range up.Patterns { + up.cachedPatternMap[p] = true + } + }) + + for _, q := range queries { + if !up.cachedPatternMap[q.Pattern] { + // not permitted + query = q.Pattern + status = true + break + } + } + + return } // Status defines status of a SQLChain user/miner. @@ -112,14 +230,14 @@ func (s *Status) EnableQuery() bool { // PermStat defines the permissions status structure. type PermStat struct { - Permission UserPermission + Permission *UserPermission Status Status } // SQLChainUser defines a SQLChain user. 
type SQLChainUser struct { Address proto.AccountAddress - Permission UserPermission + Permission *UserPermission AdvancePayment uint64 Arrears uint64 Deposit uint64 diff --git a/types/account_gen.go b/types/account_gen.go index e69e63642..320d3ebd0 100644 --- a/types/account_gen.go +++ b/types/account_gen.go @@ -11,31 +11,28 @@ func (z *Account) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 4 - o = append(o, 0x84, 0x84) - o = hsp.AppendArrayHeader(o, uint32(SupportTokenNumber)) - for za0001 := range z.TokenBalance { - o = hsp.AppendUint64(o, z.TokenBalance[za0001]) - } - o = append(o, 0x84) - o = hsp.AppendFloat64(o, z.Rating) o = append(o, 0x84) - if oTemp, err := z.NextNonce.MarshalHash(); err != nil { + if oTemp, err := z.Address.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x84) - if oTemp, err := z.Address.MarshalHash(); err != nil { + if oTemp, err := z.NextNonce.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } + o = hsp.AppendFloat64(o, z.Rating) + o = hsp.AppendArrayHeader(o, uint32(SupportTokenNumber)) + for za0001 := range z.TokenBalance { + o = hsp.AppendUint64(o, z.TokenBalance[za0001]) + } return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Account) Msgsize() (s int) { - s = 1 + 13 + hsp.ArrayHeaderSize + (int(SupportTokenNumber) * (hsp.Uint64Size)) + 7 + hsp.Float64Size + 10 + z.NextNonce.Msgsize() + 8 + z.Address.Msgsize() + s = 1 + 8 + z.Address.Msgsize() + 10 + z.NextNonce.Msgsize() + 7 + hsp.Float64Size + 13 + hsp.ArrayHeaderSize + (int(SupportTokenNumber) * (hsp.Uint64Size)) return } @@ -44,53 +41,44 @@ func (z *MinerInfo) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 9 - o = append(o, 0x89, 0x89) - o = hsp.AppendInt32(o, int32(z.Status)) o = append(o, 0x89) + if oTemp, err := 
z.Address.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = hsp.AppendUint64(o, z.Deposit) + o = hsp.AppendString(o, z.EncryptionKey) + o = hsp.AppendString(o, z.Name) + if oTemp, err := z.NodeID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = hsp.AppendUint64(o, z.PendingIncome) + o = hsp.AppendUint64(o, z.ReceivedIncome) + o = hsp.AppendInt32(o, int32(z.Status)) o = hsp.AppendArrayHeader(o, uint32(len(z.UserArrears))) for za0001 := range z.UserArrears { if z.UserArrears[za0001] == nil { o = hsp.AppendNil(o) } else { // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) if oTemp, err := z.UserArrears[za0001].User.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) o = hsp.AppendUint64(o, z.UserArrears[za0001].Arrears) } } - o = append(o, 0x89) - if oTemp, err := z.Address.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x89) - if oTemp, err := z.NodeID.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x89) - o = hsp.AppendString(o, z.Name) - o = append(o, 0x89) - o = hsp.AppendString(o, z.EncryptionKey) - o = append(o, 0x89) - o = hsp.AppendUint64(o, z.PendingIncome) - o = append(o, 0x89) - o = hsp.AppendUint64(o, z.ReceivedIncome) - o = append(o, 0x89) - o = hsp.AppendUint64(o, z.Deposit) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *MinerInfo) Msgsize() (s int) { - s = 1 + 7 + hsp.Int32Size + 12 + hsp.ArrayHeaderSize + s = 1 + 8 + z.Address.Msgsize() + 8 + hsp.Uint64Size + 14 + hsp.StringPrefixSize + len(z.EncryptionKey) + 5 + hsp.StringPrefixSize + len(z.Name) + 7 + z.NodeID.Msgsize() + 14 + hsp.Uint64Size + 15 + hsp.Uint64Size + 7 + hsp.Int32Size + 12 + hsp.ArrayHeaderSize for za0001 := range 
z.UserArrears { if z.UserArrears[za0001] == nil { s += hsp.NilSize @@ -98,7 +86,6 @@ func (z *MinerInfo) Msgsize() (s int) { s += 1 + 5 + z.UserArrears[za0001].User.Msgsize() + 8 + hsp.Uint64Size } } - s += 8 + z.Address.Msgsize() + 7 + z.NodeID.Msgsize() + 5 + hsp.StringPrefixSize + len(z.Name) + 14 + hsp.StringPrefixSize + len(z.EncryptionKey) + 14 + hsp.Uint64Size + 15 + hsp.Uint64Size + 8 + hsp.Uint64Size return } @@ -107,13 +94,22 @@ func (z *ProviderProfile) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 9 - o = append(o, 0x89, 0x89) - if oTemp, err := z.TokenType.MarshalHash(); err != nil { + o = append(o, 0x89) + o = hsp.AppendUint64(o, z.Deposit) + o = hsp.AppendUint64(o, z.GasPrice) + o = hsp.AppendFloat64(o, z.LoadAvgPerCPU) + o = hsp.AppendUint64(o, z.Memory) + if oTemp, err := z.NodeID.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x89) + if oTemp, err := z.Provider.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = hsp.AppendUint64(o, z.Space) o = hsp.AppendArrayHeader(o, uint32(len(z.TargetUser))) for za0001 := range z.TargetUser { if oTemp, err := z.TargetUser[za0001].MarshalHash(); err != nil { @@ -122,38 +118,21 @@ func (z *ProviderProfile) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x89) - o = hsp.AppendFloat64(o, z.LoadAvgPerCPU) - o = append(o, 0x89) - if oTemp, err := z.Provider.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x89) - if oTemp, err := z.NodeID.MarshalHash(); err != nil { + if oTemp, err := z.TokenType.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x89) - o = hsp.AppendUint64(o, z.Deposit) - o = append(o, 0x89) - o = hsp.AppendUint64(o, z.GasPrice) - o = append(o, 0x89) - o = hsp.AppendUint64(o, z.Space) - o = 
append(o, 0x89) - o = hsp.AppendUint64(o, z.Memory) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *ProviderProfile) Msgsize() (s int) { - s = 1 + 10 + z.TokenType.Msgsize() + 11 + hsp.ArrayHeaderSize + s = 1 + 8 + hsp.Uint64Size + 9 + hsp.Uint64Size + 14 + hsp.Float64Size + 7 + hsp.Uint64Size + 7 + z.NodeID.Msgsize() + 9 + z.Provider.Msgsize() + 6 + hsp.Uint64Size + 11 + hsp.ArrayHeaderSize for za0001 := range z.TargetUser { s += z.TargetUser[za0001].Msgsize() } - s += 14 + hsp.Float64Size + 9 + z.Provider.Msgsize() + 7 + z.NodeID.Msgsize() + 8 + hsp.Uint64Size + 9 + hsp.Uint64Size + 6 + hsp.Uint64Size + 7 + hsp.Uint64Size + s += 10 + z.TokenType.Msgsize() return } @@ -162,19 +141,25 @@ func (z *SQLChainProfile) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 11 - o = append(o, 0x8b, 0x8b) - if oTemp, err := z.Meta.MarshalHash(); err != nil { + o = append(o, 0x8b) + if oTemp, err := z.Address.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x8b) - if oTemp, err := z.TokenType.MarshalHash(); err != nil { + o = hsp.AppendBytes(o, z.EncodedGenesis) + o = hsp.AppendUint64(o, z.GasPrice) + if oTemp, err := z.ID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = hsp.AppendUint32(o, z.LastUpdatedHeight) + if oTemp, err := z.Meta.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x8b) o = hsp.AppendArrayHeader(o, uint32(len(z.Miners))) for za0001 := range z.Miners { if z.Miners[za0001] == nil { @@ -187,7 +172,17 @@ func (z *SQLChainProfile) MarshalHash() (o []byte, err error) { } } } - o = append(o, 0x8b) + if oTemp, err := z.Owner.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = hsp.AppendUint64(o, z.Period) + if oTemp, err := 
z.TokenType.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } o = hsp.AppendArrayHeader(o, uint32(len(z.Users))) for za0002 := range z.Users { if z.Users[za0002] == nil { @@ -200,38 +195,12 @@ func (z *SQLChainProfile) MarshalHash() (o []byte, err error) { } } } - o = append(o, 0x8b) - o = hsp.AppendBytes(o, z.EncodedGenesis) - o = append(o, 0x8b) - if oTemp, err := z.Owner.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x8b) - if oTemp, err := z.Address.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x8b) - if oTemp, err := z.ID.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x8b) - o = hsp.AppendUint32(o, z.LastUpdatedHeight) - o = append(o, 0x8b) - o = hsp.AppendUint64(o, z.Period) - o = append(o, 0x8b) - o = hsp.AppendUint64(o, z.GasPrice) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SQLChainProfile) Msgsize() (s int) { - s = 1 + 5 + z.Meta.Msgsize() + 10 + z.TokenType.Msgsize() + 7 + hsp.ArrayHeaderSize + s = 1 + 8 + z.Address.Msgsize() + 15 + hsp.BytesPrefixSize + len(z.EncodedGenesis) + 9 + hsp.Uint64Size + 3 + z.ID.Msgsize() + 18 + hsp.Uint32Size + 5 + z.Meta.Msgsize() + 7 + hsp.ArrayHeaderSize for za0001 := range z.Miners { if z.Miners[za0001] == nil { s += hsp.NilSize @@ -239,7 +208,7 @@ func (z *SQLChainProfile) Msgsize() (s int) { s += z.Miners[za0001].Msgsize() } } - s += 6 + hsp.ArrayHeaderSize + s += 6 + z.Owner.Msgsize() + 7 + hsp.Uint64Size + 10 + z.TokenType.Msgsize() + 6 + hsp.ArrayHeaderSize for za0002 := range z.Users { if z.Users[za0002] == nil { s += hsp.NilSize @@ -247,7 +216,6 @@ func (z *SQLChainProfile) Msgsize() (s int) { s += z.Users[za0002].Msgsize() } } - s += 15 + hsp.BytesPrefixSize + len(z.EncodedGenesis) + 6 + z.Owner.Msgsize() + 8 + 
z.Address.Msgsize() + 3 + z.ID.Msgsize() + 18 + hsp.Uint32Size + 7 + hsp.Uint64Size + 9 + hsp.Uint64Size return } @@ -270,28 +238,42 @@ func (z *SQLChainUser) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 6 - o = append(o, 0x86, 0x86) - o = hsp.AppendInt32(o, int32(z.Status)) - o = append(o, 0x86) - o = hsp.AppendInt32(o, int32(z.Permission)) o = append(o, 0x86) if oTemp, err := z.Address.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) o = hsp.AppendUint64(o, z.AdvancePayment) - o = append(o, 0x86) o = hsp.AppendUint64(o, z.Arrears) - o = append(o, 0x86) o = hsp.AppendUint64(o, z.Deposit) + if z.Permission == nil { + o = hsp.AppendNil(o) + } else { + // map header, size 2 + o = append(o, 0x82) + o = hsp.AppendInt32(o, int32(z.Permission.Role)) + o = hsp.AppendArrayHeader(o, uint32(len(z.Permission.Patterns))) + for za0001 := range z.Permission.Patterns { + o = hsp.AppendString(o, z.Permission.Patterns[za0001]) + } + } + o = hsp.AppendInt32(o, int32(z.Status)) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SQLChainUser) Msgsize() (s int) { - s = 1 + 7 + hsp.Int32Size + 11 + hsp.Int32Size + 8 + z.Address.Msgsize() + 15 + hsp.Uint64Size + 8 + hsp.Uint64Size + 8 + hsp.Uint64Size + s = 1 + 8 + z.Address.Msgsize() + 15 + hsp.Uint64Size + 8 + hsp.Uint64Size + 8 + hsp.Uint64Size + 11 + if z.Permission == nil { + s += hsp.NilSize + } else { + s += 1 + 5 + hsp.Int32Size + 9 + hsp.ArrayHeaderSize + for za0001 := range z.Permission.Patterns { + s += hsp.StringPrefixSize + len(z.Permission.Patterns[za0001]) + } + } + s += 7 + hsp.Int32Size return } @@ -314,25 +296,48 @@ func (z *UserArrears) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) + o = hsp.AppendUint64(o, z.Arrears) if oTemp, err 
:= z.User.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - o = hsp.AppendUint64(o, z.Arrears) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *UserArrears) Msgsize() (s int) { - s = 1 + 5 + z.User.Msgsize() + 8 + hsp.Uint64Size + s = 1 + 8 + hsp.Uint64Size + 5 + z.User.Msgsize() + return +} + +// MarshalHash marshals for hash +func (z *UserPermission) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82) + o = hsp.AppendArrayHeader(o, uint32(len(z.Patterns))) + for za0001 := range z.Patterns { + o = hsp.AppendString(o, z.Patterns[za0001]) + } + o = hsp.AppendInt32(o, int32(z.Role)) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *UserPermission) Msgsize() (s int) { + s = 1 + 9 + hsp.ArrayHeaderSize + for za0001 := range z.Patterns { + s += hsp.StringPrefixSize + len(z.Patterns[za0001]) + } + s += 5 + hsp.Int32Size return } // MarshalHash marshals for hash -func (z UserPermission) MarshalHash() (o []byte, err error) { +func (z UserPermissionRole) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) o = hsp.AppendInt32(o, int32(z)) @@ -340,7 +345,7 @@ func (z UserPermission) MarshalHash() (o []byte, err error) { } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z UserPermission) Msgsize() (s int) { +func (z UserPermissionRole) Msgsize() (s int) { s = hsp.Int32Size return } diff --git a/types/account_gen_test.go b/types/account_gen_test.go index 30e9ad803..388a19ddb 100644 --- a/types/account_gen_test.go +++ b/types/account_gen_test.go @@ -230,3 +230,40 @@ func BenchmarkAppendMsgUserArrears(b *testing.B) { bts, _ = v.MarshalHash() } } + +func TestMarshalHashUserPermission(t *testing.T) { + v := 
UserPermission{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashUserPermission(b *testing.B) { + v := UserPermission{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgUserPermission(b *testing.B) { + v := UserPermission{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} diff --git a/types/account_test.go b/types/account_test.go new file mode 100644 index 000000000..828930869 --- /dev/null +++ b/types/account_test.go @@ -0,0 +1,100 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "encoding/json" + "testing" + + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestUserPermissionFromRole(t *testing.T) { + Convey("test marshal/unmarshal json", t, func() { + jsonBytes, err := json.Marshal(Read) + So(err, ShouldBeNil) + So(jsonBytes, ShouldResemble, []byte(`"Read"`)) + var r UserPermissionRole + So(r, ShouldEqual, Void) + err = json.Unmarshal([]byte(`"Write"`), &r) + So(err, ShouldBeNil) + So(r, ShouldEqual, Write) + err = json.Unmarshal([]byte(`"Read,Write"`), &r) + So(err, ShouldBeNil) + So(r, ShouldEqual, ReadWrite) + }) + Convey("test string/from string", t, func() { + var r UserPermissionRole + So(r, ShouldEqual, Void) + r.FromString(Read.String()) + So(r, ShouldEqual, Read) + r.FromString(ReadWrite.String()) + So(r, ShouldEqual, ReadWrite) + }) +} + +func TestUserPermission(t *testing.T) { + Convey("nil protect", t, func() { + p := (*UserPermission)(nil) + So(p.HasReadPermission(), ShouldBeFalse) + So(p.HasWritePermission(), ShouldBeFalse) + So(p.HasSuperPermission(), ShouldBeFalse) + So(p.IsValid(), ShouldBeFalse) + _, state := p.HasDisallowedQueryPatterns([]Query{}) + So(state, ShouldBeTrue) + }) + Convey("has read permission", t, func() { + So(UserPermissionFromRole(Void).HasReadPermission(), ShouldBeFalse) + So(UserPermissionFromRole(Read).HasReadPermission(), ShouldBeTrue) + So(UserPermissionFromRole(Write).HasReadPermission(), ShouldBeFalse) + So(UserPermissionFromRole(ReadWrite).HasReadPermission(), ShouldBeTrue) + So(UserPermissionFromRole(Admin).HasReadPermission(), ShouldBeTrue) + }) + Convey("has write permission", t, func() { + So(UserPermissionFromRole(Void).HasWritePermission(), ShouldBeFalse) + So(UserPermissionFromRole(Read).HasWritePermission(), ShouldBeFalse) + So(UserPermissionFromRole(Write).HasWritePermission(), ShouldBeTrue) + So(UserPermissionFromRole(ReadWrite).HasWritePermission(), ShouldBeTrue) + So(UserPermissionFromRole(Admin).HasWritePermission(), ShouldBeTrue) + }) + Convey("has admin permission", t, func() { + 
So(UserPermissionFromRole(Void).HasSuperPermission(), ShouldBeFalse) + So(UserPermissionFromRole(Read).HasSuperPermission(), ShouldBeFalse) + So(UserPermissionFromRole(Write).HasSuperPermission(), ShouldBeFalse) + So(UserPermissionFromRole(ReadWrite).HasSuperPermission(), ShouldBeFalse) + So(UserPermissionFromRole(Admin).HasSuperPermission(), ShouldBeTrue) + }) + Convey("is valid", t, func() { + So(UserPermissionFromRole(Void).IsValid(), ShouldBeFalse) + So(UserPermissionFromRole(Read).IsValid(), ShouldBeTrue) + So(UserPermissionFromRole(Write).IsValid(), ShouldBeTrue) + So(UserPermissionFromRole(ReadWrite).IsValid(), ShouldBeTrue) + So(UserPermissionFromRole(Admin).IsValid(), ShouldBeTrue) + }) + Convey("query patterns", t, func() { + // empty patterns limitation + _, state := UserPermissionFromRole(Read).HasDisallowedQueryPatterns([]Query{ + { + Pattern: "select 1", + }, + { + Pattern: "insert into test values(1)", + }, + }) + So(state, ShouldBeFalse) + }) +} diff --git a/types/ack_type.go b/types/ack_type.go index af24a9da9..267c8ca6a 100644 --- a/types/ack_type.go +++ b/types/ack_type.go @@ -29,9 +29,35 @@ import ( // AckHeader defines client ack entity. type AckHeader struct { - Response SignedResponseHeader `json:"r"` - NodeID proto.NodeID `json:"i"` // ack node id - Timestamp time.Time `json:"t"` // time in UTC zone + Response ResponseHeader `json:"r"` + ResponseHash hash.Hash `json:"rh"` + NodeID proto.NodeID `json:"i"` // ack node id + Timestamp time.Time `json:"t"` // time in UTC zone +} + +// GetQueryKey returns the request query key. +func (h *AckHeader) GetQueryKey() QueryKey { + return h.Response.Request.GetQueryKey() +} + +// GetRequestTimestamp returns the request timestamp. +func (h *AckHeader) GetRequestTimestamp() time.Time { + return h.Response.GetRequestTimestamp() +} + +// GetResponseTimestamp returns the response timestamp. 
+func (h *AckHeader) GetResponseTimestamp() time.Time { + return h.Response.Timestamp +} + +// GetRequestHash returns the request hash. +func (h *AckHeader) GetRequestHash() hash.Hash { + return h.Response.GetRequestHash() +} + +// GetResponseHash returns the response hash. +func (h *AckHeader) GetResponseHash() hash.Hash { + return h.ResponseHash } // SignedAckHeader defines client signed ack entity. @@ -51,24 +77,11 @@ type AckResponse struct{} // Verify checks hash and signature in ack header. func (sh *SignedAckHeader) Verify() (err error) { - // verify response - if err = sh.Response.Verify(); err != nil { - return - } - return sh.DefaultHashSignVerifierImpl.Verify(&sh.AckHeader) } // Sign the request. -func (sh *SignedAckHeader) Sign(signer *asymmetric.PrivateKey, verifyReqHeader bool) (err error) { - // Only used by ack worker, and ack.Header is verified before build ack - if verifyReqHeader { - // check original header signature - if err = sh.Response.Verify(); err != nil { - return - } - } - +func (sh *SignedAckHeader) Sign(signer *asymmetric.PrivateKey) (err error) { return sh.DefaultHashSignVerifierImpl.Sign(&sh.AckHeader, signer) } @@ -78,22 +91,7 @@ func (a *Ack) Verify() error { } // Sign the request. -func (a *Ack) Sign(signer *asymmetric.PrivateKey, verifyReqHeader bool) (err error) { +func (a *Ack) Sign(signer *asymmetric.PrivateKey) (err error) { // sign - return a.Header.Sign(signer, verifyReqHeader) -} - -// ResponseHash returns the deep shadowed Response Hash field. -func (sh *SignedAckHeader) ResponseHash() hash.Hash { - return sh.AckHeader.Response.Hash() -} - -// SignedRequestHeader returns the deep shadowed Request reference. -func (sh *SignedAckHeader) SignedRequestHeader() *SignedRequestHeader { - return &sh.AckHeader.Response.Request -} - -// SignedResponseHeader returns the Response reference. 
-func (sh *SignedAckHeader) SignedResponseHeader() *SignedResponseHeader { - return &sh.Response + return a.Header.Sign(signer) } diff --git a/types/ack_type_gen.go b/types/ack_type_gen.go index 90fc7a814..d623af12b 100644 --- a/types/ack_type_gen.go +++ b/types/ack_type_gen.go @@ -11,14 +11,20 @@ func (z *Ack) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) - if oTemp, err := z.Header.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } + // map header, size 2 o = append(o, 0x82) - if oTemp, err := z.Envelope.MarshalHash(); err != nil { + if oTemp, err := z.Header.AckHeader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -28,7 +34,7 @@ func (z *Ack) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Ack) Msgsize() (s int) { - s = 1 + 7 + z.Header.Msgsize() + 9 + z.Envelope.Msgsize() + s = 1 + 9 + z.Envelope.Msgsize() + 7 + 1 + 10 + z.Header.AckHeader.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() return } @@ -36,27 +42,30 @@ func (z *Ack) Msgsize() (s int) { func (z *AckHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) - // map header, size 3 - o = append(o, 0x83, 0x83) + // map header, size 4 + o = append(o, 0x84) + if oTemp, err := z.NodeID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } if oTemp, err := z.Response.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := z.NodeID.MarshalHash(); err != nil { + if oTemp, 
err := z.ResponseHash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) o = hsp.AppendTime(o, z.Timestamp) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *AckHeader) Msgsize() (s int) { - s = 1 + 9 + z.Response.Msgsize() + 7 + z.NodeID.Msgsize() + 10 + hsp.TimeSize + s = 1 + 7 + z.NodeID.Msgsize() + 9 + z.Response.Msgsize() + 13 + z.ResponseHash.Msgsize() + 10 + hsp.TimeSize return } @@ -80,22 +89,12 @@ func (z *SignedAckHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - // map header, size 3 - o = append(o, 0x82, 0x82, 0x83, 0x83) - if oTemp, err := z.AckHeader.Response.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x83) - if oTemp, err := z.AckHeader.NodeID.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.AckHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - o = hsp.AppendTime(o, z.AckHeader.Timestamp) - o = append(o, 0x82) if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { @@ -106,6 +105,6 @@ func (z *SignedAckHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SignedAckHeader) Msgsize() (s int) { - s = 1 + 10 + 1 + 9 + z.AckHeader.Response.Msgsize() + 7 + z.AckHeader.NodeID.Msgsize() + 10 + hsp.TimeSize + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 10 + z.AckHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() return } diff --git a/types/baseaccount_gen.go b/types/baseaccount_gen.go index 857c98fd1..e1a0955f4 100644 --- a/types/baseaccount_gen.go +++ b/types/baseaccount_gen.go @@ -11,13 +11,12 @@ func (z *BaseAccount) MarshalHash() (o []byte, err error) 
{ var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) if oTemp, err := z.Account.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { return nil, err } else { diff --git a/types/billing_gen.go b/types/billing_gen.go index bc636474f..6d5d57f6b 100644 --- a/types/billing_gen.go +++ b/types/billing_gen.go @@ -11,20 +11,18 @@ func (z *Billing) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) + o = append(o, 0x83) if oTemp, err := z.BillingHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -34,7 +32,7 @@ func (z *Billing) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Billing) Msgsize() (s int) { - s = 1 + 14 + z.BillingHeader.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 14 + z.BillingHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() return } @@ -43,13 +41,26 @@ func (z *BillingHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 6 - o = append(o, 0x86, 0x86) + o = append(o, 0x86) if oTemp, err := z.BillingRequest.MarshalHash(); err != nil { return 
nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) + o = hsp.AppendArrayHeader(o, uint32(len(z.Fees))) + for za0002 := range z.Fees { + o = hsp.AppendUint64(o, z.Fees[za0002]) + } + if oTemp, err := z.Nonce.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + if oTemp, err := z.Producer.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } o = hsp.AppendArrayHeader(o, uint32(len(z.Receivers))) for za0001 := range z.Receivers { if z.Receivers[za0001] == nil { @@ -62,34 +73,16 @@ func (z *BillingHeader) MarshalHash() (o []byte, err error) { } } } - o = append(o, 0x86) - o = hsp.AppendArrayHeader(o, uint32(len(z.Fees))) - for za0002 := range z.Fees { - o = hsp.AppendUint64(o, z.Fees[za0002]) - } - o = append(o, 0x86) o = hsp.AppendArrayHeader(o, uint32(len(z.Rewards))) for za0003 := range z.Rewards { o = hsp.AppendUint64(o, z.Rewards[za0003]) } - o = append(o, 0x86) - if oTemp, err := z.Nonce.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x86) - if oTemp, err := z.Producer.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BillingHeader) Msgsize() (s int) { - s = 1 + 15 + z.BillingRequest.Msgsize() + 10 + hsp.ArrayHeaderSize + s = 1 + 15 + z.BillingRequest.Msgsize() + 5 + hsp.ArrayHeaderSize + (len(z.Fees) * (hsp.Uint64Size)) + 6 + z.Nonce.Msgsize() + 9 + z.Producer.Msgsize() + 10 + hsp.ArrayHeaderSize for za0001 := range z.Receivers { if z.Receivers[za0001] == nil { s += hsp.NilSize @@ -97,6 +90,6 @@ func (z *BillingHeader) Msgsize() (s int) { s += z.Receivers[za0001].Msgsize() } } - s += 5 + hsp.ArrayHeaderSize + (len(z.Fees) * (hsp.Uint64Size)) + 8 + hsp.ArrayHeaderSize + (len(z.Rewards) * (hsp.Uint64Size)) + 6 + z.Nonce.Msgsize() + 9 + 
z.Producer.Msgsize() + s += 8 + hsp.ArrayHeaderSize + (len(z.Rewards) * (hsp.Uint64Size)) return } diff --git a/types/billing_request_gen.go b/types/billing_request_gen.go index 48da8b034..9a631c25f 100644 --- a/types/billing_request_gen.go +++ b/types/billing_request_gen.go @@ -11,66 +11,62 @@ func (z *BillingRequest) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 4 - o = append(o, 0x84, 0x84) + o = append(o, 0x84) if oTemp, err := z.Header.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x84) - o = hsp.AppendArrayHeader(o, uint32(len(z.Signees))) - for za0001 := range z.Signees { - if z.Signees[za0001] == nil { + if oTemp, err := z.RequestHash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = hsp.AppendArrayHeader(o, uint32(len(z.Signatures))) + for za0002 := range z.Signatures { + if z.Signatures[za0002] == nil { o = hsp.AppendNil(o) } else { - if oTemp, err := z.Signees[za0001].MarshalHash(); err != nil { + if oTemp, err := z.Signatures[za0002].MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } } } - o = append(o, 0x84) - o = hsp.AppendArrayHeader(o, uint32(len(z.Signatures))) - for za0002 := range z.Signatures { - if z.Signatures[za0002] == nil { + o = hsp.AppendArrayHeader(o, uint32(len(z.Signees))) + for za0001 := range z.Signees { + if z.Signees[za0001] == nil { o = hsp.AppendNil(o) } else { - if oTemp, err := z.Signatures[za0002].MarshalHash(); err != nil { + if oTemp, err := z.Signees[za0001].MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } } } - o = append(o, 0x84) - if oTemp, err := z.RequestHash.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BillingRequest) Msgsize() 
(s int) { - s = 1 + 7 + z.Header.Msgsize() + 8 + hsp.ArrayHeaderSize - for za0001 := range z.Signees { - if z.Signees[za0001] == nil { + s = 1 + 7 + z.Header.Msgsize() + 12 + z.RequestHash.Msgsize() + 11 + hsp.ArrayHeaderSize + for za0002 := range z.Signatures { + if z.Signatures[za0002] == nil { s += hsp.NilSize } else { - s += z.Signees[za0001].Msgsize() + s += z.Signatures[za0002].Msgsize() } } - s += 11 + hsp.ArrayHeaderSize - for za0002 := range z.Signatures { - if z.Signatures[za0002] == nil { + s += 8 + hsp.ArrayHeaderSize + for za0001 := range z.Signees { + if z.Signees[za0001] == nil { s += hsp.NilSize } else { - s += z.Signatures[za0002].Msgsize() + s += z.Signees[za0001].Msgsize() } } - s += 12 + z.RequestHash.Msgsize() return } @@ -79,7 +75,12 @@ func (z *BillingRequestHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 6 - o = append(o, 0x86, 0x86) + o = append(o, 0x86) + if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } o = hsp.AppendArrayHeader(o, uint32(len(z.GasAmounts))) for za0001 := range z.GasAmounts { if z.GasAmounts[za0001] == nil { @@ -92,34 +93,24 @@ func (z *BillingRequestHeader) MarshalHash() (o []byte, err error) { } } } - o = append(o, 0x86) - if oTemp, err := z.LowBlock.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x86) if oTemp, err := z.HighBlock.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) - o = hsp.AppendInt32(o, z.LowHeight) - o = append(o, 0x86) o = hsp.AppendInt32(o, z.HighHeight) - o = append(o, 0x86) - if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { + if oTemp, err := z.LowBlock.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } + o = hsp.AppendInt32(o, z.LowHeight) return } // Msgsize returns an upper bound estimate of the number 
of bytes occupied by the serialized message func (z *BillingRequestHeader) Msgsize() (s int) { - s = 1 + 11 + hsp.ArrayHeaderSize + s = 1 + 11 + z.DatabaseID.Msgsize() + 11 + hsp.ArrayHeaderSize for za0001 := range z.GasAmounts { if z.GasAmounts[za0001] == nil { s += hsp.NilSize @@ -127,6 +118,6 @@ func (z *BillingRequestHeader) Msgsize() (s int) { s += z.GasAmounts[za0001].Msgsize() } } - s += 9 + z.LowBlock.Msgsize() + 10 + z.HighBlock.Msgsize() + 10 + hsp.Int32Size + 11 + hsp.Int32Size + 11 + z.DatabaseID.Msgsize() + s += 10 + z.HighBlock.Msgsize() + 11 + hsp.Int32Size + 9 + z.LowBlock.Msgsize() + 10 + hsp.Int32Size return } diff --git a/types/block_gen.go b/types/block_gen.go index bf8ef8bd6..685522af3 100644 --- a/types/block_gen.go +++ b/types/block_gen.go @@ -11,33 +11,19 @@ func (z *Block) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 4 - // map header, size 2 - o = append(o, 0x84, 0x84, 0x82, 0x82) - if oTemp, err := z.SignedHeader.Header.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x82) - if oTemp, err := z.SignedHeader.HSV.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } o = append(o, 0x84) - o = hsp.AppendArrayHeader(o, uint32(len(z.QueryTxs))) - for za0002 := range z.QueryTxs { - if z.QueryTxs[za0002] == nil { + o = hsp.AppendArrayHeader(o, uint32(len(z.Acks))) + for za0003 := range z.Acks { + if z.Acks[za0003] == nil { o = hsp.AppendNil(o) } else { - if oTemp, err := z.QueryTxs[za0002].MarshalHash(); err != nil { + if oTemp, err := z.Acks[za0003].MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } } } - o = append(o, 0x84) o = hsp.AppendArrayHeader(o, uint32(len(z.FailedReqs))) for za0001 := range z.FailedReqs { if z.FailedReqs[za0001] == nil { @@ -50,30 +36,41 @@ func (z *Block) MarshalHash() (o []byte, err error) { } } } - o = append(o, 0x84) - o = 
hsp.AppendArrayHeader(o, uint32(len(z.Acks))) - for za0003 := range z.Acks { - if z.Acks[za0003] == nil { + o = hsp.AppendArrayHeader(o, uint32(len(z.QueryTxs))) + for za0002 := range z.QueryTxs { + if z.QueryTxs[za0002] == nil { o = hsp.AppendNil(o) } else { - if oTemp, err := z.Acks[za0003].MarshalHash(); err != nil { + if oTemp, err := z.QueryTxs[za0002].MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } } } + // map header, size 2 + o = append(o, 0x82) + if oTemp, err := z.SignedHeader.Header.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + if oTemp, err := z.SignedHeader.HSV.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Block) Msgsize() (s int) { - s = 1 + 13 + 1 + 7 + z.SignedHeader.Header.Msgsize() + 4 + z.SignedHeader.HSV.Msgsize() + 9 + hsp.ArrayHeaderSize - for za0002 := range z.QueryTxs { - if z.QueryTxs[za0002] == nil { + s = 1 + 5 + hsp.ArrayHeaderSize + for za0003 := range z.Acks { + if z.Acks[za0003] == nil { s += hsp.NilSize } else { - s += z.QueryTxs[za0002].Msgsize() + s += z.Acks[za0003].Msgsize() } } s += 11 + hsp.ArrayHeaderSize @@ -84,14 +81,15 @@ func (z *Block) Msgsize() (s int) { s += z.FailedReqs[za0001].Msgsize() } } - s += 5 + hsp.ArrayHeaderSize - for za0003 := range z.Acks { - if z.Acks[za0003] == nil { + s += 9 + hsp.ArrayHeaderSize + for za0002 := range z.QueryTxs { + if z.QueryTxs[za0002] == nil { s += hsp.NilSize } else { - s += z.Acks[za0003].Msgsize() + s += z.QueryTxs[za0002].Msgsize() } } + s += 13 + 1 + 7 + z.SignedHeader.Header.Msgsize() + 4 + z.SignedHeader.HSV.Msgsize() return } @@ -132,40 +130,35 @@ func (z *Header) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 6 - o = append(o, 0x86, 0x86) + o = append(o, 0x86) if oTemp, err 
:= z.GenesisHash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) - if oTemp, err := z.ParentHash.MarshalHash(); err != nil { + if oTemp, err := z.MerkleRoot.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) - if oTemp, err := z.MerkleRoot.MarshalHash(); err != nil { + if oTemp, err := z.ParentHash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) - o = hsp.AppendInt32(o, z.Version) - o = append(o, 0x86) if oTemp, err := z.Producer.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) o = hsp.AppendTime(o, z.Timestamp) + o = hsp.AppendInt32(o, z.Version) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Header) Msgsize() (s int) { - s = 1 + 12 + z.GenesisHash.Msgsize() + 11 + z.ParentHash.Msgsize() + 11 + z.MerkleRoot.Msgsize() + 8 + hsp.Int32Size + 9 + z.Producer.Msgsize() + 10 + hsp.TimeSize + s = 1 + 12 + z.GenesisHash.Msgsize() + 11 + z.MerkleRoot.Msgsize() + 11 + z.ParentHash.Msgsize() + 9 + z.Producer.Msgsize() + 10 + hsp.TimeSize + 8 + hsp.Int32Size return } @@ -174,7 +167,7 @@ func (z *QueryAsTx) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) if z.Request == nil { o = hsp.AppendNil(o) } else { @@ -184,7 +177,6 @@ func (z *QueryAsTx) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x82) if z.Response == nil { o = hsp.AppendNil(o) } else { @@ -219,14 +211,13 @@ func (z *SignedHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) - if oTemp, err := z.Header.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := 
z.HSV.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.HSV.MarshalHash(); err != nil { + if oTemp, err := z.Header.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -236,6 +227,6 @@ func (z *SignedHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SignedHeader) Msgsize() (s int) { - s = 1 + 7 + z.Header.Msgsize() + 4 + z.HSV.Msgsize() + s = 1 + 4 + z.HSV.Msgsize() + 7 + z.Header.Msgsize() return } diff --git a/types/bp_block.go b/types/bp_block.go index 72c721e10..4fd05a3ad 100644 --- a/types/bp_block.go +++ b/types/bp_block.go @@ -22,6 +22,7 @@ import ( pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" "github.com/CovenantSQL/CovenantSQL/merkle" "github.com/CovenantSQL/CovenantSQL/proto" ) @@ -40,18 +41,23 @@ type BPHeader struct { // BPSignedHeader defines the main chain header with the signature. type BPSignedHeader struct { BPHeader - BlockHash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature + verifier.DefaultHashSignVerifierImpl } -// Verify verifies the signature. 
-func (s *BPSignedHeader) Verify() error { - if !s.Signature.Verify(s.BlockHash[:], s.Signee) { - return ErrSignVerification - } +func (s *BPSignedHeader) verifyHash() error { + return s.DefaultHashSignVerifierImpl.VerifyHash(&s.BPHeader) +} - return nil +func (s *BPSignedHeader) verify() error { + return s.DefaultHashSignVerifierImpl.Verify(&s.BPHeader) +} + +func (s *BPSignedHeader) setHash() error { + return s.DefaultHashSignVerifierImpl.SetHash(&s.BPHeader) +} + +func (s *BPSignedHeader) sign(signer *asymmetric.PrivateKey) error { + return s.DefaultHashSignVerifierImpl.Sign(&s.BPHeader, signer) } // BPBlock defines the main chain block. @@ -73,47 +79,45 @@ func (b *BPBlock) GetTxHashes() []*hash.Hash { return hs } -// PackAndSignBlock computes block's hash and sign it. -func (b *BPBlock) PackAndSignBlock(signer *asymmetric.PrivateKey) error { - hs := b.GetTxHashes() - - b.SignedHeader.MerkleRoot = *merkle.NewMerkle(hs).GetRoot() - enc, err := b.SignedHeader.BPHeader.MarshalHash() +func (b *BPBlock) setMerkleRoot() { + var merkleRoot = merkle.NewMerkle(b.GetTxHashes()).GetRoot() + b.SignedHeader.MerkleRoot = *merkleRoot +} - if err != nil { - return err +func (b *BPBlock) verifyMerkleRoot() error { + var merkleRoot = *merkle.NewMerkle(b.GetTxHashes()).GetRoot() + if !merkleRoot.IsEqual(&b.SignedHeader.MerkleRoot) { + return ErrMerkleRootVerification } + return nil +} - b.SignedHeader.BlockHash = hash.THashH(enc) - b.SignedHeader.Signature, err = signer.Sign(b.SignedHeader.BlockHash[:]) - b.SignedHeader.Signee = signer.PubKey() +// SetHash sets the block header hash, including the merkle root of the packed transactions. +func (b *BPBlock) SetHash() error { + b.setMerkleRoot() + return b.SignedHeader.setHash() +} - if err != nil { +// VerifyHash verifies the block header hash, including the merkle root of the packed transactions. 
+func (b *BPBlock) VerifyHash() error { + if err := b.verifyMerkleRoot(); err != nil { return err } + return b.SignedHeader.verifyHash() +} - return nil +// PackAndSignBlock computes block's hash and sign it. +func (b *BPBlock) PackAndSignBlock(signer *asymmetric.PrivateKey) error { + b.setMerkleRoot() + return b.SignedHeader.sign(signer) } // Verify verifies whether the block is valid. func (b *BPBlock) Verify() error { - hs := b.GetTxHashes() - merkleRoot := *merkle.NewMerkle(hs).GetRoot() - if !merkleRoot.IsEqual(&b.SignedHeader.MerkleRoot) { - return ErrMerkleRootVerification - } - - enc, err := b.SignedHeader.BPHeader.MarshalHash() - if err != nil { + if err := b.verifyMerkleRoot(); err != nil { return err } - - h := hash.THashH(enc) - if !h.IsEqual(&b.SignedHeader.BlockHash) { - return ErrHashVerification - } - - return b.SignedHeader.Verify() + return b.SignedHeader.verify() } // Timestamp returns timestamp of block. @@ -133,5 +137,5 @@ func (b *BPBlock) ParentHash() *hash.Hash { // BlockHash returns the parent hash field of the block header. 
func (b *BPBlock) BlockHash() *hash.Hash { - return &b.SignedHeader.BlockHash + return &b.SignedHeader.DataHash } diff --git a/types/bp_block_gen.go b/types/bp_block_gen.go index 5afbe938a..2e9ac9701 100644 --- a/types/bp_block_gen.go +++ b/types/bp_block_gen.go @@ -11,13 +11,18 @@ func (z *BPBlock) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 + // map header, size 2 o = append(o, 0x82, 0x82) - if oTemp, err := z.SignedHeader.MarshalHash(); err != nil { + if oTemp, err := z.SignedHeader.BPHeader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + if oTemp, err := z.SignedHeader.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) o = hsp.AppendArrayHeader(o, uint32(len(z.Transactions))) for za0001 := range z.Transactions { if oTemp, err := z.Transactions[za0001].MarshalHash(); err != nil { @@ -31,7 +36,7 @@ func (z *BPBlock) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BPBlock) Msgsize() (s int) { - s = 1 + 13 + z.SignedHeader.Msgsize() + 13 + hsp.ArrayHeaderSize + s = 1 + 13 + 1 + 9 + z.SignedHeader.BPHeader.Msgsize() + 28 + z.SignedHeader.DefaultHashSignVerifierImpl.Msgsize() + 13 + hsp.ArrayHeaderSize for za0001 := range z.Transactions { s += z.Transactions[za0001].Msgsize() } @@ -43,34 +48,30 @@ func (z *BPHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 5 - o = append(o, 0x85, 0x85) + o = append(o, 0x85) if oTemp, err := z.MerkleRoot.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x85) if oTemp, err := z.ParentHash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x85) - o = hsp.AppendInt32(o, z.Version) - o = 
append(o, 0x85) if oTemp, err := z.Producer.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x85) o = hsp.AppendTime(o, z.Timestamp) + o = hsp.AppendInt32(o, z.Version) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BPHeader) Msgsize() (s int) { - s = 1 + 11 + z.MerkleRoot.Msgsize() + 11 + z.ParentHash.Msgsize() + 8 + hsp.Int32Size + 9 + z.Producer.Msgsize() + 10 + hsp.TimeSize + s = 1 + 11 + z.MerkleRoot.Msgsize() + 11 + z.ParentHash.Msgsize() + 9 + z.Producer.Msgsize() + 10 + hsp.TimeSize + 8 + hsp.Int32Size return } @@ -78,35 +79,14 @@ func (z *BPHeader) Msgsize() (s int) { func (z *BPSignedHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) - // map header, size 4 - o = append(o, 0x84, 0x84) - if z.Signee == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Signee.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x84) - if z.Signature == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Signature.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x84) + // map header, size 2 + o = append(o, 0x82) if oTemp, err := z.BPHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x84) - if oTemp, err := z.BlockHash.MarshalHash(); err != nil { + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -116,18 +96,6 @@ func (z *BPSignedHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BPSignedHeader) Msgsize() (s int) { - s = 1 + 7 - if z.Signee == nil { - s += hsp.NilSize - } else { - s += z.Signee.Msgsize() - } - s += 10 - if 
z.Signature == nil { - s += hsp.NilSize - } else { - s += z.Signature.Msgsize() - } - s += 9 + z.BPHeader.Msgsize() + 10 + z.BlockHash.Msgsize() + s = 1 + 9 + z.BPHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() return } diff --git a/types/bp_block_test.go b/types/bp_block_test.go index 60bf2d793..3c492dd97 100644 --- a/types/bp_block_test.go +++ b/types/bp_block_test.go @@ -22,7 +22,9 @@ import ( "reflect" "testing" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" "github.com/CovenantSQL/CovenantSQL/utils" + "github.com/pkg/errors" ) func TestHeader_MarshalUnmarshalBinary(t *testing.T) { @@ -114,14 +116,50 @@ func TestBlock_PackAndSignBlock(t *testing.T) { t.Fatalf("failed to generate block: %v", err) } + err = block.verifyMerkleRoot() + if err != nil { + t.Fatalf("failed to verify: %v", err) + } + + err = block.VerifyHash() + if err != nil { + t.Fatalf("failed to verify: %v", err) + } + + err = block.Verify() + if err != nil { + t.Fatalf("failed to verify: %v", err) + } + + block.SignedHeader.DataHash[0]++ err = block.Verify() + if errors.Cause(err) != verifier.ErrHashValueNotMatch { + t.Fatalf("unexpected error: %v", err) + } + err = block.VerifyHash() + if errors.Cause(err) != verifier.ErrHashValueNotMatch { + t.Fatalf("unexpected error: %v", err) + } + err = block.SetHash() + if err != nil { + t.Fatalf("failed to set hash: %v", err) + } + err = block.VerifyHash() if err != nil { t.Fatalf("failed to verify: %v", err) } - block.SignedHeader.BlockHash[0]++ + block.SignedHeader.MerkleRoot[0]++ err = block.Verify() - if err != ErrHashVerification { + if err != ErrMerkleRootVerification { + t.Fatalf("unexpected error: %v", err) + } + err = block.VerifyHash() + if err != ErrMerkleRootVerification { + t.Fatalf("unexpected error: %v", err) + } + err = block.verifyMerkleRoot() + if err != ErrMerkleRootVerification { t.Fatalf("unexpected error: %v", err) } diff --git a/types/bprpc.go b/types/bprpc.go index ba788d744..a68025267 100644 --- 
a/types/bprpc.go +++ b/types/bprpc.go @@ -18,6 +18,8 @@ package types import ( "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" + pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/proto" ) @@ -104,7 +106,9 @@ type NextAccountNonceResp struct { // AddTxReq defines a request of the AddTx RPC method. type AddTxReq struct { proto.Envelope - Tx interfaces.Transaction + + TTL uint32 // defines the broadcast TTL on BP network. + Tx interfaces.Transaction } // AddTxResp defines a response of the AddTx RPC method. @@ -168,3 +172,16 @@ type QuerySQLChainProfileResp struct { proto.Envelope Profile SQLChainProfile } + +// QueryTxStateReq defines a request of the QueryTxState RPC method. +type QueryTxStateReq struct { + proto.Envelope + Hash hash.Hash +} + +// QueryTxStateResp defines a response of the QueryTxState RPC method. +type QueryTxStateResp struct { + proto.Envelope + Hash hash.Hash + State pi.TransactionState +} diff --git a/types/createdb_gen.go b/types/createdb_gen.go index f72127991..1fe24735b 100644 --- a/types/createdb_gen.go +++ b/types/createdb_gen.go @@ -11,20 +11,18 @@ func (z *CreateDatabase) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) + o = append(o, 0x83) if oTemp, err := z.CreateDatabaseHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -34,7 +32,7 @@ func (z 
*CreateDatabase) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *CreateDatabase) Msgsize() (s int) { - s = 1 + 21 + z.CreateDatabaseHeader.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 21 + z.CreateDatabaseHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() return } @@ -43,39 +41,34 @@ func (z *CreateDatabaseHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 6 - o = append(o, 0x86, 0x86) - if oTemp, err := z.ResourceMeta.MarshalHash(); err != nil { + o = append(o, 0x86) + o = hsp.AppendUint64(o, z.AdvancePayment) + o = hsp.AppendUint64(o, z.GasPrice) + if oTemp, err := z.Nonce.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) - if oTemp, err := z.TokenType.MarshalHash(); err != nil { + if oTemp, err := z.Owner.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) - if oTemp, err := z.Nonce.MarshalHash(); err != nil { + if oTemp, err := z.ResourceMeta.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) - if oTemp, err := z.Owner.MarshalHash(); err != nil { + if oTemp, err := z.TokenType.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) - o = hsp.AppendUint64(o, z.GasPrice) - o = append(o, 0x86) - o = hsp.AppendUint64(o, z.AdvancePayment) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *CreateDatabaseHeader) Msgsize() (s int) { - s = 1 + 13 + z.ResourceMeta.Msgsize() + 10 + z.TokenType.Msgsize() + 6 + z.Nonce.Msgsize() + 6 + z.Owner.Msgsize() + 9 + hsp.Uint64Size + 15 + hsp.Uint64Size + s = 1 + 15 + 
hsp.Uint64Size + 9 + hsp.Uint64Size + 6 + z.Nonce.Msgsize() + 6 + z.Owner.Msgsize() + 13 + z.ResourceMeta.Msgsize() + 10 + z.TokenType.Msgsize() return } diff --git a/types/db_service_types_gen.go b/types/db_service_types_gen.go index 188fcada8..d072402d4 100644 --- a/types/db_service_types_gen.go +++ b/types/db_service_types_gen.go @@ -11,22 +11,21 @@ func (z *CreateDatabaseRequest) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - // map header, size 2 - // map header, size 1 - o = append(o, 0x82, 0x82, 0x82, 0x82, 0x81, 0x81) - if oTemp, err := z.Header.CreateDatabaseRequestHeader.ResourceMeta.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + // map header, size 2 + // map header, size 1 + o = append(o, 0x82, 0x81) + if oTemp, err := z.Header.CreateDatabaseRequestHeader.ResourceMeta.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Envelope.MarshalHash(); err != nil { + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -36,7 +35,7 @@ func (z *CreateDatabaseRequest) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *CreateDatabaseRequest) Msgsize() (s int) { - s = 1 + 7 + 1 + 28 + 1 + 13 + z.Header.CreateDatabaseRequestHeader.ResourceMeta.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() + s = 1 + 9 + z.Envelope.Msgsize() + 7 + 1 + 28 + 1 + 13 + z.Header.CreateDatabaseRequestHeader.ResourceMeta.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() return } @@ -45,7 +44,7 @@ func 
(z *CreateDatabaseRequestHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 1 - o = append(o, 0x81, 0x81) + o = append(o, 0x81) if oTemp, err := z.ResourceMeta.MarshalHash(); err != nil { return nil, err } else { @@ -65,22 +64,21 @@ func (z *CreateDatabaseResponse) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - // map header, size 2 - // map header, size 1 - o = append(o, 0x82, 0x82, 0x82, 0x82, 0x81, 0x81) - if oTemp, err := z.Header.CreateDatabaseResponseHeader.InstanceMeta.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + // map header, size 2 + // map header, size 1 + o = append(o, 0x82, 0x81) + if oTemp, err := z.Header.CreateDatabaseResponseHeader.InstanceMeta.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Envelope.MarshalHash(); err != nil { + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -90,7 +88,7 @@ func (z *CreateDatabaseResponse) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *CreateDatabaseResponse) Msgsize() (s int) { - s = 1 + 7 + 1 + 29 + 1 + 13 + z.Header.CreateDatabaseResponseHeader.InstanceMeta.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() + s = 1 + 9 + z.Envelope.Msgsize() + 7 + 1 + 29 + 1 + 13 + z.Header.CreateDatabaseResponseHeader.InstanceMeta.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() return } @@ -99,7 +97,7 @@ func (z *CreateDatabaseResponseHeader) MarshalHash() (o 
[]byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 1 - o = append(o, 0x81, 0x81) + o = append(o, 0x81) if oTemp, err := z.InstanceMeta.MarshalHash(); err != nil { return nil, err } else { @@ -119,22 +117,21 @@ func (z *DropDatabaseRequest) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - // map header, size 2 - // map header, size 1 - o = append(o, 0x82, 0x82, 0x82, 0x82, 0x81, 0x81) - if oTemp, err := z.Header.DropDatabaseRequestHeader.DatabaseID.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + // map header, size 2 + // map header, size 1 + o = append(o, 0x82, 0x81) + if oTemp, err := z.Header.DropDatabaseRequestHeader.DatabaseID.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Envelope.MarshalHash(); err != nil { + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -144,7 +141,7 @@ func (z *DropDatabaseRequest) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *DropDatabaseRequest) Msgsize() (s int) { - s = 1 + 7 + 1 + 26 + 1 + 11 + z.Header.DropDatabaseRequestHeader.DatabaseID.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() + s = 1 + 9 + z.Envelope.Msgsize() + 7 + 1 + 26 + 1 + 11 + z.Header.DropDatabaseRequestHeader.DatabaseID.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() return } @@ -153,7 +150,7 @@ func (z *DropDatabaseRequestHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map 
header, size 1 - o = append(o, 0x81, 0x81) + o = append(o, 0x81) if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { return nil, err } else { @@ -188,22 +185,21 @@ func (z *GetDatabaseRequest) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - // map header, size 2 - // map header, size 1 - o = append(o, 0x82, 0x82, 0x82, 0x82, 0x81, 0x81) - if oTemp, err := z.Header.GetDatabaseRequestHeader.DatabaseID.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + // map header, size 2 + // map header, size 1 + o = append(o, 0x82, 0x81) + if oTemp, err := z.Header.GetDatabaseRequestHeader.DatabaseID.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Envelope.MarshalHash(); err != nil { + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -213,7 +209,7 @@ func (z *GetDatabaseRequest) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *GetDatabaseRequest) Msgsize() (s int) { - s = 1 + 7 + 1 + 25 + 1 + 11 + z.Header.GetDatabaseRequestHeader.DatabaseID.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() + s = 1 + 9 + z.Envelope.Msgsize() + 7 + 1 + 25 + 1 + 11 + z.Header.GetDatabaseRequestHeader.DatabaseID.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() return } @@ -222,7 +218,7 @@ func (z *GetDatabaseRequestHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 1 - o = append(o, 0x81, 0x81) + o = append(o, 0x81) if oTemp, err := 
z.DatabaseID.MarshalHash(); err != nil { return nil, err } else { @@ -242,22 +238,21 @@ func (z *GetDatabaseResponse) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - // map header, size 2 - // map header, size 1 - o = append(o, 0x82, 0x82, 0x82, 0x82, 0x81, 0x81) - if oTemp, err := z.Header.GetDatabaseResponseHeader.InstanceMeta.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + // map header, size 2 + // map header, size 1 + o = append(o, 0x82, 0x81) + if oTemp, err := z.Header.GetDatabaseResponseHeader.InstanceMeta.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Envelope.MarshalHash(); err != nil { + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -267,7 +262,7 @@ func (z *GetDatabaseResponse) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *GetDatabaseResponse) Msgsize() (s int) { - s = 1 + 7 + 1 + 26 + 1 + 13 + z.Header.GetDatabaseResponseHeader.InstanceMeta.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() + s = 1 + 9 + z.Envelope.Msgsize() + 7 + 1 + 26 + 1 + 13 + z.Header.GetDatabaseResponseHeader.InstanceMeta.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() return } @@ -276,7 +271,7 @@ func (z *GetDatabaseResponseHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 1 - o = append(o, 0x81, 0x81) + o = append(o, 0x81) if oTemp, err := z.InstanceMeta.MarshalHash(); err != nil { return nil, err } else { @@ 
-297,13 +292,12 @@ func (z *SignedCreateDatabaseRequestHeader) MarshalHash() (o []byte, err error) o = hsp.Require(b, z.Msgsize()) // map header, size 2 // map header, size 1 - o = append(o, 0x82, 0x82, 0x81, 0x81) + o = append(o, 0x82, 0x81) if oTemp, err := z.CreateDatabaseRequestHeader.ResourceMeta.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { @@ -324,13 +318,12 @@ func (z *SignedCreateDatabaseResponseHeader) MarshalHash() (o []byte, err error) o = hsp.Require(b, z.Msgsize()) // map header, size 2 // map header, size 1 - o = append(o, 0x82, 0x82, 0x81, 0x81) + o = append(o, 0x82, 0x81) if oTemp, err := z.CreateDatabaseResponseHeader.InstanceMeta.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { @@ -350,15 +343,15 @@ func (z *SignedDropDatabaseRequestHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - // map header, size 1 - o = append(o, 0x82, 0x82, 0x81, 0x81) - if oTemp, err := z.DropDatabaseRequestHeader.DatabaseID.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + // map header, size 1 + o = append(o, 0x81) + if oTemp, err := z.DropDatabaseRequestHeader.DatabaseID.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -368,7 +361,7 @@ func (z *SignedDropDatabaseRequestHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z 
*SignedDropDatabaseRequestHeader) Msgsize() (s int) { - s = 1 + 26 + 1 + 11 + z.DropDatabaseRequestHeader.DatabaseID.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 26 + 1 + 11 + z.DropDatabaseRequestHeader.DatabaseID.Msgsize() return } @@ -377,15 +370,15 @@ func (z *SignedGetDatabaseRequestHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - // map header, size 1 - o = append(o, 0x82, 0x82, 0x81, 0x81) - if oTemp, err := z.GetDatabaseRequestHeader.DatabaseID.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + // map header, size 1 + o = append(o, 0x81) + if oTemp, err := z.GetDatabaseRequestHeader.DatabaseID.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -395,7 +388,7 @@ func (z *SignedGetDatabaseRequestHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SignedGetDatabaseRequestHeader) Msgsize() (s int) { - s = 1 + 25 + 1 + 11 + z.GetDatabaseRequestHeader.DatabaseID.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 25 + 1 + 11 + z.GetDatabaseRequestHeader.DatabaseID.Msgsize() return } @@ -404,15 +397,15 @@ func (z *SignedGetDatabaseResponseHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - // map header, size 1 - o = append(o, 0x82, 0x82, 0x81, 0x81) - if oTemp, err := z.GetDatabaseResponseHeader.InstanceMeta.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return 
nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + // map header, size 1 + o = append(o, 0x81) + if oTemp, err := z.GetDatabaseResponseHeader.InstanceMeta.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -422,6 +415,6 @@ func (z *SignedGetDatabaseResponseHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SignedGetDatabaseResponseHeader) Msgsize() (s int) { - s = 1 + 26 + 1 + 13 + z.GetDatabaseResponseHeader.InstanceMeta.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 26 + 1 + 13 + z.GetDatabaseResponseHeader.InstanceMeta.Msgsize() return } diff --git a/types/init_service_type.go b/types/init_service_type.go index 3267445b5..5a94439df 100644 --- a/types/init_service_type.go +++ b/types/init_service_type.go @@ -39,6 +39,7 @@ type ResourceMeta struct { EncryptionKey string // encryption key for database instance UseEventualConsistency bool // use eventual consistency replication if enabled ConsistencyLevel float64 // customized strong consistency level + IsolationLevel int // customized isolation level } // ServiceInstance defines single instance to be initialized. 
diff --git a/types/init_service_type_gen.go b/types/init_service_type_gen.go index b289ec50d..dcb38de3c 100644 --- a/types/init_service_type_gen.go +++ b/types/init_service_type_gen.go @@ -11,7 +11,7 @@ func (z *InitService) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 1 - o = append(o, 0x81, 0x81) + o = append(o, 0x81) if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { @@ -31,8 +31,18 @@ func (z *InitServiceResponse) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 1 - o = append(o, 0x81, 0x81) - if oTemp, err := z.Header.MarshalHash(); err != nil { + // map header, size 2 + // map header, size 1 + o = append(o, 0x81, 0x82, 0x81) + o = hsp.AppendArrayHeader(o, uint32(len(z.Header.InitServiceResponseHeader.Instances))) + for za0001 := range z.Header.InitServiceResponseHeader.Instances { + if oTemp, err := z.Header.InitServiceResponseHeader.Instances[za0001].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -42,7 +52,11 @@ func (z *InitServiceResponse) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *InitServiceResponse) Msgsize() (s int) { - s = 1 + 7 + z.Header.Msgsize() + s = 1 + 7 + 1 + 26 + 1 + 10 + hsp.ArrayHeaderSize + for za0001 := range z.Header.InitServiceResponseHeader.Instances { + s += z.Header.InitServiceResponseHeader.Instances[za0001].Msgsize() + } + s += 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() return } @@ -51,7 +65,7 @@ func (z *InitServiceResponseHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 1 - o = append(o, 0x81, 0x81) + o = append(o, 0x81) o = 
hsp.AppendArrayHeader(o, uint32(len(z.Instances))) for za0001 := range z.Instances { if oTemp, err := z.Instances[za0001].MarshalHash(); err != nil { @@ -76,8 +90,15 @@ func (z *InitServiceResponseHeader) Msgsize() (s int) { func (z *ResourceMeta) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) - // map header, size 8 - o = append(o, 0x88, 0x88) + // map header, size 9 + o = append(o, 0x89) + o = hsp.AppendFloat64(o, z.ConsistencyLevel) + o = hsp.AppendString(o, z.EncryptionKey) + o = hsp.AppendInt(o, z.IsolationLevel) + o = hsp.AppendFloat64(o, z.LoadAvgPerCPU) + o = hsp.AppendUint64(o, z.Memory) + o = hsp.AppendUint16(o, z.Node) + o = hsp.AppendUint64(o, z.Space) o = hsp.AppendArrayHeader(o, uint32(len(z.TargetMiners))) for za0001 := range z.TargetMiners { if oTemp, err := z.TargetMiners[za0001].MarshalHash(); err != nil { @@ -86,30 +107,17 @@ func (z *ResourceMeta) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x88) o = hsp.AppendBool(o, z.UseEventualConsistency) - o = append(o, 0x88) - o = hsp.AppendFloat64(o, z.ConsistencyLevel) - o = append(o, 0x88) - o = hsp.AppendFloat64(o, z.LoadAvgPerCPU) - o = append(o, 0x88) - o = hsp.AppendString(o, z.EncryptionKey) - o = append(o, 0x88) - o = hsp.AppendUint16(o, z.Node) - o = append(o, 0x88) - o = hsp.AppendUint64(o, z.Space) - o = append(o, 0x88) - o = hsp.AppendUint64(o, z.Memory) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *ResourceMeta) Msgsize() (s int) { - s = 1 + 13 + hsp.ArrayHeaderSize + s = 1 + 17 + hsp.Float64Size + 14 + hsp.StringPrefixSize + len(z.EncryptionKey) + 15 + hsp.IntSize + 14 + hsp.Float64Size + 7 + hsp.Uint64Size + 5 + hsp.Uint16Size + 6 + hsp.Uint64Size + 13 + hsp.ArrayHeaderSize for za0001 := range z.TargetMiners { s += z.TargetMiners[za0001].Msgsize() } - s += 23 + hsp.BoolSize + 17 + hsp.Float64Size + 14 + hsp.Float64Size + 14 + 
hsp.StringPrefixSize + len(z.EncryptionKey) + 5 + hsp.Uint16Size + 6 + hsp.Uint64Size + 7 + hsp.Uint64Size + s += 23 + hsp.BoolSize return } @@ -118,7 +126,12 @@ func (z *ServiceInstance) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 4 - o = append(o, 0x84, 0x84) + o = append(o, 0x84) + if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } if z.GenesisBlock == nil { o = hsp.AppendNil(o) } else { @@ -128,7 +141,6 @@ func (z *ServiceInstance) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x84) if z.Peers == nil { o = hsp.AppendNil(o) } else { @@ -138,24 +150,17 @@ func (z *ServiceInstance) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x84) if oTemp, err := z.ResourceMeta.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x84) - if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *ServiceInstance) Msgsize() (s int) { - s = 1 + 13 + s = 1 + 11 + z.DatabaseID.Msgsize() + 13 if z.GenesisBlock == nil { s += hsp.NilSize } else { @@ -167,7 +172,7 @@ func (z *ServiceInstance) Msgsize() (s int) { } else { s += z.Peers.Msgsize() } - s += 13 + z.ResourceMeta.Msgsize() + 11 + z.DatabaseID.Msgsize() + s += 13 + z.ResourceMeta.Msgsize() return } @@ -176,8 +181,14 @@ func (z *SignedInitServiceResponseHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } // map header, size 1 - o = append(o, 0x82, 0x82, 0x81, 0x81) + o = 
append(o, 0x81) o = hsp.AppendArrayHeader(o, uint32(len(z.InitServiceResponseHeader.Instances))) for za0001 := range z.InitServiceResponseHeader.Instances { if oTemp, err := z.InitServiceResponseHeader.Instances[za0001].MarshalHash(); err != nil { @@ -186,21 +197,14 @@ func (z *SignedInitServiceResponseHeader) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x82) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SignedInitServiceResponseHeader) Msgsize() (s int) { - s = 1 + 26 + 1 + 10 + hsp.ArrayHeaderSize + s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 26 + 1 + 10 + hsp.ArrayHeaderSize for za0001 := range z.InitServiceResponseHeader.Instances { s += z.InitServiceResponseHeader.Instances[za0001].Msgsize() } - s += 28 + z.DefaultHashSignVerifierImpl.Msgsize() return } diff --git a/types/issuekeys_gen.go b/types/issuekeys_gen.go index 79e86df52..829f88745 100644 --- a/types/issuekeys_gen.go +++ b/types/issuekeys_gen.go @@ -11,20 +11,18 @@ func (z *IssueKeys) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) - if oTemp, err := z.IssueKeysHeader.MarshalHash(); err != nil { + o = append(o, 0x83) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { + if oTemp, err := z.IssueKeysHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { return nil, err } else { o = 
hsp.AppendBytes(o, oTemp) @@ -34,7 +32,7 @@ func (z *IssueKeys) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *IssueKeys) Msgsize() (s int) { - s = 1 + 16 + z.IssueKeysHeader.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 16 + z.IssueKeysHeader.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() return } @@ -43,26 +41,23 @@ func (z *IssueKeysHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) + o = append(o, 0x83) o = hsp.AppendArrayHeader(o, uint32(len(z.MinerKeys))) for za0001 := range z.MinerKeys { // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) if oTemp, err := z.MinerKeys[za0001].Miner.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) o = hsp.AppendString(o, z.MinerKeys[za0001].EncryptionKey) } - o = append(o, 0x83) if oTemp, err := z.Nonce.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) if oTemp, err := z.TargetSQLChain.MarshalHash(); err != nil { return nil, err } else { @@ -86,19 +81,18 @@ func (z *MinerKey) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) + o = hsp.AppendString(o, z.EncryptionKey) if oTemp, err := z.Miner.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - o = hsp.AppendString(o, z.EncryptionKey) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *MinerKey) Msgsize() (s int) { - s = 1 + 6 + z.Miner.Msgsize() + 14 + hsp.StringPrefixSize + len(z.EncryptionKey) + s = 1 + 14 + 
hsp.StringPrefixSize + len(z.EncryptionKey) + 6 + z.Miner.Msgsize() return } diff --git a/types/no_ack_report_type.go b/types/no_ack_report_type.go deleted file mode 100644 index 29187a451..000000000 --- a/types/no_ack_report_type.go +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package types - -import ( - "time" - - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/verifier" - "github.com/CovenantSQL/CovenantSQL/proto" -) - -//go:generate hsp - -// NoAckReportHeader defines worker issued client no ack report. -type NoAckReportHeader struct { - NodeID proto.NodeID // reporter node id - Timestamp time.Time // time in UTC zone - Response SignedResponseHeader -} - -// SignedNoAckReportHeader defines worker worker issued/signed client no ack report. -type SignedNoAckReportHeader struct { - NoAckReportHeader - verifier.DefaultHashSignVerifierImpl -} - -// NoAckReport defines whole worker no client ack report. -type NoAckReport struct { - proto.Envelope - Header SignedNoAckReportHeader -} - -// AggrNoAckReportHeader defines worker leader aggregated client no ack report. 
-type AggrNoAckReportHeader struct { - NodeID proto.NodeID // aggregated report node id - Timestamp time.Time // time in UTC zone - Reports []SignedNoAckReportHeader // no-ack reports - Peers *proto.Peers // serving peers during report -} - -// SignedAggrNoAckReportHeader defines worker leader aggregated/signed client no ack report. -type SignedAggrNoAckReportHeader struct { - AggrNoAckReportHeader - verifier.DefaultHashSignVerifierImpl -} - -// AggrNoAckReport defines whole worker leader no client ack report. -type AggrNoAckReport struct { - proto.Envelope - Header SignedAggrNoAckReportHeader -} - -// Verify checks hash and signature in signed no ack report header. -func (sh *SignedNoAckReportHeader) Verify() (err error) { - // verify original response - if err = sh.Response.Verify(); err != nil { - return - } - - return sh.DefaultHashSignVerifierImpl.Verify(&sh.NoAckReportHeader) -} - -// Sign the request. -func (sh *SignedNoAckReportHeader) Sign(signer *asymmetric.PrivateKey) (err error) { - // verify original response - if err = sh.Response.Verify(); err != nil { - return - } - - return sh.DefaultHashSignVerifierImpl.Sign(&sh.NoAckReportHeader, signer) -} - -// Verify checks hash and signature in whole no ack report. -func (r *NoAckReport) Verify() error { - return r.Header.Verify() -} - -// Sign the request. -func (r *NoAckReport) Sign(signer *asymmetric.PrivateKey) error { - return r.Header.Sign(signer) -} - -// Verify checks hash and signature in aggregated no ack report. -func (sh *SignedAggrNoAckReportHeader) Verify() (err error) { - // verify original reports - for _, r := range sh.Reports { - if err = r.Verify(); err != nil { - return - } - } - - return sh.DefaultHashSignVerifierImpl.Verify(&sh.AggrNoAckReportHeader) -} - -// Sign the request. 
-func (sh *SignedAggrNoAckReportHeader) Sign(signer *asymmetric.PrivateKey) (err error) { - for _, r := range sh.Reports { - if err = r.Verify(); err != nil { - return - } - } - - return sh.DefaultHashSignVerifierImpl.Sign(&sh.AggrNoAckReportHeader, signer) -} - -// Verify the whole aggregation no ack report. -func (r *AggrNoAckReport) Verify() (err error) { - return r.Header.Verify() -} - -// Sign the request. -func (r *AggrNoAckReport) Sign(signer *asymmetric.PrivateKey) error { - return r.Header.Sign(signer) -} diff --git a/types/no_ack_report_type_gen.go b/types/no_ack_report_type_gen.go deleted file mode 100644 index e9e89abc6..000000000 --- a/types/no_ack_report_type_gen.go +++ /dev/null @@ -1,206 +0,0 @@ -package types - -// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. - -import ( - hsp "github.com/CovenantSQL/HashStablePack/marshalhash" -) - -// MarshalHash marshals for hash -func (z *AggrNoAckReport) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 2 - // map header, size 2 - o = append(o, 0x82, 0x82, 0x82, 0x82) - if oTemp, err := z.Header.AggrNoAckReportHeader.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x82) - if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x82) - if oTemp, err := z.Envelope.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *AggrNoAckReport) Msgsize() (s int) { - s = 1 + 7 + 1 + 22 + z.Header.AggrNoAckReportHeader.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() - return -} - -// MarshalHash marshals for hash -func (z *AggrNoAckReportHeader) MarshalHash() (o []byte, err 
error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 4 - o = append(o, 0x84, 0x84) - if z.Peers == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Peers.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x84) - o = hsp.AppendArrayHeader(o, uint32(len(z.Reports))) - for za0001 := range z.Reports { - if oTemp, err := z.Reports[za0001].MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x84) - if oTemp, err := z.NodeID.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x84) - o = hsp.AppendTime(o, z.Timestamp) - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *AggrNoAckReportHeader) Msgsize() (s int) { - s = 1 + 6 - if z.Peers == nil { - s += hsp.NilSize - } else { - s += z.Peers.Msgsize() - } - s += 8 + hsp.ArrayHeaderSize - for za0001 := range z.Reports { - s += z.Reports[za0001].Msgsize() - } - s += 7 + z.NodeID.Msgsize() + 10 + hsp.TimeSize - return -} - -// MarshalHash marshals for hash -func (z *NoAckReport) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 2 - o = append(o, 0x82, 0x82) - if oTemp, err := z.Header.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x82) - if oTemp, err := z.Envelope.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *NoAckReport) Msgsize() (s int) { - s = 1 + 7 + z.Header.Msgsize() + 9 + z.Envelope.Msgsize() - return -} - -// MarshalHash marshals for hash -func (z *NoAckReportHeader) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, 
z.Msgsize()) - // map header, size 3 - o = append(o, 0x83, 0x83) - if oTemp, err := z.Response.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x83) - if oTemp, err := z.NodeID.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x83) - o = hsp.AppendTime(o, z.Timestamp) - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *NoAckReportHeader) Msgsize() (s int) { - s = 1 + 9 + z.Response.Msgsize() + 7 + z.NodeID.Msgsize() + 10 + hsp.TimeSize - return -} - -// MarshalHash marshals for hash -func (z *SignedAggrNoAckReportHeader) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 2 - o = append(o, 0x82, 0x82) - if oTemp, err := z.AggrNoAckReportHeader.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x82) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *SignedAggrNoAckReportHeader) Msgsize() (s int) { - s = 1 + 22 + z.AggrNoAckReportHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() - return -} - -// MarshalHash marshals for hash -func (z *SignedNoAckReportHeader) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 2 - // map header, size 3 - o = append(o, 0x82, 0x82, 0x83, 0x83) - if oTemp, err := z.NoAckReportHeader.NodeID.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x83) - o = hsp.AppendTime(o, z.NoAckReportHeader.Timestamp) - o = append(o, 0x83) - if oTemp, err := z.NoAckReportHeader.Response.MarshalHash(); err != 
nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x82) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *SignedNoAckReportHeader) Msgsize() (s int) { - s = 1 + 18 + 1 + 7 + z.NoAckReportHeader.NodeID.Msgsize() + 10 + hsp.TimeSize + 9 + z.NoAckReportHeader.Response.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() - return -} diff --git a/types/no_ack_report_type_gen_test.go b/types/no_ack_report_type_gen_test.go deleted file mode 100644 index 26a9ce408..000000000 --- a/types/no_ack_report_type_gen_test.go +++ /dev/null @@ -1,232 +0,0 @@ -package types - -// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. - -import ( - "bytes" - "crypto/rand" - "encoding/binary" - "testing" -) - -func TestMarshalHashAggrNoAckReport(t *testing.T) { - v := AggrNoAckReport{} - binary.Read(rand.Reader, binary.BigEndian, &v) - bts1, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - bts2, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(bts1, bts2) { - t.Fatal("hash not stable") - } -} - -func BenchmarkMarshalHashAggrNoAckReport(b *testing.B) { - v := AggrNoAckReport{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalHash() - } -} - -func BenchmarkAppendMsgAggrNoAckReport(b *testing.B) { - v := AggrNoAckReport{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalHash() - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalHash() - } -} - -func TestMarshalHashAggrNoAckReportHeader(t *testing.T) { - v := AggrNoAckReportHeader{} - binary.Read(rand.Reader, binary.BigEndian, &v) - bts1, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - bts2, err := v.MarshalHash() - if 
err != nil { - t.Fatal(err) - } - if !bytes.Equal(bts1, bts2) { - t.Fatal("hash not stable") - } -} - -func BenchmarkMarshalHashAggrNoAckReportHeader(b *testing.B) { - v := AggrNoAckReportHeader{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalHash() - } -} - -func BenchmarkAppendMsgAggrNoAckReportHeader(b *testing.B) { - v := AggrNoAckReportHeader{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalHash() - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalHash() - } -} - -func TestMarshalHashNoAckReport(t *testing.T) { - v := NoAckReport{} - binary.Read(rand.Reader, binary.BigEndian, &v) - bts1, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - bts2, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(bts1, bts2) { - t.Fatal("hash not stable") - } -} - -func BenchmarkMarshalHashNoAckReport(b *testing.B) { - v := NoAckReport{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalHash() - } -} - -func BenchmarkAppendMsgNoAckReport(b *testing.B) { - v := NoAckReport{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalHash() - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalHash() - } -} - -func TestMarshalHashNoAckReportHeader(t *testing.T) { - v := NoAckReportHeader{} - binary.Read(rand.Reader, binary.BigEndian, &v) - bts1, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - bts2, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(bts1, bts2) { - t.Fatal("hash not stable") - } -} - -func BenchmarkMarshalHashNoAckReportHeader(b *testing.B) { - v := NoAckReportHeader{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalHash() - } -} - -func BenchmarkAppendMsgNoAckReportHeader(b *testing.B) { - v := NoAckReportHeader{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = 
v.MarshalHash() - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalHash() - } -} - -func TestMarshalHashSignedAggrNoAckReportHeader(t *testing.T) { - v := SignedAggrNoAckReportHeader{} - binary.Read(rand.Reader, binary.BigEndian, &v) - bts1, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - bts2, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(bts1, bts2) { - t.Fatal("hash not stable") - } -} - -func BenchmarkMarshalHashSignedAggrNoAckReportHeader(b *testing.B) { - v := SignedAggrNoAckReportHeader{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalHash() - } -} - -func BenchmarkAppendMsgSignedAggrNoAckReportHeader(b *testing.B) { - v := SignedAggrNoAckReportHeader{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalHash() - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalHash() - } -} - -func TestMarshalHashSignedNoAckReportHeader(t *testing.T) { - v := SignedNoAckReportHeader{} - binary.Read(rand.Reader, binary.BigEndian, &v) - bts1, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - bts2, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(bts1, bts2) { - t.Fatal("hash not stable") - } -} - -func BenchmarkMarshalHashSignedNoAckReportHeader(b *testing.B) { - v := SignedNoAckReportHeader{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalHash() - } -} - -func BenchmarkAppendMsgSignedNoAckReportHeader(b *testing.B) { - v := SignedNoAckReportHeader{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalHash() - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalHash() - } -} diff --git a/types/provideservice_gen.go b/types/provideservice_gen.go index c3925d3b1..78531baea 100644 --- a/types/provideservice_gen.go +++ 
b/types/provideservice_gen.go @@ -11,20 +11,18 @@ func (z *ProvideService) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) - if oTemp, err := z.ProvideServiceHeader.MarshalHash(); err != nil { + o = append(o, 0x83) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { + if oTemp, err := z.ProvideServiceHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -34,7 +32,7 @@ func (z *ProvideService) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *ProvideService) Msgsize() (s int) { - s = 1 + 21 + z.ProvideServiceHeader.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 21 + z.ProvideServiceHeader.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() return } @@ -43,13 +41,21 @@ func (z *ProvideServiceHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 8 - o = append(o, 0x88, 0x88) - if oTemp, err := z.TokenType.MarshalHash(); err != nil { + o = append(o, 0x88) + o = hsp.AppendUint64(o, z.GasPrice) + o = hsp.AppendFloat64(o, z.LoadAvgPerCPU) + o = hsp.AppendUint64(o, z.Memory) + if oTemp, err := z.NodeID.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x88) + if oTemp, err := z.Nonce.MarshalHash(); err != nil { + return nil, err + } else { + o = 
hsp.AppendBytes(o, oTemp) + } + o = hsp.AppendUint64(o, z.Space) o = hsp.AppendArrayHeader(o, uint32(len(z.TargetUser))) for za0001 := range z.TargetUser { if oTemp, err := z.TargetUser[za0001].MarshalHash(); err != nil { @@ -58,35 +64,20 @@ func (z *ProvideServiceHeader) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x88) - o = hsp.AppendFloat64(o, z.LoadAvgPerCPU) - o = append(o, 0x88) - if oTemp, err := z.Nonce.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x88) - if oTemp, err := z.NodeID.MarshalHash(); err != nil { + if oTemp, err := z.TokenType.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x88) - o = hsp.AppendUint64(o, z.GasPrice) - o = append(o, 0x88) - o = hsp.AppendUint64(o, z.Space) - o = append(o, 0x88) - o = hsp.AppendUint64(o, z.Memory) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *ProvideServiceHeader) Msgsize() (s int) { - s = 1 + 10 + z.TokenType.Msgsize() + 11 + hsp.ArrayHeaderSize + s = 1 + 9 + hsp.Uint64Size + 14 + hsp.Float64Size + 7 + hsp.Uint64Size + 7 + z.NodeID.Msgsize() + 6 + z.Nonce.Msgsize() + 6 + hsp.Uint64Size + 11 + hsp.ArrayHeaderSize for za0001 := range z.TargetUser { s += z.TargetUser[za0001].Msgsize() } - s += 14 + hsp.Float64Size + 6 + z.Nonce.Msgsize() + 7 + z.NodeID.Msgsize() + 9 + hsp.Uint64Size + 6 + hsp.Uint64Size + 7 + hsp.Uint64Size + s += 10 + z.TokenType.Msgsize() return } diff --git a/types/request_type.go b/types/request_type.go index 241d7c982..12f121277 100644 --- a/types/request_type.go +++ b/types/request_type.go @@ -69,6 +69,15 @@ type RequestHeader struct { QueriesHash hash.Hash `json:"qh"` // hash of query payload } +// GetQueryKey returns a unique query key of this request. 
+func (h *RequestHeader) GetQueryKey() QueryKey { + return QueryKey{ + NodeID: h.NodeID, + ConnectionID: h.ConnectionID, + SeqNo: h.SeqNo, + } +} + // QueryKey defines an unique query key of a request. type QueryKey struct { NodeID proto.NodeID `json:"id"` @@ -77,8 +86,8 @@ type QueryKey struct { } // String implements fmt.Stringer for logging purpose. -func (k *QueryKey) String() string { - return fmt.Sprintf("%s#%016x#%016x", string(k.NodeID[:8]), k.ConnectionID, k.SeqNo) +func (k QueryKey) String() string { + return fmt.Sprintf("%s#%016x#%016x", string(k.NodeID[len(k.NodeID)-8:]), k.ConnectionID, k.SeqNo) } // SignedRequestHeader defines a signed query request header. @@ -90,8 +99,9 @@ type SignedRequestHeader struct { // Request defines a complete query request. type Request struct { proto.Envelope - Header SignedRequestHeader `json:"h"` - Payload RequestPayload `json:"p"` + Header SignedRequestHeader `json:"h"` + Payload RequestPayload `json:"p"` + _marshalCache []byte `json:"-"` } // String implements fmt.Stringer for logging purpose. @@ -139,11 +149,12 @@ func (r *Request) Sign(signer *asymmetric.PrivateKey) (err error) { return r.Header.Sign(signer) } -// GetQueryKey returns a unique query key of this request. 
-func (sh *SignedRequestHeader) GetQueryKey() QueryKey { - return QueryKey{ - NodeID: sh.NodeID, - ConnectionID: sh.ConnectionID, - SeqNo: sh.SeqNo, - } +// SetMarshalCache sets _marshalCache +func (r *Request) SetMarshalCache(buf []byte) { + r._marshalCache = buf +} + +// GetMarshalCache gets _marshalCache +func (r *Request) GetMarshalCache() (buf []byte) { + return r._marshalCache } diff --git a/types/request_type_gen.go b/types/request_type_gen.go index 709e66bb9..d8cc7da03 100644 --- a/types/request_type_gen.go +++ b/types/request_type_gen.go @@ -11,19 +11,18 @@ func (z NamedArg) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) + o = hsp.AppendString(o, z.Name) o, err = hsp.AppendIntf(o, z.Value) if err != nil { return } - o = append(o, 0x82) - o = hsp.AppendString(o, z.Name) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z NamedArg) Msgsize() (s int) { - s = 1 + 6 + hsp.GuessSize(z.Value) + 5 + hsp.StringPrefixSize + len(z.Name) + s = 1 + 5 + hsp.StringPrefixSize + len(z.Name) + 6 + hsp.GuessSize(z.Value) return } @@ -32,19 +31,17 @@ func (z *Query) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) o = hsp.AppendArrayHeader(o, uint32(len(z.Args))) for za0001 := range z.Args { // map header, size 2 - o = append(o, 0x82, 0x82) - o = hsp.AppendString(o, z.Args[za0001].Name) o = append(o, 0x82) + o = hsp.AppendString(o, z.Args[za0001].Name) o, err = hsp.AppendIntf(o, z.Args[za0001].Value) if err != nil { return } } - o = append(o, 0x82) o = hsp.AppendString(o, z.Pattern) return } @@ -64,22 +61,20 @@ func (z *QueryKey) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) + o = append(o, 0x83) + o = 
hsp.AppendUint64(o, z.ConnectionID) if oTemp, err := z.NodeID.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - o = hsp.AppendUint64(o, z.ConnectionID) - o = append(o, 0x83) o = hsp.AppendUint64(o, z.SeqNo) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *QueryKey) Msgsize() (s int) { - s = 1 + 7 + z.NodeID.Msgsize() + 13 + hsp.Uint64Size + 6 + hsp.Uint64Size + s = 1 + 13 + hsp.Uint64Size + 7 + z.NodeID.Msgsize() + 6 + hsp.Uint64Size return } @@ -102,45 +97,43 @@ func (z *Request) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - // map header, size 1 - o = append(o, 0x83, 0x83, 0x81, 0x81) - o = hsp.AppendArrayHeader(o, uint32(len(z.Payload.Queries))) - for za0001 := range z.Payload.Queries { - if oTemp, err := z.Payload.Queries[za0001].MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - // map header, size 2 - o = append(o, 0x83, 0x82, 0x82) - if oTemp, err := z.Header.RequestHeader.MarshalHash(); err != nil { + o = append(o, 0x83) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } + // map header, size 2 o = append(o, 0x82) - if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.Header.RequestHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := z.Envelope.MarshalHash(); err != nil { + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } + // map header, size 1 + o = append(o, 0x81) + o = hsp.AppendArrayHeader(o, uint32(len(z.Payload.Queries))) + for za0001 := range z.Payload.Queries { + if oTemp, err := z.Payload.Queries[za0001].MarshalHash(); err != nil { + 
return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Request) Msgsize() (s int) { - s = 1 + 8 + 1 + 8 + hsp.ArrayHeaderSize + s = 1 + 9 + z.Envelope.Msgsize() + 7 + 1 + 14 + z.Header.RequestHeader.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 8 + 1 + 8 + hsp.ArrayHeaderSize for za0001 := range z.Payload.Queries { s += z.Payload.Queries[za0001].Msgsize() } - s += 7 + 1 + 14 + z.Header.RequestHeader.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() return } @@ -149,40 +142,33 @@ func (z *RequestHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 8 - o = append(o, 0x88, 0x88) - o = hsp.AppendInt32(o, int32(z.QueryType)) o = append(o, 0x88) - if oTemp, err := z.QueriesHash.MarshalHash(); err != nil { + o = hsp.AppendUint64(o, z.BatchCount) + o = hsp.AppendUint64(o, z.ConnectionID) + if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x88) - if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { + if oTemp, err := z.NodeID.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x88) - if oTemp, err := z.NodeID.MarshalHash(); err != nil { + if oTemp, err := z.QueriesHash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x88) - o = hsp.AppendTime(o, z.Timestamp) - o = append(o, 0x88) - o = hsp.AppendUint64(o, z.ConnectionID) - o = append(o, 0x88) + o = hsp.AppendInt32(o, int32(z.QueryType)) o = hsp.AppendUint64(o, z.SeqNo) - o = append(o, 0x88) - o = hsp.AppendUint64(o, z.BatchCount) + o = hsp.AppendTime(o, z.Timestamp) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z 
*RequestHeader) Msgsize() (s int) { - s = 1 + 10 + hsp.Int32Size + 12 + z.QueriesHash.Msgsize() + 11 + z.DatabaseID.Msgsize() + 7 + z.NodeID.Msgsize() + 10 + hsp.TimeSize + 13 + hsp.Uint64Size + 6 + hsp.Uint64Size + 11 + hsp.Uint64Size + s = 1 + 11 + hsp.Uint64Size + 13 + hsp.Uint64Size + 11 + z.DatabaseID.Msgsize() + 7 + z.NodeID.Msgsize() + 12 + z.QueriesHash.Msgsize() + 10 + hsp.Int32Size + 6 + hsp.Uint64Size + 10 + hsp.TimeSize return } @@ -191,7 +177,7 @@ func (z *RequestPayload) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 1 - o = append(o, 0x81, 0x81) + o = append(o, 0x81) o = hsp.AppendArrayHeader(o, uint32(len(z.Queries))) for za0001 := range z.Queries { if oTemp, err := z.Queries[za0001].MarshalHash(); err != nil { @@ -217,14 +203,13 @@ func (z *SignedRequestHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) - if oTemp, err := z.RequestHeader.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.RequestHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -234,6 +219,6 @@ func (z *SignedRequestHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SignedRequestHeader) Msgsize() (s int) { - s = 1 + 14 + z.RequestHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 14 + z.RequestHeader.Msgsize() return } diff --git a/types/response_type.go b/types/response_type.go index 968385eca..43b086c7e 100644 --- a/types/response_type.go +++ b/types/response_type.go @@ -19,9 
+19,7 @@ package types import ( "time" - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/crypto/verifier" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/pkg/errors" ) @@ -42,20 +40,49 @@ type ResponsePayload struct { // ResponseHeader defines a query response header. type ResponseHeader struct { - Request SignedRequestHeader `json:"r"` - NodeID proto.NodeID `json:"id"` // response node id - Timestamp time.Time `json:"t"` // time in UTC zone - RowCount uint64 `json:"c"` // response row count of payload - LogOffset uint64 `json:"o"` // request log offset - LastInsertID int64 `json:"l"` // insert insert id - AffectedRows int64 `json:"a"` // affected rows - PayloadHash hash.Hash `json:"dh"` // hash of query response payload + Request RequestHeader `json:"r"` + RequestHash hash.Hash `json:"rh"` + NodeID proto.NodeID `json:"id"` // response node id + Timestamp time.Time `json:"t"` // time in UTC zone + RowCount uint64 `json:"c"` // response row count of payload + LogOffset uint64 `json:"o"` // request log offset + LastInsertID int64 `json:"l"` // insert insert id + AffectedRows int64 `json:"a"` // affected rows + PayloadHash hash.Hash `json:"dh"` // hash of query response payload + ResponseAccount proto.AccountAddress `json:"aa"` // response account +} + +// GetRequestHash returns the request hash. +func (h *ResponseHeader) GetRequestHash() hash.Hash { + return h.RequestHash +} + +// GetRequestTimestamp returns the request timestamp. +func (h *ResponseHeader) GetRequestTimestamp() time.Time { + return h.Request.Timestamp } // SignedResponseHeader defines a signed query response header. type SignedResponseHeader struct { ResponseHeader - verifier.DefaultHashSignVerifierImpl + ResponseHash hash.Hash +} + +// Hash returns the response header hash. 
+func (sh *SignedResponseHeader) Hash() hash.Hash { + return sh.ResponseHash +} + +// VerifyHash verify the hash of the response. +func (sh *SignedResponseHeader) VerifyHash() (err error) { + return errors.Wrap(verifyHash(&sh.ResponseHeader, &sh.ResponseHash), + "verify response header hash failed") +} + +// BuildHash computes the hash of the response header. +func (sh *SignedResponseHeader) BuildHash() (err error) { + return errors.Wrap(buildHash(&sh.ResponseHeader, &sh.ResponseHash), + "compute response header hash failed") } // Response defines a complete query response. @@ -64,47 +91,32 @@ type Response struct { Payload ResponsePayload `json:"p"` } -// Verify checks hash and signature in response header. -func (sh *SignedResponseHeader) Verify() (err error) { - // verify original request header - if err = sh.Request.Verify(); err != nil { - return - } - - return sh.DefaultHashSignVerifierImpl.Verify(&sh.ResponseHeader) -} +// BuildHash computes the hash of the response. +func (r *Response) BuildHash() (err error) { + // set rows count + r.Header.RowCount = uint64(len(r.Payload.Rows)) -// Sign the request. -func (sh *SignedResponseHeader) Sign(signer *asymmetric.PrivateKey) (err error) { - // make sure original header is signed - if err = sh.Request.Verify(); err != nil { - err = errors.Wrapf(err, "SignedResponseHeader %v", sh) + // build hash in header + if err = buildHash(&r.Payload, &r.Header.PayloadHash); err != nil { + err = errors.Wrap(err, "compute response payload hash failed") return } - return sh.DefaultHashSignVerifierImpl.Sign(&sh.ResponseHeader, signer) + // compute header hash + return r.Header.BuildHash() } -// Verify checks hash and signature in whole response. -func (sh *Response) Verify() (err error) { - // verify data hash in header - if err = verifyHash(&sh.Payload, &sh.Header.PayloadHash); err != nil { +// VerifyHash verify the hash of the response. 
+func (r *Response) VerifyHash() (err error) { + if err = verifyHash(&r.Payload, &r.Header.PayloadHash); err != nil { + err = errors.Wrap(err, "verify response payload hash failed") return } - return sh.Header.Verify() + return r.Header.VerifyHash() } -// Sign the request. -func (sh *Response) Sign(signer *asymmetric.PrivateKey) (err error) { - // set rows count - sh.Header.RowCount = uint64(len(sh.Payload.Rows)) - - // build hash in header - if err = buildHash(&sh.Payload, &sh.Header.PayloadHash); err != nil { - return - } - - // sign the request - return sh.Header.Sign(signer) +// Hash returns the response header hash. +func (r *Response) Hash() hash.Hash { + return r.Header.Hash() } diff --git a/types/response_type_gen.go b/types/response_type_gen.go index 78c9db9c3..59fe39ae1 100644 --- a/types/response_type_gen.go +++ b/types/response_type_gen.go @@ -11,21 +11,19 @@ func (z *Response) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 + // map header, size 2 o = append(o, 0x82, 0x82) - if oTemp, err := z.Payload.MarshalHash(); err != nil { + if oTemp, err := z.Header.ResponseHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - // map header, size 2 - o = append(o, 0x82, 0x82, 0x82) - if oTemp, err := z.Header.ResponseHeader.MarshalHash(); err != nil { + if oTemp, err := z.Header.ResponseHash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.Payload.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -35,7 +33,7 @@ func (z *Response) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Response) Msgsize() (s int) { - s = 1 + 8 + z.Payload.Msgsize() + 7 + 1 + 15 + 
z.Header.ResponseHeader.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 7 + 1 + 15 + z.Header.ResponseHeader.Msgsize() + 13 + z.Header.ResponseHash.Msgsize() + 8 + z.Payload.Msgsize() return } @@ -43,41 +41,44 @@ func (z *Response) Msgsize() (s int) { func (z *ResponseHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) - // map header, size 8 - o = append(o, 0x88, 0x88) - if oTemp, err := z.Request.MarshalHash(); err != nil { + // map header, size 10 + o = append(o, 0x8a) + o = hsp.AppendInt64(o, z.AffectedRows) + o = hsp.AppendInt64(o, z.LastInsertID) + o = hsp.AppendUint64(o, z.LogOffset) + if oTemp, err := z.NodeID.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x88) if oTemp, err := z.PayloadHash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x88) - o = hsp.AppendInt64(o, z.LastInsertID) - o = append(o, 0x88) - o = hsp.AppendInt64(o, z.AffectedRows) - o = append(o, 0x88) - if oTemp, err := z.NodeID.MarshalHash(); err != nil { + if oTemp, err := z.Request.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + if oTemp, err := z.RequestHash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + if oTemp, err := z.ResponseAccount.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x88) - o = hsp.AppendTime(o, z.Timestamp) - o = append(o, 0x88) o = hsp.AppendUint64(o, z.RowCount) - o = append(o, 0x88) - o = hsp.AppendUint64(o, z.LogOffset) + o = hsp.AppendTime(o, z.Timestamp) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *ResponseHeader) Msgsize() (s int) { - s = 1 + 8 + z.Request.Msgsize() + 12 + z.PayloadHash.Msgsize() + 13 + hsp.Int64Size + 13 + hsp.Int64Size + 7 + z.NodeID.Msgsize() + 10 + 
hsp.TimeSize + 9 + hsp.Uint64Size + 10 + hsp.Uint64Size + s = 1 + 13 + hsp.Int64Size + 13 + hsp.Int64Size + 10 + hsp.Uint64Size + 7 + z.NodeID.Msgsize() + 12 + z.PayloadHash.Msgsize() + 8 + z.Request.Msgsize() + 12 + z.RequestHash.Msgsize() + 16 + z.ResponseAccount.Msgsize() + 9 + hsp.Uint64Size + 10 + hsp.TimeSize return } @@ -86,11 +87,19 @@ func (z *ResponsePayload) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) + o = append(o, 0x83) + o = hsp.AppendArrayHeader(o, uint32(len(z.Columns))) + for za0001 := range z.Columns { + o = hsp.AppendString(o, z.Columns[za0001]) + } + o = hsp.AppendArrayHeader(o, uint32(len(z.DeclTypes))) + for za0002 := range z.DeclTypes { + o = hsp.AppendString(o, z.DeclTypes[za0002]) + } o = hsp.AppendArrayHeader(o, uint32(len(z.Rows))) for za0003 := range z.Rows { // map header, size 1 - o = append(o, 0x81, 0x81) + o = append(o, 0x81) o = hsp.AppendArrayHeader(o, uint32(len(z.Rows[za0003].Values))) for za0004 := range z.Rows[za0003].Values { o, err = hsp.AppendIntf(o, z.Rows[za0003].Values[za0004]) @@ -99,29 +108,12 @@ func (z *ResponsePayload) MarshalHash() (o []byte, err error) { } } } - o = append(o, 0x83) - o = hsp.AppendArrayHeader(o, uint32(len(z.Columns))) - for za0001 := range z.Columns { - o = hsp.AppendString(o, z.Columns[za0001]) - } - o = append(o, 0x83) - o = hsp.AppendArrayHeader(o, uint32(len(z.DeclTypes))) - for za0002 := range z.DeclTypes { - o = hsp.AppendString(o, z.DeclTypes[za0002]) - } return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *ResponsePayload) Msgsize() (s int) { - s = 1 + 5 + hsp.ArrayHeaderSize - for za0003 := range z.Rows { - s += 1 + 7 + hsp.ArrayHeaderSize - for za0004 := range z.Rows[za0003].Values { - s += hsp.GuessSize(z.Rows[za0003].Values[za0004]) - } - } - s += 8 + hsp.ArrayHeaderSize + s = 1 + 8 + hsp.ArrayHeaderSize for za0001 := range 
z.Columns { s += hsp.StringPrefixSize + len(z.Columns[za0001]) } @@ -129,6 +121,13 @@ func (z *ResponsePayload) Msgsize() (s int) { for za0002 := range z.DeclTypes { s += hsp.StringPrefixSize + len(z.DeclTypes[za0002]) } + s += 5 + hsp.ArrayHeaderSize + for za0003 := range z.Rows { + s += 1 + 7 + hsp.ArrayHeaderSize + for za0004 := range z.Rows[za0003].Values { + s += hsp.GuessSize(z.Rows[za0003].Values[za0004]) + } + } return } @@ -137,7 +136,7 @@ func (z *ResponseRow) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 1 - o = append(o, 0x81, 0x81) + o = append(o, 0x81) o = hsp.AppendArrayHeader(o, uint32(len(z.Values))) for za0001 := range z.Values { o, err = hsp.AppendIntf(o, z.Values[za0001]) @@ -162,14 +161,13 @@ func (z *SignedResponseHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) - if oTemp, err := z.ResponseHeader.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.ResponseHash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.ResponseHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -179,6 +177,6 @@ func (z *SignedResponseHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SignedResponseHeader) Msgsize() (s int) { - s = 1 + 15 + z.ResponseHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 13 + z.ResponseHash.Msgsize() + 15 + z.ResponseHeader.Msgsize() return } diff --git a/types/transfer_gen.go b/types/transfer_gen.go index 8c918b251..0fcc22b0f 100644 --- a/types/transfer_gen.go +++ b/types/transfer_gen.go @@ -11,20 +11,18 @@ func (z *Transfer) MarshalHash() (o []byte, err 
error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) - if oTemp, err := z.TransferHeader.MarshalHash(); err != nil { + o = append(o, 0x83) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.TransferHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -34,7 +32,7 @@ func (z *Transfer) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Transfer) Msgsize() (s int) { - s = 1 + 15 + z.TransferHeader.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + 15 + z.TransferHeader.Msgsize() return } @@ -43,37 +41,33 @@ func (z *TransferHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 5 - o = append(o, 0x85, 0x85) - if oTemp, err := z.TokenType.MarshalHash(); err != nil { + o = append(o, 0x85) + o = hsp.AppendUint64(o, z.Amount) + if oTemp, err := z.Nonce.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x85) - if oTemp, err := z.Nonce.MarshalHash(); err != nil { + if oTemp, err := z.Receiver.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x85) if oTemp, err := z.Sender.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x85) - if oTemp, err := z.Receiver.MarshalHash(); err != nil { + if oTemp, err 
:= z.TokenType.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x85) - o = hsp.AppendUint64(o, z.Amount) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *TransferHeader) Msgsize() (s int) { - s = 1 + 10 + z.TokenType.Msgsize() + 6 + z.Nonce.Msgsize() + 7 + z.Sender.Msgsize() + 9 + z.Receiver.Msgsize() + 7 + hsp.Uint64Size + s = 1 + 7 + hsp.Uint64Size + 6 + z.Nonce.Msgsize() + 9 + z.Receiver.Msgsize() + 7 + z.Sender.Msgsize() + 10 + z.TokenType.Msgsize() return } diff --git a/types/types_test.go b/types/types_test.go index 29724c951..2d243e2ee 100644 --- a/types/types_test.go +++ b/types/types_test.go @@ -25,10 +25,8 @@ import ( "time" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/verifier" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/utils" - "github.com/pkg/errors" . "github.com/smartystreets/goconvey/convey" "github.com/ugorji/go/codec" ) @@ -155,21 +153,17 @@ func TestRequest_Sign(t *testing.T) { } func TestResponse_Sign(t *testing.T) { - privKey, _ := getCommKeys() - Convey("sign", t, func() { res := &Response{ Header: SignedResponseHeader{ ResponseHeader: ResponseHeader{ - Request: SignedRequestHeader{ - RequestHeader: RequestHeader{ - QueryType: WriteQuery, - NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), - DatabaseID: proto.DatabaseID("db1"), - ConnectionID: uint64(1), - SeqNo: uint64(2), - Timestamp: time.Now().UTC(), - }, + Request: RequestHeader{ + QueryType: WriteQuery, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), + DatabaseID: proto.DatabaseID("db1"), + ConnectionID: uint64(1), + SeqNo: uint64(2), + Timestamp: time.Now().UTC(), }, NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), Timestamp: time.Now().UTC(), @@ -216,20 
+210,8 @@ func TestResponse_Sign(t *testing.T) { var err error - // sign directly, embedded original request is not filled - err = res.Sign(privKey) - So(err, ShouldNotBeNil) - So(errors.Cause(err), ShouldBeIn, []error{ - verifier.ErrHashValueNotMatch, - verifier.ErrSignatureNotMatch, - }) - - // sign original request first - err = res.Header.Request.Sign(privKey) - So(err, ShouldBeNil) - - // sign again - err = res.Sign(privKey) + // sign + err = res.BuildHash() So(err, ShouldBeNil) // test hash @@ -238,7 +220,7 @@ func TestResponse_Sign(t *testing.T) { // verify Convey("verify", func() { - err = res.Verify() + err = res.BuildHash() So(err, ShouldBeNil) Convey("encode/decode verify", func() { @@ -247,25 +229,25 @@ func TestResponse_Sign(t *testing.T) { var r *Response err = utils.DecodeMsgPack(buf.Bytes(), &r) So(err, ShouldBeNil) - err = r.Verify() + err = r.VerifyHash() So(err, ShouldBeNil) }) Convey("request change", func() { res.Header.Request.BatchCount = 200 - err = res.Verify() + err = res.VerifyHash() So(err, ShouldNotBeNil) }) Convey("payload change", func() { res.Payload.DeclTypes[0] = "INT" - err = res.Verify() + err = res.VerifyHash() So(err, ShouldNotBeNil) }) Convey("header change", func() { res.Header.Timestamp = res.Header.Timestamp.Add(time.Second) - err = res.Verify() + err = res.VerifyHash() So(err, ShouldNotBeNil) }) }) @@ -279,22 +261,18 @@ func TestAck_Sign(t *testing.T) { ack := &Ack{ Header: SignedAckHeader{ AckHeader: AckHeader{ - Response: SignedResponseHeader{ - ResponseHeader: ResponseHeader{ - Request: SignedRequestHeader{ - RequestHeader: RequestHeader{ - QueryType: WriteQuery, - NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), - DatabaseID: proto.DatabaseID("db1"), - ConnectionID: uint64(1), - SeqNo: uint64(2), - Timestamp: time.Now().UTC(), - }, - }, - NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), - Timestamp: time.Now().UTC(), - RowCount: 
uint64(1), + Response: ResponseHeader{ + Request: RequestHeader{ + QueryType: WriteQuery, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), + DatabaseID: proto.DatabaseID("db1"), + ConnectionID: uint64(1), + SeqNo: uint64(2), + Timestamp: time.Now().UTC(), }, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), + Timestamp: time.Now().UTC(), + RowCount: uint64(1), }, NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), Timestamp: time.Now().UTC(), @@ -305,30 +283,13 @@ func TestAck_Sign(t *testing.T) { var err error Convey("get query key", func() { - key := ack.Header.SignedRequestHeader().GetQueryKey() - So(key.NodeID, ShouldEqual, ack.Header.SignedRequestHeader().NodeID) - So(key.ConnectionID, ShouldEqual, ack.Header.SignedRequestHeader().ConnectionID) - So(key.SeqNo, ShouldEqual, ack.Header.SignedRequestHeader().SeqNo) - }) - - // sign directly, embedded original response is not filled - err = ack.Sign(privKey, false) - So(err, ShouldBeNil) - err = ack.Sign(privKey, true) - So(err, ShouldNotBeNil) - So(errors.Cause(err), ShouldBeIn, []error{ - verifier.ErrHashValueNotMatch, - verifier.ErrSignatureNotMatch, + key := ack.Header.GetQueryKey() + So(key.NodeID, ShouldEqual, ack.Header.GetQueryKey().NodeID) + So(key.ConnectionID, ShouldEqual, ack.Header.GetQueryKey().ConnectionID) + So(key.SeqNo, ShouldEqual, ack.Header.GetQueryKey().SeqNo) }) - // sign nested structure, step by step - // this is not required during runtime - // during runtime, nested structures is signed and provided by peers - err = ack.Header.Response.Request.Sign(privKey) - So(err, ShouldBeNil) - err = ack.Header.Response.Sign(privKey) - So(err, ShouldBeNil) - err = ack.Sign(privKey, true) + err = ack.Sign(privKey) So(err, ShouldBeNil) Convey("verify", func() { @@ -357,214 +318,6 @@ func TestAck_Sign(t *testing.T) { }) } -func TestNoAckReport_Sign(t *testing.T) { - privKey, _ := 
getCommKeys() - - Convey("sign", t, func() { - noAck := &NoAckReport{ - Header: SignedNoAckReportHeader{ - NoAckReportHeader: NoAckReportHeader{ - NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), - Timestamp: time.Now().UTC(), - Response: SignedResponseHeader{ - ResponseHeader: ResponseHeader{ - Request: SignedRequestHeader{ - RequestHeader: RequestHeader{ - QueryType: WriteQuery, - NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), - DatabaseID: proto.DatabaseID("db1"), - ConnectionID: uint64(1), - SeqNo: uint64(2), - Timestamp: time.Now().UTC(), - }, - }, - NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), - Timestamp: time.Now().UTC(), - RowCount: uint64(1), - }, - }, - }, - }, - } - - var err error - - // sign directly, embedded original response/request is not filled - err = noAck.Sign(privKey) - So(err, ShouldNotBeNil) - So(errors.Cause(err), ShouldBeIn, []error{ - verifier.ErrHashValueNotMatch, - verifier.ErrSignatureNotMatch, - }) - - // sign nested structure - err = noAck.Header.Response.Request.Sign(privKey) - So(err, ShouldBeNil) - err = noAck.Header.Response.Sign(privKey) - So(err, ShouldBeNil) - err = noAck.Sign(privKey) - So(err, ShouldBeNil) - - Convey("verify", func() { - err = noAck.Verify() - So(err, ShouldBeNil) - - Convey("request change", func() { - noAck.Header.Response.Request.QueryType = ReadQuery - - err = noAck.Verify() - So(err, ShouldNotBeNil) - }) - - Convey("response change", func() { - noAck.Header.Response.RowCount = 100 - - err = noAck.Verify() - So(err, ShouldNotBeNil) - }) - - Convey("header change", func() { - noAck.Header.Timestamp = noAck.Header.Timestamp.Add(time.Second) - - err = noAck.Verify() - So(err, ShouldNotBeNil) - }) - }) - }) -} - -func TestAggrNoAckReport_Sign(t *testing.T) { - privKey, _ := getCommKeys() - - Convey("sign", t, func() { - aggrNoAck := &AggrNoAckReport{ - Header: 
SignedAggrNoAckReportHeader{ - AggrNoAckReportHeader: AggrNoAckReportHeader{ - NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), - Timestamp: time.Now().UTC(), - Reports: []SignedNoAckReportHeader{ - { - NoAckReportHeader: NoAckReportHeader{ - NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), - Timestamp: time.Now().UTC(), - Response: SignedResponseHeader{ - ResponseHeader: ResponseHeader{ - Request: SignedRequestHeader{ - RequestHeader: RequestHeader{ - QueryType: WriteQuery, - NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), - DatabaseID: proto.DatabaseID("db1"), - ConnectionID: uint64(1), - SeqNo: uint64(2), - Timestamp: time.Now().UTC(), - }, - }, - NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), - Timestamp: time.Now().UTC(), - RowCount: uint64(1), - }, - }, - }, - }, - { - NoAckReportHeader: NoAckReportHeader{ - NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), - Timestamp: time.Now().UTC(), - Response: SignedResponseHeader{ - ResponseHeader: ResponseHeader{ - Request: SignedRequestHeader{ - RequestHeader: RequestHeader{ - QueryType: WriteQuery, - NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), - DatabaseID: proto.DatabaseID("db1"), - ConnectionID: uint64(1), - SeqNo: uint64(2), - Timestamp: time.Now().UTC(), - }, - }, - NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), - Timestamp: time.Now().UTC(), - RowCount: uint64(1), - }, - }, - }, - }, - }, - Peers: &proto.Peers{ - PeersHeader: proto.PeersHeader{ - Term: uint64(1), - Leader: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), - Servers: []proto.NodeID{ - proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), - 
proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), - }, - }, - }, - }, - }, - } - - var err error - - // sign directly, embedded original response/request is not filled - err = aggrNoAck.Sign(privKey) - So(err, ShouldNotBeNil) - So(errors.Cause(err), ShouldBeIn, []error{ - verifier.ErrHashValueNotMatch, - verifier.ErrSignatureNotMatch, - }) - - // sign nested structure - err = aggrNoAck.Header.Reports[0].Response.Request.Sign(privKey) - So(err, ShouldBeNil) - err = aggrNoAck.Header.Reports[1].Response.Request.Sign(privKey) - So(err, ShouldBeNil) - err = aggrNoAck.Header.Reports[0].Response.Sign(privKey) - So(err, ShouldBeNil) - err = aggrNoAck.Header.Reports[1].Response.Sign(privKey) - So(err, ShouldBeNil) - err = aggrNoAck.Header.Reports[0].Sign(privKey) - So(err, ShouldBeNil) - err = aggrNoAck.Header.Reports[1].Sign(privKey) - So(err, ShouldBeNil) - err = aggrNoAck.Sign(privKey) - So(err, ShouldBeNil) - - Convey("verify", func() { - err = aggrNoAck.Verify() - So(err, ShouldBeNil) - - Convey("request change", func() { - aggrNoAck.Header.Reports[0].Response.Request.QueryType = ReadQuery - - err = aggrNoAck.Verify() - So(err, ShouldNotBeNil) - }) - - Convey("response change", func() { - aggrNoAck.Header.Reports[0].Response.RowCount = 1000 - - err = aggrNoAck.Verify() - So(err, ShouldNotBeNil) - }) - - Convey("report change", func() { - aggrNoAck.Header.Reports[0].Timestamp = aggrNoAck.Header.Reports[0].Timestamp.Add(time.Second) - - err = aggrNoAck.Verify() - So(err, ShouldNotBeNil) - }) - - Convey("header change", func() { - aggrNoAck.Header.Timestamp = aggrNoAck.Header.Timestamp.Add(time.Second) - - err = aggrNoAck.Verify() - So(err, ShouldNotBeNil) - }) - }) - }) -} - func TestInitServiceResponse_Sign(t *testing.T) { privKey, _ := getCommKeys() diff --git a/types/update_service_type_gen.go b/types/update_service_type_gen.go index c134c6635..e3bbb2f26 100644 --- a/types/update_service_type_gen.go +++ 
b/types/update_service_type_gen.go @@ -11,17 +11,16 @@ func (z *SignedUpdateServiceHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - // map header, size 2 - o = append(o, 0x82, 0x82, 0x82, 0x82) - o = hsp.AppendInt32(o, int32(z.UpdateServiceHeader.Op)) o = append(o, 0x82) - if oTemp, err := z.UpdateServiceHeader.Instance.MarshalHash(); err != nil { + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } + // map header, size 2 o = append(o, 0x82) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + o = hsp.AppendInt32(o, int32(z.UpdateServiceHeader.Op)) + if oTemp, err := z.UpdateServiceHeader.Instance.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -31,7 +30,7 @@ func (z *SignedUpdateServiceHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SignedUpdateServiceHeader) Msgsize() (s int) { - s = 1 + 20 + 1 + 3 + hsp.Int32Size + 9 + z.UpdateServiceHeader.Instance.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 20 + 1 + 3 + hsp.Int32Size + 9 + z.UpdateServiceHeader.Instance.Msgsize() return } @@ -40,14 +39,13 @@ func (z *UpdateService) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) - if oTemp, err := z.Header.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Envelope.MarshalHash(); err != nil { + if oTemp, err := z.Header.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -57,7 +55,7 @@ func (z *UpdateService) MarshalHash() 
(o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *UpdateService) Msgsize() (s int) { - s = 1 + 7 + z.Header.Msgsize() + 9 + z.Envelope.Msgsize() + s = 1 + 9 + z.Envelope.Msgsize() + 7 + z.Header.Msgsize() return } @@ -66,13 +64,12 @@ func (z *UpdateServiceHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) if oTemp, err := z.Instance.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) o = hsp.AppendInt32(o, int32(z.Op)) return } diff --git a/types/updatebilling_gen.go b/types/updatebilling_gen.go index fb471f34f..79a626ffb 100644 --- a/types/updatebilling_gen.go +++ b/types/updatebilling_gen.go @@ -11,20 +11,19 @@ func (z *MinerIncome) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) + o = hsp.AppendUint64(o, z.Income) if oTemp, err := z.Miner.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - o = hsp.AppendUint64(o, z.Income) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *MinerIncome) Msgsize() (s int) { - s = 1 + 6 + z.Miner.Msgsize() + 7 + hsp.Uint64Size + s = 1 + 7 + hsp.Uint64Size + 6 + z.Miner.Msgsize() return } @@ -33,20 +32,18 @@ func (z *UpdateBilling) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) - if oTemp, err := z.UpdateBillingHeader.MarshalHash(); err != nil { + o = append(o, 0x83) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) if oTemp, err := z.TransactionTypeMixin.MarshalHash(); 
err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.UpdateBillingHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -56,7 +53,7 @@ func (z *UpdateBilling) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *UpdateBilling) Msgsize() (s int) { - s = 1 + 20 + z.UpdateBillingHeader.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + 20 + z.UpdateBillingHeader.Msgsize() return } @@ -65,7 +62,17 @@ func (z *UpdateBillingHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) + o = append(o, 0x83) + if oTemp, err := z.Nonce.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + if oTemp, err := z.Receiver.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } o = hsp.AppendArrayHeader(o, uint32(len(z.Users))) for za0001 := range z.Users { if z.Users[za0001] == nil { @@ -78,24 +85,12 @@ func (z *UpdateBillingHeader) MarshalHash() (o []byte, err error) { } } } - o = append(o, 0x83) - if oTemp, err := z.Nonce.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x83) - if oTemp, err := z.Receiver.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *UpdateBillingHeader) Msgsize() (s int) { - s = 1 + 6 + hsp.ArrayHeaderSize + s = 1 + 6 + z.Nonce.Msgsize() + 9 + z.Receiver.Msgsize() + 6 + 
hsp.ArrayHeaderSize for za0001 := range z.Users { if z.Users[za0001] == nil { s += hsp.NilSize @@ -103,7 +98,6 @@ func (z *UpdateBillingHeader) Msgsize() (s int) { s += z.Users[za0001].Msgsize() } } - s += 6 + z.Nonce.Msgsize() + 9 + z.Receiver.Msgsize() return } @@ -112,37 +106,34 @@ func (z *UserCost) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) + o = append(o, 0x83) + o = hsp.AppendUint64(o, z.Cost) o = hsp.AppendArrayHeader(o, uint32(len(z.Miners))) for za0001 := range z.Miners { if z.Miners[za0001] == nil { o = hsp.AppendNil(o) } else { // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) if oTemp, err := z.Miners[za0001].Miner.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) o = hsp.AppendUint64(o, z.Miners[za0001].Income) } } - o = append(o, 0x83) if oTemp, err := z.User.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - o = hsp.AppendUint64(o, z.Cost) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *UserCost) Msgsize() (s int) { - s = 1 + 7 + hsp.ArrayHeaderSize + s = 1 + 5 + hsp.Uint64Size + 7 + hsp.ArrayHeaderSize for za0001 := range z.Miners { if z.Miners[za0001] == nil { s += hsp.NilSize @@ -150,6 +141,6 @@ func (z *UserCost) Msgsize() (s int) { s += 1 + 6 + z.Miners[za0001].Miner.Msgsize() + 7 + hsp.Uint64Size } } - s += 5 + z.User.Msgsize() + 5 + hsp.Uint64Size + s += 5 + z.User.Msgsize() return } diff --git a/types/updatepermission.go b/types/updatepermission.go index 1b7ed46a6..729829c3d 100644 --- a/types/updatepermission.go +++ b/types/updatepermission.go @@ -30,7 +30,7 @@ import ( type UpdatePermissionHeader struct { TargetSQLChain proto.AccountAddress TargetUser proto.AccountAddress - Permission UserPermission + Permission *UserPermission Nonce 
interfaces.AccountNonce } diff --git a/types/updatepermission_gen.go b/types/updatepermission_gen.go index 443bfaa78..11ba931d5 100644 --- a/types/updatepermission_gen.go +++ b/types/updatepermission_gen.go @@ -11,20 +11,18 @@ func (z *UpdatePermission) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) - if oTemp, err := z.UpdatePermissionHeader.MarshalHash(); err != nil { + o = append(o, 0x83) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.UpdatePermissionHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -34,7 +32,7 @@ func (z *UpdatePermission) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *UpdatePermission) Msgsize() (s int) { - s = 1 + 23 + z.UpdatePermissionHeader.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + 23 + z.UpdatePermissionHeader.Msgsize() return } @@ -43,25 +41,26 @@ func (z *UpdatePermissionHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 4 - o = append(o, 0x84, 0x84) - if oTemp, err := z.Permission.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } o = append(o, 0x84) if oTemp, err := z.Nonce.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x84) + if z.Permission == nil { + o = 
hsp.AppendNil(o) + } else { + if oTemp, err := z.Permission.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } if oTemp, err := z.TargetSQLChain.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x84) if oTemp, err := z.TargetUser.MarshalHash(); err != nil { return nil, err } else { @@ -72,6 +71,12 @@ func (z *UpdatePermissionHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *UpdatePermissionHeader) Msgsize() (s int) { - s = 1 + 11 + z.Permission.Msgsize() + 6 + z.Nonce.Msgsize() + 15 + z.TargetSQLChain.Msgsize() + 11 + z.TargetUser.Msgsize() + s = 1 + 6 + z.Nonce.Msgsize() + 11 + if z.Permission == nil { + s += hsp.NilSize + } else { + s += z.Permission.Msgsize() + } + s += 15 + z.TargetSQLChain.Msgsize() + 11 + z.TargetUser.Msgsize() return } diff --git a/types/xxx_test.go b/types/xxx_test.go index ab9d8f77f..7da8dffa6 100644 --- a/types/xxx_test.go +++ b/types/xxx_test.go @@ -68,7 +68,6 @@ func generateRandomBlock(parent hash.Hash, isGenesis bool) (b *BPBlock, err erro if err != nil { return - } h := hash.Hash{} @@ -96,8 +95,8 @@ func generateRandomBlock(parent hash.Hash, isGenesis bool) (b *BPBlock, err erro } err = b.PackAndSignBlock(priv) - return + return } func generateRandomBillingRequestHeader() *BillingRequestHeader { @@ -109,7 +108,6 @@ func generateRandomBillingRequestHeader() *BillingRequestHeader { HighHeight: rand.Int31(), GasAmounts: generateRandomGasAmount(peerNum), } - } func generateRandomBillingRequest() (req *BillingRequest, err error) { @@ -119,7 +117,6 @@ func generateRandomBillingRequest() (req *BillingRequest, err error) { } if _, err = req.PackRequestHeader(); err != nil { return nil, err - } for i := 0; i < peerNum; i++ { @@ -128,36 +125,29 @@ func generateRandomBillingRequest() (req *BillingRequest, err error) { if priv, _, err = 
asymmetric.GenSecp256k1KeyPair(); err != nil { return - } if _, _, err = req.SignRequestHeader(priv, false); err != nil { return - } - } return - } func generateRandomBillingHeader() (tc *BillingHeader, err error) { var req *BillingRequest if req, err = generateRandomBillingRequest(); err != nil { return - } var priv *asymmetric.PrivateKey if priv, _, err = asymmetric.GenSecp256k1KeyPair(); err != nil { return - } if _, _, err = req.SignRequestHeader(priv, false); err != nil { return - } receivers := make([]*proto.AccountAddress, peerNum) @@ -169,33 +159,27 @@ func generateRandomBillingHeader() (tc *BillingHeader, err error) { receivers[i] = &accountAddress fees[i] = rand.Uint64() rewards[i] = rand.Uint64() - } producer := proto.AccountAddress(generateRandomHash()) tc = NewBillingHeader(pi.AccountNonce(rand.Uint32()), req, producer, receivers, fees, rewards) return tc, nil - } func generateRandomBilling() (*Billing, error) { header, err := generateRandomBillingHeader() if err != nil { return nil, err - } priv, _, err := asymmetric.GenSecp256k1KeyPair() if err != nil { return nil, err - } txBilling := NewBilling(header) if err := txBilling.Sign(priv); err != nil { return nil, err - } return txBilling, nil - } func generateRandomGasAmount(n int) []*proto.AddrAndGas { @@ -207,11 +191,9 @@ func generateRandomGasAmount(n int) []*proto.AddrAndGas { RawNodeID: proto.RawNodeID{Hash: generateRandomHash()}, GasAmount: rand.Uint64(), } - } return gasAmount - } func randBytes(n int) (b []byte) { @@ -269,7 +251,8 @@ func buildResponse(header *SignedRequestHeader, cols []string, types []string, r r = &Response{ Header: SignedResponseHeader{ ResponseHeader: ResponseHeader{ - Request: *header, + Request: header.RequestHeader, + RequestHash: header.Hash(), NodeID: id, Timestamp: time.Now().UTC(), RowCount: 0, @@ -284,7 +267,7 @@ func buildResponse(header *SignedRequestHeader, cols []string, types []string, r Rows: rows, }, } - if err = r.Sign(testingPrivateKey); err != nil { + if 
err = r.BuildHash(); err != nil { panic(err) } return diff --git a/types/xxxxx_test.go b/types/xxxxx_test.go new file mode 100644 index 000000000..a5f10abf6 --- /dev/null +++ b/types/xxxxx_test.go @@ -0,0 +1,91 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "math" + "strings" + "testing" + "time" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils" + . 
"github.com/smartystreets/goconvey/convey" +) + +func BenchmarkEncode(b *testing.B) { + Convey("test encode decode", b, func(c C) { + var ( + nodeID proto.NodeID + addr proto.AccountAddress + ) + r := &Request{ + Header: SignedRequestHeader{ + RequestHeader: RequestHeader{ + QueryType: ReadQuery, + NodeID: nodeID.ToRawNodeID().ToNodeID(), + DatabaseID: addr.DatabaseID(), + ConnectionID: math.MaxUint64, + SeqNo: math.MaxUint64, + Timestamp: time.Now().UTC(), + BatchCount: 1, + }, + }, + Payload: RequestPayload{ + Queries: []Query{ + { + Pattern: strings.Repeat("1", 1024), + Args: []NamedArg{}, + }, + }, + }, + } + + privKey, _, err := asymmetric.GenSecp256k1KeyPair() + So(err, ShouldBeNil) + So(privKey, ShouldNotBeNil) + + b.Run("a", func(b *testing.B) { + for i := 0; i != b.N; i++ { + err := r.Sign(privKey) + + req, err := utils.EncodeMsgPack(r) + bs := req.Bytes() + + b.Logf("len: %v", len(bs)) + + var e1 *Request + err = utils.DecodeMsgPack(bs, &e1) + err = e1.Verify() + _ = err + + req, err = utils.EncodeMsgPack(r) + bs = req.Bytes() + var e2 *Request + err = utils.DecodeMsgPack(bs, &e2) + err = e2.Verify() + + req, err = utils.EncodeMsgPack(r) + bs = req.Bytes() + var e3 *Request + err = utils.DecodeMsgPack(bs, &e3) + err = e3.Verify() + } + }) + }) +} diff --git a/utils/big.go b/utils/big.go index 7c4556073..0504b2596 100644 --- a/utils/big.go +++ b/utils/big.go @@ -22,6 +22,22 @@ import ( "math/big" ) +const ( + _ = iota + // KB is 1024 Bytes + KB int64 = 1 << (10 * iota) + // MB is 1024 KB + MB + // GB is 1024 MB + GB + // TB is 1024 GB + TB + // PB is 1024 TB + PB + // EB is 1024 PB + EB +) + // Various big integer limit values. var ( tt255 = BigPow(2, 255) diff --git a/utils/log/logwrapper.go b/utils/log/logwrapper.go index 72c1a37cc..0e528810f 100644 --- a/utils/log/logwrapper.go +++ b/utils/log/logwrapper.go @@ -209,6 +209,20 @@ func GetLevel() logrus.Level { return logrus.GetLevel() } +// ParseLevel parse the level string and returns the logger level. 
+func ParseLevel(lvl string) (logrus.Level, error) { + return logrus.ParseLevel(lvl) +} + +// SetStringLevel enforce current log level. +func SetStringLevel(lvl string, defaultLevel logrus.Level) { + if lvl, err := ParseLevel(lvl); err != nil { + SetLevel(defaultLevel) + } else { + SetLevel(lvl) + } +} + // AddHook adds a hook to the standard logger hooks. func AddHook(hook logrus.Hook) { logrus.AddHook(hook) diff --git a/utils/msgpack.go b/utils/msgpack.go index f443c9184..29dc9a6a1 100644 --- a/utils/msgpack.go +++ b/utils/msgpack.go @@ -39,18 +39,16 @@ func RegisterInterfaceToMsgPack(intf, impl reflect.Type) (err error) { // DecodeMsgPack reverses the encode operation on a byte slice input. func DecodeMsgPack(buf []byte, out interface{}) error { - r := bytes.NewBuffer(buf) - dec := codec.NewDecoder(r, msgPackHandle) + dec := codec.NewDecoder(bytes.NewReader(buf), msgPackHandle) return dec.Decode(out) } // DecodeMsgPackPlain reverses the encode operation on a byte slice input without RawToString setting. func DecodeMsgPackPlain(buf []byte, out interface{}) error { - r := bytes.NewBuffer(buf) hd := &codec.MsgpackHandle{ WriteExt: true, } - dec := codec.NewDecoder(r, hd) + dec := codec.NewDecoder(bytes.NewReader(buf), hd) return dec.Decode(out) } diff --git a/utils/path.go b/utils/path.go index 3b2e44192..e0ad6f19c 100644 --- a/utils/path.go +++ b/utils/path.go @@ -19,7 +19,9 @@ package utils import ( "io" "os" + "os/user" "path/filepath" + "strings" ) // CopyFile copies from src to dst until either EOF is reached @@ -46,3 +48,20 @@ func CopyFile(src, dst string) (int64, error) { defer df.Close() return io.Copy(df, sf) } + +// HomeDirExpand tries to expand the tilde (~) in the front of a path +// to a fullpath directory. 
+func HomeDirExpand(path string) string { + usr, err := user.Current() + if err != nil { + return path + } + + if path == "~" { + return usr.HomeDir + } else if strings.HasPrefix(path, "~/") { + return filepath.Join(usr.HomeDir, strings.TrimPrefix(path, "~/")) + } + + return path +} diff --git a/utils/path_test.go b/utils/path_test.go index b0c4fa8b3..fe1cad44a 100644 --- a/utils/path_test.go +++ b/utils/path_test.go @@ -19,6 +19,7 @@ package utils import ( "io/ioutil" "os" + "os/user" "testing" . "github.com/smartystreets/goconvey/convey" @@ -47,3 +48,22 @@ func TestCopyFile(t *testing.T) { So(n, ShouldBeZeroValue) }) } + +func TestHomeDirExpand(t *testing.T) { + Convey("expand ~ dir", t, func() { + usr, err := user.Current() + So(err, ShouldBeNil) + + homeDir := HomeDirExpand("~") + So(homeDir, ShouldEqual, usr.HomeDir) + + fullFilepathWithHome := HomeDirExpand("~/.local") + So(fullFilepathWithHome, ShouldEqual, usr.HomeDir+"/.local") + + fullFilepathRaw := HomeDirExpand("/dev/null") + So(fullFilepathRaw, ShouldEqual, "/dev/null") + + emptyPath := HomeDirExpand("") + So(emptyPath, ShouldEqual, "") + }) +} diff --git a/utils/timer/timer.go b/utils/timer/timer.go new file mode 100644 index 000000000..4a028269f --- /dev/null +++ b/utils/timer/timer.go @@ -0,0 +1,90 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package timer + +import ( + "sync" + "time" + + "github.com/CovenantSQL/CovenantSQL/utils/log" +) + +// Timer defines a stop watch timer for performance analysis. +type Timer struct { + sync.Mutex + start time.Time + names []string + pivots []time.Time +} + +// NewTimer returns a new stop watch timer instance. +func NewTimer() *Timer { + return &Timer{ + start: time.Now(), + } +} + +// Add records a time pivot. +func (t *Timer) Add(name string) { + t.Lock() + defer t.Unlock() + + t.names = append(t.names, name) + t.pivots = append(t.pivots, time.Now()) +} + +// ToLogFields returns analysis results as log fields. +func (t *Timer) ToLogFields() log.Fields { + var ( + m = t.ToMap() + f = log.Fields{} + ) + + for k, v := range m { + f[k] = v + } + + return f +} + +// ToMap returns analysis results as time duration map. +func (t *Timer) ToMap() map[string]time.Duration { + t.Lock() + defer t.Unlock() + + // calc + lp := len(t.pivots) + m := make(map[string]time.Duration, 1+lp) + + for i := 0; i != lp; i++ { + var d time.Duration + if i == 0 { + d = t.pivots[i].Sub(t.start) + } else { + d = t.pivots[i].Sub(t.pivots[i-1]) + } + + m[t.names[i]] = d + + if i+1 == lp { + // last one + m["total"] = t.pivots[i].Sub(t.start) + } + } + + return m +} diff --git a/utils/timer/timer_test.go b/utils/timer/timer_test.go new file mode 100644 index 000000000..c9fc08e99 --- /dev/null +++ b/utils/timer/timer_test.go @@ -0,0 +1,51 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package timer + +import ( + "testing" + "time" + + . "github.com/smartystreets/goconvey/convey" +) + +func TestTimer(t *testing.T) { + Convey("test timer", t, func() { + t := NewTimer() + time.Sleep(time.Millisecond * 100) + t.Add("stage1") + + time.Sleep(time.Second * 1) + t.Add("stage2") + + m := t.ToMap() + So(m, ShouldHaveLength, 3) + So(m, ShouldContainKey, "stage1") + So(m, ShouldContainKey, "stage2") + So(m["stage1"], ShouldBeGreaterThanOrEqualTo, time.Millisecond*100) + So(m["stage2"], ShouldBeGreaterThanOrEqualTo, time.Second) + So(m["total"], ShouldBeGreaterThanOrEqualTo, time.Second+time.Millisecond*100) + + f := t.ToLogFields() + So(f, ShouldHaveLength, 3) + So(f, ShouldContainKey, "stage1") + So(f, ShouldContainKey, "stage2") + So(m["stage1"], ShouldEqual, f["stage1"]) + So(m["stage2"], ShouldEqual, f["stage2"]) + So(m["total"], ShouldEqual, f["total"]) + }) +} diff --git a/utils/trace/trace_dummy.go b/utils/trace/trace_dummy.go new file mode 100644 index 000000000..a399427bb --- /dev/null +++ b/utils/trace/trace_dummy.go @@ -0,0 +1,76 @@ +// +build !go1.11 + +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package trace + +import ( + "context" + "io" +) + +// Task mocks runtime/trace.Task. +type Task struct{} + +// End mocks runtime/trace.Task.End. 
+func (t *Task) End() {}
+
+// Region mocks runtime/trace.Region.
+type Region struct{}
+
+// End mocks runtime/trace.Region.End.
+func (r *Region) End() {}
+
+// NewTask mocks runtime/trace.NewTask.
+func NewTask(pctx context.Context, taskType string) (ctx context.Context, task *Task) {
+	return pctx, &Task{}
+}
+
+// StartRegion mocks runtime/trace.StartRegion.
+func StartRegion(ctx context.Context, regionType string) (region *Region) {
+	return &Region{}
+}
+
+// WithRegion mocks runtime/trace.WithRegion.
+func WithRegion(ctx context.Context, regionType string, fn func()) {
+	fn()
+}
+
+// IsEnabled mocks runtime/trace.IsEnabled.
+func IsEnabled() bool {
+	return false
+}
+
+// Log mocks runtime/trace.Log.
+func Log(ctx context.Context, category, message string) {
+	return
+}
+
+// Logf mocks runtime/trace.Logf.
+func Logf(ctx context.Context, category, message string, args ...interface{}) {
+	return
+}
+
+// Start mocks runtime/trace.Start.
+func Start(w io.Writer) (err error) {
+	return
+}
+
+// Stop mocks runtime/trace.Stop.
+func Stop() {
+	return
+}
diff --git a/utils/trace/trace_go111.go b/utils/trace/trace_go111.go
new file mode 100644
index 000000000..ea0cd040a
--- /dev/null
+++ b/utils/trace/trace_go111.go
@@ -0,0 +1,71 @@
+// +build go1.11
+
+/*
+ * Copyright 2019 The CovenantSQL Authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package trace
+
+import (
+	"context"
+	"io"
+	"runtime/trace"
+)
+
+// Task wraps runtime/trace.Task.
+type Task = trace.Task
+
+// Region wraps runtime/trace.Region.
+type Region = trace.Region
+
+// NewTask wraps runtime/trace.NewTask.
+func NewTask(pctx context.Context, taskType string) (ctx context.Context, task *Task) {
+	return trace.NewTask(pctx, taskType)
+}
+
+// StartRegion wraps runtime/trace.StartRegion.
+func StartRegion(ctx context.Context, regionType string) (region *Region) {
+	return trace.StartRegion(ctx, regionType)
+}
+
+// WithRegion wraps runtime/trace.WithRegion.
+func WithRegion(ctx context.Context, regionType string, fn func()) {
+	trace.WithRegion(ctx, regionType, fn)
+}
+
+// IsEnabled wraps runtime/trace.IsEnabled.
+func IsEnabled() bool {
+	return trace.IsEnabled()
+}
+
+// Log wraps runtime/trace.Log.
+func Log(ctx context.Context, category, message string) {
+	trace.Log(ctx, category, message)
+}
+
+// Logf wraps runtime/trace.Logf.
+func Logf(ctx context.Context, category, message string, args ...interface{}) {
+	trace.Logf(ctx, category, message, args...)
+}
+
+// Start wraps runtime/trace.Start.
+func Start(w io.Writer) (err error) {
+	return trace.Start(w)
+}
+
+// Stop wraps runtime/trace.Stop.
+func Stop() {
+	trace.Stop()
+}
diff --git a/vendor/github.com/go-gorp/gorp/CONTRIBUTING.md b/vendor/github.com/go-gorp/gorp/CONTRIBUTING.md
new file mode 100644
index 000000000..7bc145fd7
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/CONTRIBUTING.md
@@ -0,0 +1,34 @@
+# Contributions are very welcome!
+
+## First: Create an Issue
+
+Even if your fix is simple, we'd like to have an issue to relate to
+the PR. Discussion about the architecture and value can go on the
+issue, leaving PR comments exclusively for coding style.
+
+## Second: Make Your PR
+
+- Fork the `master` branch
+- Make your change
+- Make a PR against the `master` branch
+
+You don't need to wait for comments on the issue before making your
+PR. If you do wait for comments, you'll have a better chance of
+getting your PR accepted the first time around, but it's not
+necessary.
+
+## Third: Be Patient
+
+- If your change breaks backward compatibility, this becomes
+  especially true.
+
+We all have lives and jobs, and many of us are no longer on projects
+that make use of `gorp`. We will get back to you, but it might take a
+while.
+
+## Fourth: Consider Becoming a Maintainer
+
+We really do need help. We will likely ask you for help after a good
+PR, but if we don't, please create an issue requesting maintainership.
+Considering how few of us are currently active, we are unlikely to
+refuse good help.
diff --git a/vendor/github.com/go-gorp/gorp/LICENSE b/vendor/github.com/go-gorp/gorp/LICENSE
new file mode 100644
index 000000000..b661111d0
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/LICENSE
@@ -0,0 +1,22 @@
+(The MIT License)
+
+Copyright (c) 2012 James Cooper
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+'Software'), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/go-gorp/gorp/README.md b/vendor/github.com/go-gorp/gorp/README.md new file mode 100644 index 000000000..87cb7ba7a --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/README.md @@ -0,0 +1,805 @@ +# Go Relational Persistence + +[![build status](https://img.shields.io/travis/go-gorp/gorp/master.svg)](http://travis-ci.org/go-gorp/gorp) +[![code coverage](https://img.shields.io/coveralls/go-gorp/gorp.svg)](https://coveralls.io/r/go-gorp/gorp) +[![issues](https://img.shields.io/github/issues/go-gorp/gorp.svg)](https://github.com/go-gorp/gorp/issues) +[![godoc v1](https://img.shields.io/badge/godoc-v1-375EAB.svg)](https://godoc.org/gopkg.in/gorp.v1) +[![godoc v2](https://img.shields.io/badge/godoc-v2-375EAB.svg)](https://godoc.org/gopkg.in/gorp.v2) +[![godoc bleeding edge](https://img.shields.io/badge/godoc-bleeding--edge-375EAB.svg)](https://godoc.org/github.com/go-gorp/gorp) + +### Update 2016-11-13: Future versions + +As many of the maintainers have become busy with other projects, +progress toward the ever-elusive v2 has slowed to the point that we're +only occasionally making progress outside of merging pull requests. +In the interest of continuing to release, I'd like to lean toward a +more maintainable path forward. + +For the moment, I am releasing a v2 tag with the current feature set +from master, as some of those features have been actively used and +relied on by more than one project. Our next goal is to continue +cleaning up the code base with non-breaking changes as much as +possible, but if/when a breaking change is needed, we'll just release +new versions. This allows us to continue development at whatever pace +we're capable of, without delaying the release of features or refusing +PRs. + +## Introduction + +I hesitate to call gorp an ORM. Go doesn't really have objects, at +least not in the classic Smalltalk/Java sense. There goes the "O". 
+gorp doesn't know anything about the relationships between your +structs (at least not yet). So the "R" is questionable too (but I use +it in the name because, well, it seemed more clever). + +The "M" is alive and well. Given some Go structs and a database, gorp +should remove a fair amount of boilerplate busy-work from your code. + +I hope that gorp saves you time, minimizes the drudgery of getting +data in and out of your database, and helps your code focus on +algorithms, not infrastructure. + +* Bind struct fields to table columns via API or tag +* Support for embedded structs +* Support for transactions +* Forward engineer db schema from structs (great for unit tests) +* Pre/post insert/update/delete hooks +* Automatically generate insert/update/delete statements for a struct +* Automatic binding of auto increment PKs back to struct after insert +* Delete by primary key(s) +* Select by primary key(s) +* Optional trace sql logging +* Bind arbitrary SQL queries to a struct +* Bind slice to SELECT query results without type assertions +* Use positional or named bind parameters in custom SELECT queries +* Optional optimistic locking using a version column (for + update/deletes) + +## Installation + +Use `go get` or your favorite vendoring tool, using whichever import +path you'd like. + +## Versioning + +We use semantic version tags. Feel free to import through `gopkg.in` +(e.g. `gopkg.in/gorp.v2`) to get the latest tag for a major version, +or check out the tag using your favorite vendoring tool. + +Development is not very active right now, but we have plans to +restructure `gorp` as we continue to move toward a more extensible +system. Whenever a breaking change is needed, the major version will +be bumped. + +The `master` branch is where all development is done, and breaking +changes may happen from time to time. 
That said, if you want to live +on the bleeding edge and are comfortable updating your code when we +make a breaking change, you may use `github.com/go-gorp/gorp` as your +import path. + +Check the version tags to see what's available. We'll make a good +faith effort to add badges for new versions, but we make no +guarantees. + +## Supported Go versions + +This package is guaranteed to be compatible with the latest 2 major +versions of Go. + +Any earlier versions are only supported on a best effort basis and can +be dropped any time. Go has a great compatibility promise. Upgrading +your program to a newer version of Go should never really be a +problem. + +## Migration guide + +#### Pre-v2 to v2 +Automatic mapping of the version column used in optimistic locking has +been removed as it could cause problems if the type was not int. The +version column must now explicitly be set with +`tablemap.SetVersionCol()`. + +## Help/Support + +Use our [`gitter` channel](https://gitter.im/go-gorp/gorp). We used +to use IRC, but with most of us being pulled in many directions, we +often need the email notifications from `gitter` to yell at us to sign +in. 
+ +## Quickstart + +```go +package main + +import ( + "database/sql" + "gopkg.in/gorp.v1" + _ "github.com/mattn/go-sqlite3" + "log" + "time" +) + +func main() { + // initialize the DbMap + dbmap := initDb() + defer dbmap.Db.Close() + + // delete any existing rows + err := dbmap.TruncateTables() + checkErr(err, "TruncateTables failed") + + // create two posts + p1 := newPost("Go 1.1 released!", "Lorem ipsum lorem ipsum") + p2 := newPost("Go 1.2 released!", "Lorem ipsum lorem ipsum") + + // insert rows - auto increment PKs will be set properly after the insert + err = dbmap.Insert(&p1, &p2) + checkErr(err, "Insert failed") + + // use convenience SelectInt + count, err := dbmap.SelectInt("select count(*) from posts") + checkErr(err, "select count(*) failed") + log.Println("Rows after inserting:", count) + + // update a row + p2.Title = "Go 1.2 is better than ever" + count, err = dbmap.Update(&p2) + checkErr(err, "Update failed") + log.Println("Rows updated:", count) + + // fetch one row - note use of "post_id" instead of "Id" since column is aliased + // + // Postgres users should use $1 instead of ? 
placeholders + // See 'Known Issues' below + // + err = dbmap.SelectOne(&p2, "select * from posts where post_id=?", p2.Id) + checkErr(err, "SelectOne failed") + log.Println("p2 row:", p2) + + // fetch all rows + var posts []Post + _, err = dbmap.Select(&posts, "select * from posts order by post_id") + checkErr(err, "Select failed") + log.Println("All rows:") + for x, p := range posts { + log.Printf(" %d: %v\n", x, p) + } + + // delete row by PK + count, err = dbmap.Delete(&p1) + checkErr(err, "Delete failed") + log.Println("Rows deleted:", count) + + // delete row manually via Exec + _, err = dbmap.Exec("delete from posts where post_id=?", p2.Id) + checkErr(err, "Exec failed") + + // confirm count is zero + count, err = dbmap.SelectInt("select count(*) from posts") + checkErr(err, "select count(*) failed") + log.Println("Row count - should be zero:", count) + + log.Println("Done!") +} + +type Post struct { + // db tag lets you specify the column name if it differs from the struct field + Id int64 `db:"post_id"` + Created int64 + Title string `db:",size:50"` // Column size set to 50 + Body string `db:"article_body,size:1024"` // Set both column name and size +} + +func newPost(title, body string) Post { + return Post{ + Created: time.Now().UnixNano(), + Title: title, + Body: body, + } +} + +func initDb() *gorp.DbMap { + // connect to db using standard Go database/sql API + // use whatever database/sql driver you wish + db, err := sql.Open("sqlite3", "/tmp/post_db.bin") + checkErr(err, "sql.Open failed") + + // construct a gorp DbMap + dbmap := &gorp.DbMap{Db: db, Dialect: gorp.SqliteDialect{}} + + // add a table, setting the table name to 'posts' and + // specifying that the Id property is an auto incrementing PK + dbmap.AddTableWithName(Post{}, "posts").SetKeys(true, "Id") + + // create the table. 
in a production system you'd generally + // use a migration tool, or create the tables via scripts + err = dbmap.CreateTablesIfNotExists() + checkErr(err, "Create tables failed") + + return dbmap +} + +func checkErr(err error, msg string) { + if err != nil { + log.Fatalln(msg, err) + } +} +``` + +## Examples + +### Mapping structs to tables + +First define some types: + +```go +type Invoice struct { + Id int64 + Created int64 + Updated int64 + Memo string + PersonId int64 +} + +type Person struct { + Id int64 + Created int64 + Updated int64 + FName string + LName string +} + +// Example of using tags to alias fields to column names +// The 'db' value is the column name +// +// A hyphen will cause gorp to skip this field, similar to the +// Go json package. +// +// This is equivalent to using the ColMap methods: +// +// table := dbmap.AddTableWithName(Product{}, "product") +// table.ColMap("Id").Rename("product_id") +// table.ColMap("Price").Rename("unit_price") +// table.ColMap("IgnoreMe").SetTransient(true) +// +// You can optionally declare the field to be a primary key and/or autoincrement +// +type Product struct { + Id int64 `db:"product_id, primarykey, autoincrement"` + Price int64 `db:"unit_price"` + IgnoreMe string `db:"-"` +} +``` + +Then create a mapper, typically you'd do this one time at app startup: + +```go +// connect to db using standard Go database/sql API +// use whatever database/sql driver you wish +db, err := sql.Open("mymysql", "tcp:localhost:3306*mydb/myuser/mypassword") + +// construct a gorp DbMap +dbmap := &gorp.DbMap{Db: db, Dialect: gorp.MySQLDialect{"InnoDB", "UTF8"}} + +// register the structs you wish to use with gorp +// you can also use the shorter dbmap.AddTable() if you +// don't want to override the table name +// +// SetKeys(true) means we have a auto increment primary key, which +// will get automatically bound to your struct post-insert +// +t1 := dbmap.AddTableWithName(Invoice{}, "invoice_test").SetKeys(true, "Id") +t2 := 
dbmap.AddTableWithName(Person{}, "person_test").SetKeys(true, "Id") +t3 := dbmap.AddTableWithName(Product{}, "product_test").SetKeys(true, "Id") +``` + +### Struct Embedding + +gorp supports embedding structs. For example: + +```go +type Names struct { + FirstName string + LastName string +} + +type WithEmbeddedStruct struct { + Id int64 + Names +} + +es := &WithEmbeddedStruct{-1, Names{FirstName: "Alice", LastName: "Smith"}} +err := dbmap.Insert(es) +``` + +See the `TestWithEmbeddedStruct` function in `gorp_test.go` for a full example. + +### Create/Drop Tables ### + +Automatically create / drop registered tables. This is useful for unit tests +but is entirely optional. You can of course use gorp with tables created manually, +or with a separate migration tool (like [goose](https://bitbucket.org/liamstask/goose) or [migrate](https://github.com/mattes/migrate)). + +```go +// create all registered tables +dbmap.CreateTables() + +// same as above, but uses "if not exists" clause to skip tables that are +// already defined +dbmap.CreateTablesIfNotExists() + +// drop +dbmap.DropTables() +``` + +### SQL Logging + +Optionally you can pass in a logger to trace all SQL statements. +I recommend enabling this initially while you're getting the feel for what +gorp is doing on your behalf. + +Gorp defines a `GorpLogger` interface that Go's built in `log.Logger` satisfies. +However, you can write your own `GorpLogger` implementation, or use a package such +as `glog` if you want more control over how statements are logged. 
+ +```go +// Will log all SQL statements + args as they are run +// The first arg is a string prefix to prepend to all log messages +dbmap.TraceOn("[gorp]", log.New(os.Stdout, "myapp:", log.Lmicroseconds)) + +// Turn off tracing +dbmap.TraceOff() +``` + +### Insert + +```go +// Must declare as pointers so optional callback hooks +// can operate on your data, not copies +inv1 := &Invoice{0, 100, 200, "first order", 0} +inv2 := &Invoice{0, 100, 200, "second order", 0} + +// Insert your rows +err := dbmap.Insert(inv1, inv2) + +// Because we called SetKeys(true) on Invoice, the Id field +// will be populated after the Insert() automatically +fmt.Printf("inv1.Id=%d inv2.Id=%d\n", inv1.Id, inv2.Id) +``` + +### Update + +Continuing the above example, use the `Update` method to modify an Invoice: + +```go +// count is the # of rows updated, which should be 1 in this example +count, err := dbmap.Update(inv1) +``` + +### Delete + +If you have primary key(s) defined for a struct, you can use the `Delete` +method to remove rows: + +```go +count, err := dbmap.Delete(inv1) +``` + +### Select by Key + +Use the `Get` method to fetch a single row by primary key. It returns +nil if no row is found. + +```go +// fetch Invoice with Id=99 +obj, err := dbmap.Get(Invoice{}, 99) +inv := obj.(*Invoice) +``` + +### Ad Hoc SQL + +#### SELECT + +`Select()` and `SelectOne()` provide a simple way to bind arbitrary queries to a slice +or a single struct. + +```go +// Select a slice - first return value is not needed when a slice pointer is passed to Select() +var posts []Post +_, err := dbmap.Select(&posts, "select * from post order by id") + +// You can also use primitive types +var ids []string +_, err := dbmap.Select(&ids, "select id from post") + +// Select a single row. +// Returns an error if no row found, or if more than one row is found +var post Post +err := dbmap.SelectOne(&post, "select * from post where id=?", id) +``` + +Want to do joins? Just write the SQL and the struct. 
gorp will bind them: + +```go +// Define a type for your join +// It *must* contain all the columns in your SELECT statement +// +// The names here should match the aliased column names you specify +// in your SQL - no additional binding work required. simple. +// +type InvoicePersonView struct { + InvoiceId int64 + PersonId int64 + Memo string + FName string +} + +// Create some rows +p1 := &Person{0, 0, 0, "bob", "smith"} +dbmap.Insert(p1) + +// notice how we can wire up p1.Id to the invoice easily +inv1 := &Invoice{0, 0, 0, "xmas order", p1.Id} +dbmap.Insert(inv1) + +// Run your query +query := "select i.Id InvoiceId, p.Id PersonId, i.Memo, p.FName " + + "from invoice_test i, person_test p " + + "where i.PersonId = p.Id" + +// pass a slice to Select() +var list []InvoicePersonView +_, err := dbmap.Select(&list, query) + +// this should test true +expected := InvoicePersonView{inv1.Id, p1.Id, inv1.Memo, p1.FName} +if reflect.DeepEqual(list[0], expected) { + fmt.Println("Woot! My join worked!") +} +``` + +#### SELECT string or int64 + +gorp provides a few convenience methods for selecting a single string or int64. + +```go +// select single int64 from db (use $1 instead of ? for postgresql) +i64, err := dbmap.SelectInt("select count(*) from foo where blah=?", blahVal) + +// select single string from db: +s, err := dbmap.SelectStr("select name from foo where blah=?", blahVal) + +``` + +#### Named bind parameters + +You may use a map or struct to bind parameters by name. This is currently +only supported in SELECT queries. + +```go +_, err := dbm.Select(&dest, "select * from Foo where name = :name and age = :age", map[string]interface{}{ + "name": "Rob", + "age": 31, +}) +``` + +#### UPDATE / DELETE + +You can execute raw SQL if you wish. Particularly good for batch operations. 
+ +```go +res, err := dbmap.Exec("delete from invoice_test where PersonId=?", 10) +``` + +### Transactions + +You can batch operations into a transaction: + +```go +func InsertInv(dbmap *DbMap, inv *Invoice, per *Person) error { + // Start a new transaction + trans, err := dbmap.Begin() + if err != nil { + return err + } + + trans.Insert(per) + inv.PersonId = per.Id + trans.Insert(inv) + + // if the commit is successful, a nil error is returned + return trans.Commit() +} +``` + +### Hooks + +Use hooks to update data before/after saving to the db. Good for timestamps: + +```go +// implement the PreInsert and PreUpdate hooks +func (i *Invoice) PreInsert(s gorp.SqlExecutor) error { + i.Created = time.Now().UnixNano() + i.Updated = i.Created + return nil +} + +func (i *Invoice) PreUpdate(s gorp.SqlExecutor) error { + i.Updated = time.Now().UnixNano() + return nil +} + +// You can use the SqlExecutor to cascade additional SQL +// Take care to avoid cycles. gorp won't prevent them. +// +// Here's an example of a cascading delete +// +func (p *Person) PreDelete(s gorp.SqlExecutor) error { + query := "delete from invoice_test where PersonId=?" + + _, err := s.Exec(query, p.Id) + + if err != nil { + return err + } + return nil +} +``` + +Full list of hooks that you can implement: + + PostGet + PreInsert + PostInsert + PreUpdate + PostUpdate + PreDelete + PostDelete + + All have the same signature. for example: + + func (p *MyStruct) PostUpdate(s gorp.SqlExecutor) error + +### Optimistic Locking + +#### Note that this behaviour has changed in v2. See [Migration Guide](#migration-guide). + +gorp provides a simple optimistic locking feature, similar to Java's +JPA, that will raise an error if you try to update/delete a row whose +`version` column has a value different than the one in memory. This +provides a safe way to do "select then update" style operations +without explicit read and write locks. 
+ +```go +// Version is an auto-incremented number, managed by gorp +// If this property is present on your struct, update +// operations will be constrained +// +// For example, say we defined Person as: + +type Person struct { + Id int64 + Created int64 + Updated int64 + FName string + LName string + + // automatically used as the Version col + // use table.SetVersionCol("columnName") to map a different + // struct field as the version field + Version int64 +} + +p1 := &Person{0, 0, 0, "Bob", "Smith", 0} +dbmap.Insert(p1) // Version is now 1 + +obj, err := dbmap.Get(Person{}, p1.Id) +p2 := obj.(*Person) +p2.LName = "Edwards" +dbmap.Update(p2) // Version is now 2 + +p1.LName = "Howard" + +// Raises error because p1.Version == 1, which is out of date +count, err := dbmap.Update(p1) +_, ok := err.(gorp.OptimisticLockError) +if ok { + // should reach this statement + + // in a real app you might reload the row and retry, or + // you might propegate this to the user, depending on the desired + // semantics + fmt.Printf("Tried to update row with stale data: %v\n", err) +} else { + // some other db error occurred - log or return up the stack + fmt.Printf("Unknown db err: %v\n", err) +} +``` +### Adding INDEX(es) on column(s) beyond the primary key ### + +Indexes are frequently critical for performance. Here is how to add +them to your tables. + +NB: SqlServer and Oracle need testing and possible adjustment to the +CreateIndexSuffix() and DropIndexSuffix() methods to make AddIndex() +work for them. + +In the example below we put an index both on the Id field, and on the +AcctId field. + +``` +type Account struct { + Id int64 + AcctId string // e.g. this might be a long uuid for portability +} + +// indexType (the 2nd param to AddIndex call) is "Btree" or "Hash" for MySQL. +// demonstrate adding a second index on AcctId, and constrain that field to have unique values. 
+dbm.AddTable(iptab.Account{}).SetKeys(true, "Id").AddIndex("AcctIdIndex", "Btree", []string{"AcctId"}).SetUnique(true) + +err = dbm.CreateTablesIfNotExists() +checkErr(err, "CreateTablesIfNotExists failed") + +err = dbm.CreateIndex() +checkErr(err, "CreateIndex failed") + +``` +Check the effect of the CreateIndex() call in mysql: +``` +$ mysql + +MariaDB [test]> show create table Account; ++---------+--------------------------+ +| Account | CREATE TABLE `Account` ( + `Id` bigint(20) NOT NULL AUTO_INCREMENT, + `AcctId` varchar(255) DEFAULT NULL, + PRIMARY KEY (`Id`), + UNIQUE KEY `AcctIdIndex` (`AcctId`) USING BTREE <<<--- yes! index added. +) ENGINE=InnoDB DEFAULT CHARSET=utf8 ++---------+--------------------------+ + +``` + + +## Database Drivers + +gorp uses the Go 1 `database/sql` package. A full list of compliant +drivers is available here: + +http://code.google.com/p/go-wiki/wiki/SQLDrivers + +Sadly, SQL databases differ on various issues. gorp provides a Dialect +interface that should be implemented per database vendor. Dialects +are provided for: + +* MySQL +* PostgreSQL +* sqlite3 + +Each of these three databases pass the test suite. See `gorp_test.go` +for example DSNs for these three databases. + +Support is also provided for: + +* Oracle (contributed by @klaidliadon) +* SQL Server (contributed by @qrawl) - use driver: + github.com/denisenkom/go-mssqldb + +Note that these databases are not covered by CI and I (@coopernurse) +have no good way to test them locally. So please try them and send +patches as needed, but expect a bit more unpredicability. 
+ +## Sqlite3 Extensions + +In order to use sqlite3 extensions you need to first register a custom driver: + +```go +import ( + "database/sql" + + // use whatever database/sql driver you wish + sqlite "github.com/mattn/go-sqlite3" +) + +func customDriver() (*sql.DB, error) { + + // create custom driver with extensions defined + sql.Register("sqlite3-custom", &sqlite.SQLiteDriver{ + Extensions: []string{ + "mod_spatialite", + }, + }) + + // now you can then connect using the 'sqlite3-custom' driver instead of 'sqlite3' + return sql.Open("sqlite3-custom", "/tmp/post_db.bin") +} +``` + +## Known Issues + +### SQL placeholder portability + +Different databases use different strings to indicate variable +placeholders in prepared SQL statements. Unlike some database +abstraction layers (such as JDBC), Go's `database/sql` does not +standardize this. + +SQL generated by gorp in the `Insert`, `Update`, `Delete`, and `Get` +methods delegates to a Dialect implementation for each database, and +will generate portable SQL. + +Raw SQL strings passed to `Exec`, `Select`, `SelectOne`, `SelectInt`, +etc will not be parsed. Consequently you may have portability issues +if you write a query like this: + +```go +// works on MySQL and Sqlite3, but not with Postgresql err := +dbmap.SelectOne(&val, "select * from foo where id = ?", 30) +``` + +In `Select` and `SelectOne` you can use named parameters to work +around this. The following is portable: + +```go +err := dbmap.SelectOne(&val, "select * from foo where id = :id", +map[string]interface{} { "id": 30}) +``` + +Additionally, when using Postgres as your database, you should utilize +`$1` instead of `?` placeholders as utilizing `?` placeholders when +querying Postgres will result in `pq: operator does not exist` +errors. Alternatively, use `dbMap.Dialect.BindVar(varIdx)` to get the +proper variable binding for your dialect. 
+ +### time.Time and time zones + +gorp will pass `time.Time` fields through to the `database/sql` +driver, but note that the behavior of this type varies across database +drivers. + +MySQL users should be especially cautious. See: +https://github.com/ziutek/mymysql/pull/77 + +To avoid any potential issues with timezone/DST, consider: + +- Using an integer field for time data and storing UNIX time. +- Using a custom time type that implements some SQL types: + - [`"database/sql".Scanner`](https://golang.org/pkg/database/sql/#Scanner) + - [`"database/sql/driver".Valuer`](https://golang.org/pkg/database/sql/driver/#Valuer) + +## Running the tests + +The included tests may be run against MySQL, Postgresql, or sqlite3. +You must set two environment variables so the test code knows which +driver to use, and how to connect to your database. + +```sh +# MySQL example: +export GORP_TEST_DSN=gomysql_test/gomysql_test/abc123 +export GORP_TEST_DIALECT=mysql + +# run the tests +go test + +# run the tests and benchmarks +go test -bench="Bench" -benchtime 10 +``` + +Valid `GORP_TEST_DIALECT` values are: "mysql"(for mymysql), +"gomysql"(for go-sql-driver), "postgres", "sqlite" See the +`test_all.sh` script for examples of all 3 databases. This is the +script I run locally to test the library. + +## Performance + +gorp uses reflection to construct SQL queries and bind parameters. +See the BenchmarkNativeCrud vs BenchmarkGorpCrud in gorp_test.go for a +simple perf test. On my MacBook Pro gorp is about 2-3% slower than +hand written SQL. + + +## Contributors + +* matthias-margush - column aliasing via tags +* Rob Figueiredo - @robfig +* Quinn Slack - @sqs diff --git a/vendor/github.com/go-gorp/gorp/column.go b/vendor/github.com/go-gorp/gorp/column.go new file mode 100644 index 000000000..99d4fd555 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/column.go @@ -0,0 +1,83 @@ +// Copyright 2012 James Cooper. All rights reserved. 
+// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp + +package gorp + +import "reflect" + +// ColumnMap represents a mapping between a Go struct field and a single +// column in a table. +// Unique and MaxSize only inform the +// CreateTables() function and are not used by Insert/Update/Delete/Get. +type ColumnMap struct { + // Column name in db table + ColumnName string + + // If true, this column is skipped in generated SQL statements + Transient bool + + // If true, " unique" is added to create table statements. + // Not used elsewhere + Unique bool + + // Query used for getting generated id after insert + GeneratedIdQuery string + + // Passed to Dialect.ToSqlType() to assist in informing the + // correct column type to map to in CreateTables() + MaxSize int + + DefaultValue string + + fieldName string + gotype reflect.Type + isPK bool + isAutoIncr bool + isNotNull bool +} + +// Rename allows you to specify the column name in the table +// +// Example: table.ColMap("Updated").Rename("date_updated") +// +func (c *ColumnMap) Rename(colname string) *ColumnMap { + c.ColumnName = colname + return c +} + +// SetTransient allows you to mark the column as transient. If true +// this column will be skipped when SQL statements are generated +func (c *ColumnMap) SetTransient(b bool) *ColumnMap { + c.Transient = b + return c +} + +// SetUnique adds "unique" to the create table statements for this +// column, if b is true. +func (c *ColumnMap) SetUnique(b bool) *ColumnMap { + c.Unique = b + return c +} + +// SetNotNull adds "not null" to the create table statements for this +// column, if nn is true. 
+func (c *ColumnMap) SetNotNull(nn bool) *ColumnMap { + c.isNotNull = nn + return c +} + +// SetMaxSize specifies the max length of values of this column. This is +// passed to the dialect.ToSqlType() function, which can use the value +// to alter the generated type for "create table" statements +func (c *ColumnMap) SetMaxSize(size int) *ColumnMap { + c.MaxSize = size + return c +} diff --git a/vendor/github.com/go-gorp/gorp/db.go b/vendor/github.com/go-gorp/gorp/db.go new file mode 100644 index 000000000..dfb92c952 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/db.go @@ -0,0 +1,787 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp + +package gorp + +import ( + "bytes" + "context" + "database/sql" + "database/sql/driver" + "errors" + "fmt" + "log" + "reflect" + "strconv" + "strings" + "time" +) + +// DbMap is the root gorp mapping object. Create one of these for each +// database schema you wish to map. Each DbMap contains a list of +// mapped tables. 
+// +// Example: +// +// dialect := gorp.MySQLDialect{"InnoDB", "UTF8"} +// dbmap := &gorp.DbMap{Db: db, Dialect: dialect} +// +type DbMap struct { + ctx context.Context + + // Db handle to use with this map + Db *sql.DB + + // Dialect implementation to use with this map + Dialect Dialect + + TypeConverter TypeConverter + + tables []*TableMap + tablesDynamic map[string]*TableMap // tables that use same go-struct and different db table names + logger GorpLogger + logPrefix string +} + +func (m *DbMap) dynamicTableAdd(tableName string, tbl *TableMap) { + if m.tablesDynamic == nil { + m.tablesDynamic = make(map[string]*TableMap) + } + m.tablesDynamic[tableName] = tbl +} + +func (m *DbMap) dynamicTableFind(tableName string) (*TableMap, bool) { + if m.tablesDynamic == nil { + return nil, false + } + tbl, found := m.tablesDynamic[tableName] + return tbl, found +} + +func (m *DbMap) dynamicTableMap() map[string]*TableMap { + if m.tablesDynamic == nil { + m.tablesDynamic = make(map[string]*TableMap) + } + return m.tablesDynamic +} + +func (m *DbMap) WithContext(ctx context.Context) SqlExecutor { + copy := &DbMap{} + *copy = *m + copy.ctx = ctx + return copy +} + +func (m *DbMap) CreateIndex() error { + var err error + dialect := reflect.TypeOf(m.Dialect) + for _, table := range m.tables { + for _, index := range table.indexes { + err = m.createIndexImpl(dialect, table, index) + if err != nil { + break + } + } + } + + for _, table := range m.dynamicTableMap() { + for _, index := range table.indexes { + err = m.createIndexImpl(dialect, table, index) + if err != nil { + break + } + } + } + + return err +} + +func (m *DbMap) createIndexImpl(dialect reflect.Type, + table *TableMap, + index *IndexMap) error { + s := bytes.Buffer{} + s.WriteString("create") + if index.Unique { + s.WriteString(" unique") + } + s.WriteString(" index") + s.WriteString(fmt.Sprintf(" %s on %s", index.IndexName, table.TableName)) + if dname := dialect.Name(); dname == "PostgresDialect" && 
index.IndexType != "" { + s.WriteString(fmt.Sprintf(" %s %s", m.Dialect.CreateIndexSuffix(), index.IndexType)) + } + s.WriteString(" (") + for x, col := range index.columns { + if x > 0 { + s.WriteString(", ") + } + s.WriteString(m.Dialect.QuoteField(col)) + } + s.WriteString(")") + + if dname := dialect.Name(); dname == "MySQLDialect" && index.IndexType != "" { + s.WriteString(fmt.Sprintf(" %s %s", m.Dialect.CreateIndexSuffix(), index.IndexType)) + } + s.WriteString(";") + _, err := m.Exec(s.String()) + return err +} + +func (t *TableMap) DropIndex(name string) error { + + var err error + dialect := reflect.TypeOf(t.dbmap.Dialect) + for _, idx := range t.indexes { + if idx.IndexName == name { + s := bytes.Buffer{} + s.WriteString(fmt.Sprintf("DROP INDEX %s", idx.IndexName)) + + if dname := dialect.Name(); dname == "MySQLDialect" { + s.WriteString(fmt.Sprintf(" %s %s", t.dbmap.Dialect.DropIndexSuffix(), t.TableName)) + } + s.WriteString(";") + _, e := t.dbmap.Exec(s.String()) + if e != nil { + err = e + } + break + } + } + t.ResetSql() + return err +} + +// AddTable registers the given interface type with gorp. The table name +// will be given the name of the TypeOf(i). You must call this function, +// or AddTableWithName, for any struct type you wish to persist with +// the given DbMap. +// +// This operation is idempotent. If i's type is already mapped, the +// existing *TableMap is returned +func (m *DbMap) AddTable(i interface{}) *TableMap { + return m.AddTableWithName(i, "") +} + +// AddTableWithName has the same behavior as AddTable, but sets +// table.TableName to name. +func (m *DbMap) AddTableWithName(i interface{}, name string) *TableMap { + return m.AddTableWithNameAndSchema(i, "", name) +} + +// AddTableWithNameAndSchema has the same behavior as AddTable, but sets +// table.TableName to name. 
+func (m *DbMap) AddTableWithNameAndSchema(i interface{}, schema string, name string) *TableMap { + t := reflect.TypeOf(i) + if name == "" { + name = t.Name() + } + + // check if we have a table for this type already + // if so, update the name and return the existing pointer + for i := range m.tables { + table := m.tables[i] + if table.gotype == t { + table.TableName = name + return table + } + } + + tmap := &TableMap{gotype: t, TableName: name, SchemaName: schema, dbmap: m} + var primaryKey []*ColumnMap + tmap.Columns, primaryKey = m.readStructColumns(t) + m.tables = append(m.tables, tmap) + if len(primaryKey) > 0 { + tmap.keys = append(tmap.keys, primaryKey...) + } + + return tmap +} + +// AddTableDynamic registers the given interface type with gorp. +// The table name will be dynamically determined at runtime by +// using the GetTableName method on DynamicTable interface +func (m *DbMap) AddTableDynamic(inp DynamicTable, schema string) *TableMap { + + val := reflect.ValueOf(inp) + elm := val.Elem() + t := elm.Type() + name := inp.TableName() + if name == "" { + panic("Missing table name in DynamicTable instance") + } + + // Check if there is another dynamic table with the same name + if _, found := m.dynamicTableFind(name); found { + panic(fmt.Sprintf("A table with the same name %v already exists", name)) + } + + tmap := &TableMap{gotype: t, TableName: name, SchemaName: schema, dbmap: m} + var primaryKey []*ColumnMap + tmap.Columns, primaryKey = m.readStructColumns(t) + if len(primaryKey) > 0 { + tmap.keys = append(tmap.keys, primaryKey...) + } + + m.dynamicTableAdd(name, tmap) + + return tmap +} + +func (m *DbMap) readStructColumns(t reflect.Type) (cols []*ColumnMap, primaryKey []*ColumnMap) { + primaryKey = make([]*ColumnMap, 0) + n := t.NumField() + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Anonymous && f.Type.Kind() == reflect.Struct { + // Recursively add nested fields in embedded structs. 
+ subcols, subpk := m.readStructColumns(f.Type) + // Don't append nested fields that have the same field + // name as an already-mapped field. + for _, subcol := range subcols { + shouldAppend := true + for _, col := range cols { + if !subcol.Transient && subcol.fieldName == col.fieldName { + shouldAppend = false + break + } + } + if shouldAppend { + cols = append(cols, subcol) + } + } + if subpk != nil { + primaryKey = append(primaryKey, subpk...) + } + } else { + // Tag = Name { ',' Option } + // Option = OptionKey [ ':' OptionValue ] + cArguments := strings.Split(f.Tag.Get("db"), ",") + columnName := cArguments[0] + var maxSize int + var defaultValue string + var isAuto bool + var isPK bool + var isNotNull bool + for _, argString := range cArguments[1:] { + argString = strings.TrimSpace(argString) + arg := strings.SplitN(argString, ":", 2) + + // check mandatory/unexpected option values + switch arg[0] { + case "size", "default": + // options requiring value + if len(arg) == 1 { + panic(fmt.Sprintf("missing option value for option %v on field %v", arg[0], f.Name)) + } + default: + // options where value is invalid (currently all other options) + if len(arg) == 2 { + panic(fmt.Sprintf("unexpected option value for option %v on field %v", arg[0], f.Name)) + } + } + + switch arg[0] { + case "size": + maxSize, _ = strconv.Atoi(arg[1]) + case "default": + defaultValue = arg[1] + case "primarykey": + isPK = true + case "autoincrement": + isAuto = true + case "notnull": + isNotNull = true + default: + panic(fmt.Sprintf("Unrecognized tag option for field %v: %v", f.Name, arg)) + } + } + if columnName == "" { + columnName = f.Name + } + + gotype := f.Type + valueType := gotype + if valueType.Kind() == reflect.Ptr { + valueType = valueType.Elem() + } + value := reflect.New(valueType).Interface() + if m.TypeConverter != nil { + // Make a new pointer to a value of type gotype and + // pass it to the TypeConverter's FromDb method to see + // if a different type should be used 
for the column + // type during table creation. + scanner, useHolder := m.TypeConverter.FromDb(value) + if useHolder { + value = scanner.Holder + gotype = reflect.TypeOf(value) + } + } + if typer, ok := value.(SqlTyper); ok { + gotype = reflect.TypeOf(typer.SqlType()) + } else if typer, ok := value.(legacySqlTyper); ok { + log.Printf("Deprecation Warning: update your SqlType methods to return a driver.Value") + gotype = reflect.TypeOf(typer.SqlType()) + } else if valuer, ok := value.(driver.Valuer); ok { + // Only check for driver.Valuer if SqlTyper wasn't + // found. + v, err := valuer.Value() + if err == nil && v != nil { + gotype = reflect.TypeOf(v) + } + } + cm := &ColumnMap{ + ColumnName: columnName, + DefaultValue: defaultValue, + Transient: columnName == "-", + fieldName: f.Name, + gotype: gotype, + isPK: isPK, + isAutoIncr: isAuto, + isNotNull: isNotNull, + MaxSize: maxSize, + } + if isPK { + primaryKey = append(primaryKey, cm) + } + // Check for nested fields of the same field name and + // override them. + shouldAppend := true + for index, col := range cols { + if !col.Transient && col.fieldName == cm.fieldName { + cols[index] = cm + shouldAppend = false + break + } + } + if shouldAppend { + cols = append(cols, cm) + } + } + + } + return +} + +// CreateTables iterates through TableMaps registered to this DbMap and +// executes "create table" statements against the database for each. +// +// This is particularly useful in unit tests where you want to create +// and destroy the schema automatically. 
+func (m *DbMap) CreateTables() error { + return m.createTables(false) +} + +// CreateTablesIfNotExists is similar to CreateTables, but starts +// each statement with "create table if not exists" so that existing +// tables do not raise errors +func (m *DbMap) CreateTablesIfNotExists() error { + return m.createTables(true) +} + +func (m *DbMap) createTables(ifNotExists bool) error { + var err error + for i := range m.tables { + table := m.tables[i] + sql := table.SqlForCreate(ifNotExists) + _, err = m.Exec(sql) + if err != nil { + return err + } + } + + for _, tbl := range m.dynamicTableMap() { + sql := tbl.SqlForCreate(ifNotExists) + _, err = m.Exec(sql) + if err != nil { + return err + } + } + + return err +} + +// DropTable drops an individual table. +// Returns an error when the table does not exist. +func (m *DbMap) DropTable(table interface{}) error { + t := reflect.TypeOf(table) + + tableName := "" + if dyn, ok := table.(DynamicTable); ok { + tableName = dyn.TableName() + } + + return m.dropTable(t, tableName, false) +} + +// DropTableIfExists drops an individual table when the table exists. +func (m *DbMap) DropTableIfExists(table interface{}) error { + t := reflect.TypeOf(table) + + tableName := "" + if dyn, ok := table.(DynamicTable); ok { + tableName = dyn.TableName() + } + + return m.dropTable(t, tableName, true) +} + +// DropTables iterates through TableMaps registered to this DbMap and +// executes "drop table" statements against the database for each. +func (m *DbMap) DropTables() error { + return m.dropTables(false) +} + +// DropTablesIfExists is the same as DropTables, but uses the "if exists" clause to +// avoid errors for tables that do not exist. +func (m *DbMap) DropTablesIfExists() error { + return m.dropTables(true) +} + +// Goes through all the registered tables, dropping them one by one. +// If an error is encountered, then it is returned and the rest of +// the tables are not dropped. 
+func (m *DbMap) dropTables(addIfExists bool) (err error) { + for _, table := range m.tables { + err = m.dropTableImpl(table, addIfExists) + if err != nil { + return err + } + } + + for _, table := range m.dynamicTableMap() { + err = m.dropTableImpl(table, addIfExists) + if err != nil { + return err + } + } + + return err +} + +// Implementation of dropping a single table. +func (m *DbMap) dropTable(t reflect.Type, name string, addIfExists bool) error { + table := tableOrNil(m, t, name) + if table == nil { + return fmt.Errorf("table %s was not registered", table.TableName) + } + + return m.dropTableImpl(table, addIfExists) +} + +func (m *DbMap) dropTableImpl(table *TableMap, ifExists bool) (err error) { + tableDrop := "drop table" + if ifExists { + tableDrop = m.Dialect.IfTableExists(tableDrop, table.SchemaName, table.TableName) + } + _, err = m.Exec(fmt.Sprintf("%s %s;", tableDrop, m.Dialect.QuotedTableForQuery(table.SchemaName, table.TableName))) + return err +} + +// TruncateTables iterates through TableMaps registered to this DbMap and +// executes "truncate table" statements against the database for each, or in the case of +// sqlite, a "delete from" with no "where" clause, which uses the truncate optimization +// (http://www.sqlite.org/lang_delete.html) +func (m *DbMap) TruncateTables() error { + var err error + for i := range m.tables { + table := m.tables[i] + _, e := m.Exec(fmt.Sprintf("%s %s;", m.Dialect.TruncateClause(), m.Dialect.QuotedTableForQuery(table.SchemaName, table.TableName))) + if e != nil { + err = e + } + } + + for _, table := range m.dynamicTableMap() { + _, e := m.Exec(fmt.Sprintf("%s %s;", m.Dialect.TruncateClause(), m.Dialect.QuotedTableForQuery(table.SchemaName, table.TableName))) + if e != nil { + err = e + } + } + + return err +} + +// Insert runs a SQL INSERT statement for each element in list. List +// items must be pointers. 
+// +// Any interface whose TableMap has an auto-increment primary key will +// have its last insert id bound to the PK field on the struct. +// +// The hook functions PreInsert() and/or PostInsert() will be executed +// before/after the INSERT statement if the interface defines them. +// +// Panics if any interface in the list has not been registered with AddTable +func (m *DbMap) Insert(list ...interface{}) error { + return insert(m, m, list...) +} + +// Update runs a SQL UPDATE statement for each element in list. List +// items must be pointers. +// +// The hook functions PreUpdate() and/or PostUpdate() will be executed +// before/after the UPDATE statement if the interface defines them. +// +// Returns the number of rows updated. +// +// Returns an error if SetKeys has not been called on the TableMap +// Panics if any interface in the list has not been registered with AddTable +func (m *DbMap) Update(list ...interface{}) (int64, error) { + return update(m, m, nil, list...) +} + +// UpdateColumns runs a SQL UPDATE statement for each element in list. List +// items must be pointers. +// +// Only the columns accepted by filter are included in the UPDATE. +// +// The hook functions PreUpdate() and/or PostUpdate() will be executed +// before/after the UPDATE statement if the interface defines them. +// +// Returns the number of rows updated. +// +// Returns an error if SetKeys has not been called on the TableMap +// Panics if any interface in the list has not been registered with AddTable +func (m *DbMap) UpdateColumns(filter ColumnFilter, list ...interface{}) (int64, error) { + return update(m, m, filter, list...) +} + +// Delete runs a SQL DELETE statement for each element in list. List +// items must be pointers. +// +// The hook functions PreDelete() and/or PostDelete() will be executed +// before/after the DELETE statement if the interface defines them. +// +// Returns the number of rows deleted. 
+// +// Returns an error if SetKeys has not been called on the TableMap +// Panics if any interface in the list has not been registered with AddTable +func (m *DbMap) Delete(list ...interface{}) (int64, error) { + return delete(m, m, list...) +} + +// Get runs a SQL SELECT to fetch a single row from the table based on the +// primary key(s) +// +// i should be an empty value for the struct to load. keys should be +// the primary key value(s) for the row to load. If multiple keys +// exist on the table, the order should match the column order +// specified in SetKeys() when the table mapping was defined. +// +// The hook function PostGet() will be executed after the SELECT +// statement if the interface defines them. +// +// Returns a pointer to a struct that matches or nil if no row is found. +// +// Returns an error if SetKeys has not been called on the TableMap +// Panics if any interface in the list has not been registered with AddTable +func (m *DbMap) Get(i interface{}, keys ...interface{}) (interface{}, error) { + return get(m, m, i, keys...) +} + +// Select runs an arbitrary SQL query, binding the columns in the result +// to fields on the struct specified by i. args represent the bind +// parameters for the SQL statement. +// +// Column names on the SELECT statement should be aliased to the field names +// on the struct i. Returns an error if one or more columns in the result +// do not match. It is OK if fields on i are not part of the SQL +// statement. +// +// The hook function PostGet() will be executed after the SELECT +// statement if the interface defines them. +// +// Values are returned in one of two ways: +// 1. If i is a struct or a pointer to a struct, returns a slice of pointers to +// matching rows of type i. +// 2. If i is a pointer to a slice, the results will be appended to that slice +// and nil returned. 
+// +// i does NOT need to be registered with AddTable() +func (m *DbMap) Select(i interface{}, query string, args ...interface{}) ([]interface{}, error) { + return hookedselect(m, m, i, query, args...) +} + +// Exec runs an arbitrary SQL statement. args represent the bind parameters. +// This is equivalent to running: Exec() using database/sql +func (m *DbMap) Exec(query string, args ...interface{}) (sql.Result, error) { + if m.logger != nil { + now := time.Now() + defer m.trace(now, query, args...) + } + return maybeExpandNamedQueryAndExec(m, query, args...) +} + +// SelectInt is a convenience wrapper around the gorp.SelectInt function +func (m *DbMap) SelectInt(query string, args ...interface{}) (int64, error) { + return SelectInt(m, query, args...) +} + +// SelectNullInt is a convenience wrapper around the gorp.SelectNullInt function +func (m *DbMap) SelectNullInt(query string, args ...interface{}) (sql.NullInt64, error) { + return SelectNullInt(m, query, args...) +} + +// SelectFloat is a convenience wrapper around the gorp.SelectFloat function +func (m *DbMap) SelectFloat(query string, args ...interface{}) (float64, error) { + return SelectFloat(m, query, args...) +} + +// SelectNullFloat is a convenience wrapper around the gorp.SelectNullFloat function +func (m *DbMap) SelectNullFloat(query string, args ...interface{}) (sql.NullFloat64, error) { + return SelectNullFloat(m, query, args...) +} + +// SelectStr is a convenience wrapper around the gorp.SelectStr function +func (m *DbMap) SelectStr(query string, args ...interface{}) (string, error) { + return SelectStr(m, query, args...) +} + +// SelectNullStr is a convenience wrapper around the gorp.SelectNullStr function +func (m *DbMap) SelectNullStr(query string, args ...interface{}) (sql.NullString, error) { + return SelectNullStr(m, query, args...) 
+} + +// SelectOne is a convenience wrapper around the gorp.SelectOne function +func (m *DbMap) SelectOne(holder interface{}, query string, args ...interface{}) error { + return SelectOne(m, m, holder, query, args...) +} + +// Begin starts a gorp Transaction +func (m *DbMap) Begin() (*Transaction, error) { + if m.logger != nil { + now := time.Now() + defer m.trace(now, "begin;") + } + tx, err := begin(m) + if err != nil { + return nil, err + } + return &Transaction{ + dbmap: m, + tx: tx, + closed: false, + }, nil +} + +// TableFor returns the *TableMap corresponding to the given Go Type +// If no table is mapped to that type an error is returned. +// If checkPK is true and the mapped table has no registered PKs, an error is returned. +func (m *DbMap) TableFor(t reflect.Type, checkPK bool) (*TableMap, error) { + table := tableOrNil(m, t, "") + if table == nil { + return nil, fmt.Errorf("no table found for type: %v", t.Name()) + } + + if checkPK && len(table.keys) < 1 { + e := fmt.Sprintf("gorp: no keys defined for table: %s", + table.TableName) + return nil, errors.New(e) + } + + return table, nil +} + +// DynamicTableFor returns the *TableMap for the dynamic table corresponding +// to the input tablename +// If no table is mapped to that tablename an error is returned. +// If checkPK is true and the mapped table has no registered PKs, an error is returned. +func (m *DbMap) DynamicTableFor(tableName string, checkPK bool) (*TableMap, error) { + table, found := m.dynamicTableFind(tableName) + if !found { + return nil, fmt.Errorf("gorp: no table found for name: %v", tableName) + } + + if checkPK && len(table.keys) < 1 { + e := fmt.Sprintf("gorp: no keys defined for table: %s", + table.TableName) + return nil, errors.New(e) + } + + return table, nil +} + +// Prepare creates a prepared statement for later queries or executions. +// Multiple queries or executions may be run concurrently from the returned statement. 
+// This is equivalent to running: Prepare() using database/sql +func (m *DbMap) Prepare(query string) (*sql.Stmt, error) { + if m.logger != nil { + now := time.Now() + defer m.trace(now, query, nil) + } + return prepare(m, query) +} + +func tableOrNil(m *DbMap, t reflect.Type, name string) *TableMap { + if name != "" { + // Search by table name (dynamic tables) + if table, found := m.dynamicTableFind(name); found { + return table + } + return nil + } + + for i := range m.tables { + table := m.tables[i] + if table.gotype == t { + return table + } + } + return nil +} + +func (m *DbMap) tableForPointer(ptr interface{}, checkPK bool) (*TableMap, reflect.Value, error) { + ptrv := reflect.ValueOf(ptr) + if ptrv.Kind() != reflect.Ptr { + e := fmt.Sprintf("gorp: passed non-pointer: %v (kind=%v)", ptr, + ptrv.Kind()) + return nil, reflect.Value{}, errors.New(e) + } + elem := ptrv.Elem() + ifc := elem.Interface() + var t *TableMap + var err error + tableName := "" + if dyn, isDyn := ptr.(DynamicTable); isDyn { + tableName = dyn.TableName() + t, err = m.DynamicTableFor(tableName, checkPK) + } else { + etype := reflect.TypeOf(ifc) + t, err = m.TableFor(etype, checkPK) + } + + if err != nil { + return nil, reflect.Value{}, err + } + + return t, elem, nil +} + +func (m *DbMap) QueryRow(query string, args ...interface{}) *sql.Row { + if m.logger != nil { + now := time.Now() + defer m.trace(now, query, args...) + } + return queryRow(m, query, args...) +} + +func (m *DbMap) Query(q string, args ...interface{}) (*sql.Rows, error) { + if m.logger != nil { + now := time.Now() + defer m.trace(now, q, args...) + } + return query(m, q, args...) +} + +func (m *DbMap) trace(started time.Time, query string, args ...interface{}) { + if m.logger != nil { + var margs = argsString(args...) 
+ m.logger.Printf("%s%s [%s] (%v)", m.logPrefix, query, margs, (time.Now().Sub(started))) + } +} diff --git a/vendor/github.com/go-gorp/gorp/dialect.go b/vendor/github.com/go-gorp/gorp/dialect.go new file mode 100644 index 000000000..22e30999d --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/dialect.go @@ -0,0 +1,112 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp + +package gorp + +import ( + "reflect" +) + +// The Dialect interface encapsulates behaviors that differ across +// SQL databases. At present the Dialect is only used by CreateTables() +// but this could change in the future +type Dialect interface { + // adds a suffix to any query, usually ";" + QuerySuffix() string + + // ToSqlType returns the SQL column type to use when creating a + // table of the given Go Type. maxsize can be used to switch based on + // size. For example, in MySQL []byte could map to BLOB, MEDIUMBLOB, + // or LONGBLOB depending on the maxsize + ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string + + // string to append to primary key column definitions + AutoIncrStr() string + + // string to bind autoincrement columns to. Empty string will + // remove reference to those columns in the INSERT statement. 
+ AutoIncrBindValue() string + + AutoIncrInsertSuffix(col *ColumnMap) string + + // string to append to "create table" statement for vendor specific + // table attributes + CreateTableSuffix() string + + // string to append to "create index" statement + CreateIndexSuffix() string + + // string to append to "drop index" statement + DropIndexSuffix() string + + // string to truncate tables + TruncateClause() string + + // bind variable string to use when forming SQL statements + // in many dbs it is "?", but Postgres appears to use $1 + // + // i is a zero based index of the bind variable in this statement + // + BindVar(i int) string + + // Handles quoting of a field name to ensure that it doesn't raise any + // SQL parsing exceptions by using a reserved word as a field name. + QuoteField(field string) string + + // Handles building up of a schema.database string that is compatible with + // the given dialect + // + // schema - The schema that lives in + // table - The table name + QuotedTableForQuery(schema string, table string) string + + // Existence clause for table creation / deletion + IfSchemaNotExists(command, schema string) string + IfTableExists(command, schema, table string) string + IfTableNotExists(command, schema, table string) string +} + +// IntegerAutoIncrInserter is implemented by dialects that can perform +// inserts with automatically incremented integer primary keys. If +// the dialect can handle automatic assignment of more than just +// integers, see TargetedAutoIncrInserter. +type IntegerAutoIncrInserter interface { + InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) +} + +// TargetedAutoIncrInserter is implemented by dialects that can +// perform automatic assignment of any primary key type (i.e. strings +// for uuids, integers for serials, etc). 
+type TargetedAutoIncrInserter interface { + // InsertAutoIncrToTarget runs an insert operation and assigns the + // automatically generated primary key directly to the passed in + // target. The target should be a pointer to the primary key + // field of the value being inserted. + InsertAutoIncrToTarget(exec SqlExecutor, insertSql string, target interface{}, params ...interface{}) error +} + +// TargetQueryInserter is implemented by dialects that can perform +// assignment of integer primary key type by executing a query +// like "select sequence.currval from dual". +type TargetQueryInserter interface { + // TargetQueryInserter runs an insert operation and assigns the + // automatically generated primary key retrived by the query + // extracted from the GeneratedIdQuery field of the id column. + InsertQueryToTarget(exec SqlExecutor, insertSql, idSql string, target interface{}, params ...interface{}) error +} + +func standardInsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { + res, err := exec.Exec(insertSql, params...) + if err != nil { + return 0, err + } + return res.LastInsertId() +} diff --git a/vendor/github.com/go-gorp/gorp/dialect_mysql.go b/vendor/github.com/go-gorp/gorp/dialect_mysql.go new file mode 100644 index 000000000..06606b8b6 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/dialect_mysql.go @@ -0,0 +1,176 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp + +package gorp + +import ( + "fmt" + "reflect" + "strings" + "time" +) + +// Implementation of Dialect for MySQL databases. 
+type MySQLDialect struct { + + // Engine is the storage engine to use "InnoDB" vs "MyISAM" for example + Engine string + + // Encoding is the character encoding to use for created tables + Encoding string +} + +func (d MySQLDialect) QuerySuffix() string { return ";" } + +func (d MySQLDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "boolean" + case reflect.Int8: + return "tinyint" + case reflect.Uint8: + return "tinyint unsigned" + case reflect.Int16: + return "smallint" + case reflect.Uint16: + return "smallint unsigned" + case reflect.Int, reflect.Int32: + return "int" + case reflect.Uint, reflect.Uint32: + return "int unsigned" + case reflect.Int64: + return "bigint" + case reflect.Uint64: + return "bigint unsigned" + case reflect.Float64, reflect.Float32: + return "double" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "mediumblob" + } + } + + switch val.Name() { + case "NullInt64": + return "bigint" + case "NullFloat64": + return "double" + case "NullBool": + return "tinyint" + case "Time": + return "datetime" + } + + if maxsize < 1 { + maxsize = 255 + } + + /* == About varchar(N) == + * N is number of characters. + * A varchar column can store up to 65535 bytes. + * Remember that 1 character is 3 bytes in utf-8 charset. + * Also remember that each row can store up to 65535 bytes, + * and you have some overheads, so it's not possible for a + * varchar column to have 65535/3 characters really. + * So it would be better to use 'text' type in stead of + * large varchar type. 
+ */ + if maxsize < 256 { + return fmt.Sprintf("varchar(%d)", maxsize) + } else { + return "text" + } +} + +// Returns auto_increment +func (d MySQLDialect) AutoIncrStr() string { + return "auto_increment" +} + +func (d MySQLDialect) AutoIncrBindValue() string { + return "null" +} + +func (d MySQLDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return "" +} + +// Returns engine=%s charset=%s based on values stored on struct +func (d MySQLDialect) CreateTableSuffix() string { + if d.Engine == "" || d.Encoding == "" { + msg := "gorp - undefined" + + if d.Engine == "" { + msg += " MySQLDialect.Engine" + } + if d.Engine == "" && d.Encoding == "" { + msg += "," + } + if d.Encoding == "" { + msg += " MySQLDialect.Encoding" + } + msg += ". Check that your MySQLDialect was correctly initialized when declared." + panic(msg) + } + + return fmt.Sprintf(" engine=%s charset=%s", d.Engine, d.Encoding) +} + +func (d MySQLDialect) CreateIndexSuffix() string { + return "using" +} + +func (d MySQLDialect) DropIndexSuffix() string { + return "on" +} + +func (d MySQLDialect) TruncateClause() string { + return "truncate" +} + +func (d MySQLDialect) SleepClause(s time.Duration) string { + return fmt.Sprintf("sleep(%f)", s.Seconds()) +} + +// Returns "?" +func (d MySQLDialect) BindVar(i int) string { + return "?" +} + +func (d MySQLDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { + return standardInsertAutoIncr(exec, insertSql, params...) +} + +func (d MySQLDialect) QuoteField(f string) string { + return "`" + f + "`" +} + +func (d MySQLDialect) QuotedTableForQuery(schema string, table string) string { + if strings.TrimSpace(schema) == "" { + return d.QuoteField(table) + } + + return schema + "." 
+ d.QuoteField(table) +} + +func (d MySQLDialect) IfSchemaNotExists(command, schema string) string { + return fmt.Sprintf("%s if not exists", command) +} + +func (d MySQLDialect) IfTableExists(command, schema, table string) string { + return fmt.Sprintf("%s if exists", command) +} + +func (d MySQLDialect) IfTableNotExists(command, schema, table string) string { + return fmt.Sprintf("%s if not exists", command) +} diff --git a/vendor/github.com/go-gorp/gorp/dialect_oracle.go b/vendor/github.com/go-gorp/gorp/dialect_oracle.go new file mode 100644 index 000000000..c381380f9 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/dialect_oracle.go @@ -0,0 +1,146 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp + +package gorp + +import ( + "fmt" + "reflect" + "strings" +) + +// Implementation of Dialect for Oracle databases. 
+type OracleDialect struct{} + +func (d OracleDialect) QuerySuffix() string { return "" } + +func (d OracleDialect) CreateIndexSuffix() string { return "" } + +func (d OracleDialect) DropIndexSuffix() string { return "" } + +func (d OracleDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "boolean" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32: + if isAutoIncr { + return "serial" + } + return "integer" + case reflect.Int64, reflect.Uint64: + if isAutoIncr { + return "bigserial" + } + return "bigint" + case reflect.Float64: + return "double precision" + case reflect.Float32: + return "real" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "bytea" + } + } + + switch val.Name() { + case "NullInt64": + return "bigint" + case "NullFloat64": + return "double precision" + case "NullBool": + return "boolean" + case "NullTime", "Time": + return "timestamp with time zone" + } + + if maxsize > 0 { + return fmt.Sprintf("varchar(%d)", maxsize) + } else { + return "text" + } + +} + +// Returns empty string +func (d OracleDialect) AutoIncrStr() string { + return "" +} + +func (d OracleDialect) AutoIncrBindValue() string { + return "NULL" +} + +func (d OracleDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return "" +} + +// Returns suffix +func (d OracleDialect) CreateTableSuffix() string { + return "" +} + +func (d OracleDialect) TruncateClause() string { + return "truncate" +} + +// Returns "$(i+1)" +func (d OracleDialect) BindVar(i int) string { + return fmt.Sprintf(":%d", i+1) +} + +// After executing the insert uses the ColMap IdQuery to get the generated id +func (d OracleDialect) InsertQueryToTarget(exec SqlExecutor, insertSql, idSql string, target interface{}, params ...interface{}) error { + _, err := 
exec.Exec(insertSql, params...) + if err != nil { + return err + } + id, err := exec.SelectInt(idSql) + if err != nil { + return err + } + switch target.(type) { + case *int64: + *(target.(*int64)) = id + case *int32: + *(target.(*int32)) = int32(id) + case int: + *(target.(*int)) = int(id) + default: + return fmt.Errorf("Id field can be int, int32 or int64") + } + return nil +} + +func (d OracleDialect) QuoteField(f string) string { + return `"` + strings.ToUpper(f) + `"` +} + +func (d OracleDialect) QuotedTableForQuery(schema string, table string) string { + if strings.TrimSpace(schema) == "" { + return d.QuoteField(table) + } + + return schema + "." + d.QuoteField(table) +} + +func (d OracleDialect) IfSchemaNotExists(command, schema string) string { + return fmt.Sprintf("%s if not exists", command) +} + +func (d OracleDialect) IfTableExists(command, schema, table string) string { + return fmt.Sprintf("%s if exists", command) +} + +func (d OracleDialect) IfTableNotExists(command, schema, table string) string { + return fmt.Sprintf("%s if not exists", command) +} diff --git a/vendor/github.com/go-gorp/gorp/dialect_postgres.go b/vendor/github.com/go-gorp/gorp/dialect_postgres.go new file mode 100644 index 000000000..07c9bb9a6 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/dialect_postgres.go @@ -0,0 +1,156 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. 
+// +// Source code and project home: +// https://github.com/go-gorp/gorp + +package gorp + +import ( + "fmt" + "reflect" + "strings" + "time" +) + +type PostgresDialect struct { + suffix string + LowercaseFields bool +} + +func (d PostgresDialect) QuerySuffix() string { return ";" } + +func (d PostgresDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "boolean" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32: + if isAutoIncr { + return "serial" + } + return "integer" + case reflect.Int64, reflect.Uint64: + if isAutoIncr { + return "bigserial" + } + return "bigint" + case reflect.Float64: + return "double precision" + case reflect.Float32: + return "real" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "bytea" + } + } + + switch val.Name() { + case "NullInt64": + return "bigint" + case "NullFloat64": + return "double precision" + case "NullBool": + return "boolean" + case "Time", "NullTime": + return "timestamp with time zone" + } + + if maxsize > 0 { + return fmt.Sprintf("varchar(%d)", maxsize) + } else { + return "text" + } + +} + +// Returns empty string +func (d PostgresDialect) AutoIncrStr() string { + return "" +} + +func (d PostgresDialect) AutoIncrBindValue() string { + return "default" +} + +func (d PostgresDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return " returning " + d.QuoteField(col.ColumnName) +} + +// Returns suffix +func (d PostgresDialect) CreateTableSuffix() string { + return d.suffix +} + +func (d PostgresDialect) CreateIndexSuffix() string { + return "using" +} + +func (d PostgresDialect) DropIndexSuffix() string { + return "" +} + +func (d PostgresDialect) TruncateClause() string { + return "truncate" +} + +func (d PostgresDialect) SleepClause(s time.Duration) string { + return 
fmt.Sprintf("pg_sleep(%f)", s.Seconds()) +} + +// Returns "$(i+1)" +func (d PostgresDialect) BindVar(i int) string { + return fmt.Sprintf("$%d", i+1) +} + +func (d PostgresDialect) InsertAutoIncrToTarget(exec SqlExecutor, insertSql string, target interface{}, params ...interface{}) error { + rows, err := exec.Query(insertSql, params...) + if err != nil { + return err + } + defer rows.Close() + + if !rows.Next() { + return fmt.Errorf("No serial value returned for insert: %s Encountered error: %s", insertSql, rows.Err()) + } + if err := rows.Scan(target); err != nil { + return err + } + if rows.Next() { + return fmt.Errorf("more than two serial value returned for insert: %s", insertSql) + } + return rows.Err() +} + +func (d PostgresDialect) QuoteField(f string) string { + if d.LowercaseFields { + return `"` + strings.ToLower(f) + `"` + } + return `"` + f + `"` +} + +func (d PostgresDialect) QuotedTableForQuery(schema string, table string) string { + if strings.TrimSpace(schema) == "" { + return d.QuoteField(table) + } + + return schema + "." + d.QuoteField(table) +} + +func (d PostgresDialect) IfSchemaNotExists(command, schema string) string { + return fmt.Sprintf("%s if not exists", command) +} + +func (d PostgresDialect) IfTableExists(command, schema, table string) string { + return fmt.Sprintf("%s if exists", command) +} + +func (d PostgresDialect) IfTableNotExists(command, schema, table string) string { + return fmt.Sprintf("%s if not exists", command) +} diff --git a/vendor/github.com/go-gorp/gorp/dialect_sqlite.go b/vendor/github.com/go-gorp/gorp/dialect_sqlite.go new file mode 100644 index 000000000..7d9b29757 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/dialect_sqlite.go @@ -0,0 +1,119 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. 
It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp + +package gorp + +import ( + "fmt" + "reflect" +) + +type SqliteDialect struct { + suffix string +} + +func (d SqliteDialect) QuerySuffix() string { return ";" } + +func (d SqliteDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "integer" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return "integer" + case reflect.Float64, reflect.Float32: + return "real" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "blob" + } + } + + switch val.Name() { + case "NullInt64": + return "integer" + case "NullFloat64": + return "real" + case "NullBool": + return "integer" + case "Time": + return "datetime" + } + + if maxsize < 1 { + maxsize = 255 + } + return fmt.Sprintf("varchar(%d)", maxsize) +} + +// Returns autoincrement +func (d SqliteDialect) AutoIncrStr() string { + return "autoincrement" +} + +func (d SqliteDialect) AutoIncrBindValue() string { + return "null" +} + +func (d SqliteDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return "" +} + +// Returns suffix +func (d SqliteDialect) CreateTableSuffix() string { + return d.suffix +} + +func (d SqliteDialect) CreateIndexSuffix() string { + return "" +} + +func (d SqliteDialect) DropIndexSuffix() string { + return "" +} + +// With sqlite, there technically isn't a TRUNCATE statement, +// but a DELETE FROM uses a truncate optimization: +// http://www.sqlite.org/lang_delete.html +func (d SqliteDialect) TruncateClause() string { + return "delete from" +} + +// Returns "?" +func (d SqliteDialect) BindVar(i int) string { + return "?" 
+} + +func (d SqliteDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { + return standardInsertAutoIncr(exec, insertSql, params...) +} + +func (d SqliteDialect) QuoteField(f string) string { + return `"` + f + `"` +} + +// sqlite does not have schemas like PostgreSQL does, so just escape it like normal +func (d SqliteDialect) QuotedTableForQuery(schema string, table string) string { + return d.QuoteField(table) +} + +func (d SqliteDialect) IfSchemaNotExists(command, schema string) string { + return fmt.Sprintf("%s if not exists", command) +} + +func (d SqliteDialect) IfTableExists(command, schema, table string) string { + return fmt.Sprintf("%s if exists", command) +} + +func (d SqliteDialect) IfTableNotExists(command, schema, table string) string { + return fmt.Sprintf("%s if not exists", command) +} diff --git a/vendor/github.com/go-gorp/gorp/dialect_sqlserver.go b/vendor/github.com/go-gorp/gorp/dialect_sqlserver.go new file mode 100644 index 000000000..8808af598 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/dialect_sqlserver.go @@ -0,0 +1,152 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp + +package gorp + +import ( + "fmt" + "reflect" + "strings" +) + +// Implementation of Dialect for Microsoft SQL Server databases. +// Use gorp.SqlServerDialect{"2005"} for legacy datatypes. 
+// Tested with driver: github.com/denisenkom/go-mssqldb + +type SqlServerDialect struct { + + // If set to "2005" legacy datatypes will be used + Version string +} + +func (d SqlServerDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "bit" + case reflect.Int8: + return "tinyint" + case reflect.Uint8: + return "smallint" + case reflect.Int16: + return "smallint" + case reflect.Uint16: + return "int" + case reflect.Int, reflect.Int32: + return "int" + case reflect.Uint, reflect.Uint32: + return "bigint" + case reflect.Int64: + return "bigint" + case reflect.Uint64: + return "numeric(20,0)" + case reflect.Float32: + return "float(24)" + case reflect.Float64: + return "float(53)" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "varbinary" + } + } + + switch val.Name() { + case "NullInt64": + return "bigint" + case "NullFloat64": + return "float(53)" + case "NullBool": + return "bit" + case "NullTime", "Time": + if d.Version == "2005" { + return "datetime" + } + return "datetime2" + } + + if maxsize < 1 { + if d.Version == "2005" { + maxsize = 255 + } else { + return fmt.Sprintf("nvarchar(max)") + } + } + return fmt.Sprintf("nvarchar(%d)", maxsize) +} + +// Returns auto_increment +func (d SqlServerDialect) AutoIncrStr() string { + return "identity(0,1)" +} + +// Empty string removes autoincrement columns from the INSERT statements. +func (d SqlServerDialect) AutoIncrBindValue() string { + return "" +} + +func (d SqlServerDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return "" +} + +func (d SqlServerDialect) CreateTableSuffix() string { return ";" } + +func (d SqlServerDialect) TruncateClause() string { + return "truncate table" +} + +// Returns "?" +func (d SqlServerDialect) BindVar(i int) string { + return "?" 
+} + +func (d SqlServerDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { + return standardInsertAutoIncr(exec, insertSql, params...) +} + +func (d SqlServerDialect) QuoteField(f string) string { + return "[" + strings.Replace(f, "]", "]]", -1) + "]" +} + +func (d SqlServerDialect) QuotedTableForQuery(schema string, table string) string { + if strings.TrimSpace(schema) == "" { + return d.QuoteField(table) + } + return d.QuoteField(schema) + "." + d.QuoteField(table) +} + +func (d SqlServerDialect) QuerySuffix() string { return ";" } + +func (d SqlServerDialect) IfSchemaNotExists(command, schema string) string { + s := fmt.Sprintf("if schema_id(N'%s') is null %s", schema, command) + return s +} + +func (d SqlServerDialect) IfTableExists(command, schema, table string) string { + var schema_clause string + if strings.TrimSpace(schema) != "" { + schema_clause = fmt.Sprintf("%s.", d.QuoteField(schema)) + } + s := fmt.Sprintf("if object_id('%s%s') is not null %s", schema_clause, d.QuoteField(table), command) + return s +} + +func (d SqlServerDialect) IfTableNotExists(command, schema, table string) string { + var schema_clause string + if strings.TrimSpace(schema) != "" { + schema_clause = fmt.Sprintf("%s.", schema) + } + s := fmt.Sprintf("if object_id('%s%s') is null %s", schema_clause, table, command) + return s +} + +func (d SqlServerDialect) CreateIndexSuffix() string { return "" } +func (d SqlServerDialect) DropIndexSuffix() string { return "" } diff --git a/vendor/github.com/go-gorp/gorp/errors.go b/vendor/github.com/go-gorp/gorp/errors.go new file mode 100644 index 000000000..d13f03fc3 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/errors.go @@ -0,0 +1,38 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. 
 It uses the database/sql package, and should work with any
+// compliant database/sql driver.
+//
+// Source code and project home:
+// https://github.com/go-gorp/gorp
+
+package gorp
+
+import (
+	"fmt"
+)
+
+// A non-fatal error, when a select query returns columns that do not exist
+// as fields in the struct it is being mapped to
+// TODO: discuss whether this needs an error. encoding/json silently ignores missing fields
+type NoFieldInTypeError struct {
+	TypeName        string
+	MissingColNames []string
+}
+
+func (err *NoFieldInTypeError) Error() string {
+	return fmt.Sprintf("gorp: no fields %+v in type %s", err.MissingColNames, err.TypeName)
+}
+
+// returns true if the error is non-fatal (ie, we shouldn't immediately return)
+func NonFatalError(err error) bool {
+	switch err.(type) {
+	case *NoFieldInTypeError:
+		return true
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/go-gorp/gorp/gorp.go b/vendor/github.com/go-gorp/gorp/gorp.go
new file mode 100644
index 000000000..40e601ca5
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/gorp.go
@@ -0,0 +1,608 @@
+// Copyright 2012 James Cooper. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+// Package gorp provides a simple way to marshal Go structs to and from
+// SQL databases. It uses the database/sql package, and should work with any
+// compliant database/sql driver.
+//
+// Source code and project home:
+// https://github.com/go-gorp/gorp
+//
+package gorp
+
+import (
+	"context"
+	"database/sql"
+	"database/sql/driver"
+	"fmt"
+	"reflect"
+	"regexp"
+	"strings"
+	"time"
+)
+
+// OracleString (empty string is null)
+// TODO: move to dialect/oracle?, rename to String?
+type OracleString struct {
+	sql.NullString
+}
+
+// Scan implements the Scanner interface.
+func (os *OracleString) Scan(value interface{}) error { + if value == nil { + os.String, os.Valid = "", false + return nil + } + os.Valid = true + return os.NullString.Scan(value) +} + +// Value implements the driver Valuer interface. +func (os OracleString) Value() (driver.Value, error) { + if !os.Valid || os.String == "" { + return nil, nil + } + return os.String, nil +} + +// SqlTyper is a type that returns its database type. Most of the +// time, the type can just use "database/sql/driver".Valuer; but when +// it returns nil for its empty value, it needs to implement SqlTyper +// to have its column type detected properly during table creation. +type SqlTyper interface { + SqlType() driver.Value +} + +// legacySqlTyper prevents breaking clients who depended on the previous +// SqlTyper interface +type legacySqlTyper interface { + SqlType() driver.Valuer +} + +// for fields that exists in DB table, but not exists in struct +type dummyField struct{} + +// Scan implements the Scanner interface. +func (nt *dummyField) Scan(value interface{}) error { + return nil +} + +var zeroVal reflect.Value +var versFieldConst = "[gorp_ver_field]" + +// The TypeConverter interface provides a way to map a value of one +// type to another type when persisting to, or loading from, a database. +// +// Example use cases: Implement type converter to convert bool types to "y"/"n" strings, +// or serialize a struct member as a JSON blob. +type TypeConverter interface { + // ToDb converts val to another type. Called before INSERT/UPDATE operations + ToDb(val interface{}) (interface{}, error) + + // FromDb returns a CustomScanner appropriate for this type. This will be used + // to hold values returned from SELECT queries. + // + // In particular the CustomScanner returned should implement a Binder + // function appropriate for the Go type you wish to convert the db value to + // + // If bool==false, then no custom scanner will be used for this field. 
+ FromDb(target interface{}) (CustomScanner, bool) +} + +// SqlExecutor exposes gorp operations that can be run from Pre/Post +// hooks. This hides whether the current operation that triggered the +// hook is in a transaction. +// +// See the DbMap function docs for each of the functions below for more +// information. +type SqlExecutor interface { + WithContext(ctx context.Context) SqlExecutor + Get(i interface{}, keys ...interface{}) (interface{}, error) + Insert(list ...interface{}) error + Update(list ...interface{}) (int64, error) + Delete(list ...interface{}) (int64, error) + Exec(query string, args ...interface{}) (sql.Result, error) + Select(i interface{}, query string, args ...interface{}) ([]interface{}, error) + SelectInt(query string, args ...interface{}) (int64, error) + SelectNullInt(query string, args ...interface{}) (sql.NullInt64, error) + SelectFloat(query string, args ...interface{}) (float64, error) + SelectNullFloat(query string, args ...interface{}) (sql.NullFloat64, error) + SelectStr(query string, args ...interface{}) (string, error) + SelectNullStr(query string, args ...interface{}) (sql.NullString, error) + SelectOne(holder interface{}, query string, args ...interface{}) error + Query(query string, args ...interface{}) (*sql.Rows, error) + QueryRow(query string, args ...interface{}) *sql.Row +} + +// DynamicTable allows the users of gorp to dynamically +// use different database table names during runtime +// while sharing the same golang struct for in-memory data +type DynamicTable interface { + TableName() string + SetTableName(string) +} + +// Compile-time check that DbMap and Transaction implement the SqlExecutor +// interface. 
+var _, _ SqlExecutor = &DbMap{}, &Transaction{} + +func argsString(args ...interface{}) string { + var margs string + for i, a := range args { + var v interface{} = a + if x, ok := v.(driver.Valuer); ok { + y, err := x.Value() + if err == nil { + v = y + } + } + switch v.(type) { + case string: + v = fmt.Sprintf("%q", v) + default: + v = fmt.Sprintf("%v", v) + } + margs += fmt.Sprintf("%d:%s", i+1, v) + if i+1 < len(args) { + margs += " " + } + } + return margs +} + +// Calls the Exec function on the executor, but attempts to expand any eligible named +// query arguments first. +func maybeExpandNamedQueryAndExec(e SqlExecutor, query string, args ...interface{}) (sql.Result, error) { + dbMap := extractDbMap(e) + + if len(args) == 1 { + query, args = maybeExpandNamedQuery(dbMap, query, args) + } + + return exec(e, query, args...) +} + +func extractDbMap(e SqlExecutor) *DbMap { + switch m := e.(type) { + case *DbMap: + return m + case *Transaction: + return m.dbmap + } + return nil +} + +func extractExecutorAndContext(e SqlExecutor) (executor, context.Context) { + switch m := e.(type) { + case *DbMap: + return m.Db, m.ctx + case *Transaction: + return m.tx, m.ctx + } + return nil, nil +} + +// maybeExpandNamedQuery checks the given arg to see if it's eligible to be used +// as input to a named query. If so, it rewrites the query to use +// dialect-dependent bindvars and instantiates the corresponding slice of +// parameters by extracting data from the map / struct. +// If not, returns the input values unchanged. 
+func maybeExpandNamedQuery(m *DbMap, query string, args []interface{}) (string, []interface{}) { + var ( + arg = args[0] + argval = reflect.ValueOf(arg) + ) + if argval.Kind() == reflect.Ptr { + argval = argval.Elem() + } + + if argval.Kind() == reflect.Map && argval.Type().Key().Kind() == reflect.String { + return expandNamedQuery(m, query, func(key string) reflect.Value { + return argval.MapIndex(reflect.ValueOf(key)) + }) + } + if argval.Kind() != reflect.Struct { + return query, args + } + if _, ok := arg.(time.Time); ok { + // time.Time is driver.Value + return query, args + } + if _, ok := arg.(driver.Valuer); ok { + // driver.Valuer will be converted to driver.Value. + return query, args + } + + return expandNamedQuery(m, query, argval.FieldByName) +} + +var keyRegexp = regexp.MustCompile(`:[[:word:]]+`) + +// expandNamedQuery accepts a query with placeholders of the form ":key", and a +// single arg of Kind Struct or Map[string]. It returns the query with the +// dialect's placeholders, and a slice of args ready for positional insertion +// into the query. +func expandNamedQuery(m *DbMap, query string, keyGetter func(key string) reflect.Value) (string, []interface{}) { + var ( + n int + args []interface{} + ) + return keyRegexp.ReplaceAllStringFunc(query, func(key string) string { + val := keyGetter(key[1:]) + if !val.IsValid() { + return key + } + args = append(args, val.Interface()) + newVar := m.Dialect.BindVar(n) + n++ + return newVar + }), args +} + +func columnToFieldIndex(m *DbMap, t reflect.Type, name string, cols []string) ([][]int, error) { + colToFieldIndex := make([][]int, len(cols)) + + // check if type t is a mapped table - if so we'll + // check the table for column aliasing below + tableMapped := false + table := tableOrNil(m, t, name) + if table != nil { + tableMapped = true + } + + // Loop over column names and find field in i to bind to + // based on column name. 
all returned columns must match + // a field in the i struct + missingColNames := []string{} + for x := range cols { + colName := strings.ToLower(cols[x]) + field, found := t.FieldByNameFunc(func(fieldName string) bool { + field, _ := t.FieldByName(fieldName) + cArguments := strings.Split(field.Tag.Get("db"), ",") + fieldName = cArguments[0] + + if fieldName == "-" { + return false + } else if fieldName == "" { + fieldName = field.Name + } + if tableMapped { + colMap := colMapOrNil(table, fieldName) + if colMap != nil { + fieldName = colMap.ColumnName + } + } + return colName == strings.ToLower(fieldName) + }) + if found { + colToFieldIndex[x] = field.Index + } + if colToFieldIndex[x] == nil { + missingColNames = append(missingColNames, colName) + } + } + if len(missingColNames) > 0 { + return colToFieldIndex, &NoFieldInTypeError{ + TypeName: t.Name(), + MissingColNames: missingColNames, + } + } + return colToFieldIndex, nil +} + +func fieldByName(val reflect.Value, fieldName string) *reflect.Value { + // try to find field by exact match + f := val.FieldByName(fieldName) + + if f != zeroVal { + return &f + } + + // try to find by case insensitive match - only the Postgres driver + // seems to require this - in the case where columns are aliased in the sql + fieldNameL := strings.ToLower(fieldName) + fieldCount := val.NumField() + t := val.Type() + for i := 0; i < fieldCount; i++ { + sf := t.Field(i) + if strings.ToLower(sf.Name) == fieldNameL { + f := val.Field(i) + return &f + } + } + + return nil +} + +// toSliceType returns the element type of the given object, if the object is a +// "*[]*Element" or "*[]Element". If not, returns nil. +// err is returned if the user was trying to pass a pointer-to-slice but failed. 
+func toSliceType(i interface{}) (reflect.Type, error) { + t := reflect.TypeOf(i) + if t.Kind() != reflect.Ptr { + // If it's a slice, return a more helpful error message + if t.Kind() == reflect.Slice { + return nil, fmt.Errorf("gorp: cannot SELECT into a non-pointer slice: %v", t) + } + return nil, nil + } + if t = t.Elem(); t.Kind() != reflect.Slice { + return nil, nil + } + return t.Elem(), nil +} + +func toType(i interface{}) (reflect.Type, error) { + t := reflect.TypeOf(i) + + // If a Pointer to a type, follow + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + return nil, fmt.Errorf("gorp: cannot SELECT into this type: %v", reflect.TypeOf(i)) + } + return t, nil +} + +type foundTable struct { + table *TableMap + dynName *string +} + +func tableFor(m *DbMap, t reflect.Type, i interface{}) (*foundTable, error) { + if dyn, isDynamic := i.(DynamicTable); isDynamic { + tableName := dyn.TableName() + table, err := m.DynamicTableFor(tableName, true) + if err != nil { + return nil, err + } + return &foundTable{ + table: table, + dynName: &tableName, + }, nil + } + table, err := m.TableFor(t, true) + if err != nil { + return nil, err + } + return &foundTable{table: table}, nil +} + +func get(m *DbMap, exec SqlExecutor, i interface{}, + keys ...interface{}) (interface{}, error) { + + t, err := toType(i) + if err != nil { + return nil, err + } + + foundTable, err := tableFor(m, t, i) + if err != nil { + return nil, err + } + table := foundTable.table + + plan := table.bindGet() + + v := reflect.New(t) + if foundTable.dynName != nil { + retDyn := v.Interface().(DynamicTable) + retDyn.SetTableName(*foundTable.dynName) + } + + dest := make([]interface{}, len(plan.argFields)) + + conv := m.TypeConverter + custScan := make([]CustomScanner, 0) + + for x, fieldName := range plan.argFields { + f := v.Elem().FieldByName(fieldName) + target := f.Addr().Interface() + if conv != nil { + scanner, ok := conv.FromDb(target) + if ok { + target = 
scanner.Holder + custScan = append(custScan, scanner) + } + } + dest[x] = target + } + + row := exec.QueryRow(plan.query, keys...) + err = row.Scan(dest...) + if err != nil { + if err == sql.ErrNoRows { + err = nil + } + return nil, err + } + + for _, c := range custScan { + err = c.Bind() + if err != nil { + return nil, err + } + } + + if v, ok := v.Interface().(HasPostGet); ok { + err := v.PostGet(exec) + if err != nil { + return nil, err + } + } + + return v.Interface(), nil +} + +func delete(m *DbMap, exec SqlExecutor, list ...interface{}) (int64, error) { + count := int64(0) + for _, ptr := range list { + table, elem, err := m.tableForPointer(ptr, true) + if err != nil { + return -1, err + } + + eval := elem.Addr().Interface() + if v, ok := eval.(HasPreDelete); ok { + err = v.PreDelete(exec) + if err != nil { + return -1, err + } + } + + bi, err := table.bindDelete(elem) + if err != nil { + return -1, err + } + + res, err := exec.Exec(bi.query, bi.args...) + if err != nil { + return -1, err + } + rows, err := res.RowsAffected() + if err != nil { + return -1, err + } + + if rows == 0 && bi.existingVersion > 0 { + return lockError(m, exec, table.TableName, + bi.existingVersion, elem, bi.keys...) + } + + count += rows + + if v, ok := eval.(HasPostDelete); ok { + err := v.PostDelete(exec) + if err != nil { + return -1, err + } + } + } + + return count, nil +} + +func update(m *DbMap, exec SqlExecutor, colFilter ColumnFilter, list ...interface{}) (int64, error) { + count := int64(0) + for _, ptr := range list { + table, elem, err := m.tableForPointer(ptr, true) + if err != nil { + return -1, err + } + + eval := elem.Addr().Interface() + if v, ok := eval.(HasPreUpdate); ok { + err = v.PreUpdate(exec) + if err != nil { + return -1, err + } + } + + bi, err := table.bindUpdate(elem, colFilter) + if err != nil { + return -1, err + } + + res, err := exec.Exec(bi.query, bi.args...) 
+ if err != nil { + return -1, err + } + + rows, err := res.RowsAffected() + if err != nil { + return -1, err + } + + if rows == 0 && bi.existingVersion > 0 { + return lockError(m, exec, table.TableName, + bi.existingVersion, elem, bi.keys...) + } + + if bi.versField != "" { + elem.FieldByName(bi.versField).SetInt(bi.existingVersion + 1) + } + + count += rows + + if v, ok := eval.(HasPostUpdate); ok { + err = v.PostUpdate(exec) + if err != nil { + return -1, err + } + } + } + return count, nil +} + +func insert(m *DbMap, exec SqlExecutor, list ...interface{}) error { + for _, ptr := range list { + table, elem, err := m.tableForPointer(ptr, false) + if err != nil { + return err + } + + eval := elem.Addr().Interface() + if v, ok := eval.(HasPreInsert); ok { + err := v.PreInsert(exec) + if err != nil { + return err + } + } + + bi, err := table.bindInsert(elem) + if err != nil { + return err + } + + if bi.autoIncrIdx > -1 { + f := elem.FieldByName(bi.autoIncrFieldName) + switch inserter := m.Dialect.(type) { + case IntegerAutoIncrInserter: + id, err := inserter.InsertAutoIncr(exec, bi.query, bi.args...) + if err != nil { + return err + } + k := f.Kind() + if (k == reflect.Int) || (k == reflect.Int16) || (k == reflect.Int32) || (k == reflect.Int64) { + f.SetInt(id) + } else if (k == reflect.Uint) || (k == reflect.Uint16) || (k == reflect.Uint32) || (k == reflect.Uint64) { + f.SetUint(uint64(id)) + } else { + return fmt.Errorf("gorp: cannot set autoincrement value on non-Int field. SQL=%s autoIncrIdx=%d autoIncrFieldName=%s", bi.query, bi.autoIncrIdx, bi.autoIncrFieldName) + } + case TargetedAutoIncrInserter: + err := inserter.InsertAutoIncrToTarget(exec, bi.query, f.Addr().Interface(), bi.args...) 
+ if err != nil { + return err + } + case TargetQueryInserter: + var idQuery = table.ColMap(bi.autoIncrFieldName).GeneratedIdQuery + if idQuery == "" { + return fmt.Errorf("gorp: cannot set %s value if its ColumnMap.GeneratedIdQuery is empty", bi.autoIncrFieldName) + } + err := inserter.InsertQueryToTarget(exec, bi.query, idQuery, f.Addr().Interface(), bi.args...) + if err != nil { + return err + } + default: + return fmt.Errorf("gorp: cannot use autoincrement fields on dialects that do not implement an autoincrementing interface") + } + } else { + _, err := exec.Exec(bi.query, bi.args...) + if err != nil { + return err + } + } + + if v, ok := eval.(HasPostInsert); ok { + err := v.PostInsert(exec) + if err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/go-gorp/gorp/gorp_go17.go b/vendor/github.com/go-gorp/gorp/gorp_go17.go new file mode 100644 index 000000000..95cc989d1 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/gorp_go17.go @@ -0,0 +1,54 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp +// + +// +build !go1.8 + +package gorp + +import "database/sql" + +// Executor exposes the sql.DB and sql.Tx functions so that it can be used +// on internal functions that need to be agnostic to the underlying object. 
+type executor interface { + Exec(query string, args ...interface{}) (sql.Result, error) + Prepare(query string) (*sql.Stmt, error) + QueryRow(query string, args ...interface{}) *sql.Row + Query(query string, args ...interface{}) (*sql.Rows, error) +} + +func exec(e SqlExecutor, query string, args ...interface{}) (sql.Result, error) { + executor, _ := extractExecutorAndContext(e) + + return executor.Exec(query, args...) +} + +func prepare(e SqlExecutor, query string) (*sql.Stmt, error) { + executor, _ := extractExecutorAndContext(e) + + return executor.Prepare(query) +} + +func queryRow(e SqlExecutor, query string, args ...interface{}) *sql.Row { + executor, _ := extractExecutorAndContext(e) + + return executor.QueryRow(query, args...) +} + +func query(e SqlExecutor, query string, args ...interface{}) (*sql.Rows, error) { + executor, _ := extractExecutorAndContext(e) + + return executor.Query(query, args...) +} + +func begin(m *DbMap) (*sql.Tx, error) { + return m.Db.Begin() +} diff --git a/vendor/github.com/go-gorp/gorp/gorp_go18.go b/vendor/github.com/go-gorp/gorp/gorp_go18.go new file mode 100644 index 000000000..ecebd473f --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/gorp_go18.go @@ -0,0 +1,81 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp +// + +// +build go1.8 + +package gorp + +import ( + "context" + "database/sql" +) + +// executor exposes the sql.DB and sql.Tx functions so that it can be used +// on internal functions that need to be agnostic to the underlying object. 
+type executor interface { + Exec(query string, args ...interface{}) (sql.Result, error) + Prepare(query string) (*sql.Stmt, error) + QueryRow(query string, args ...interface{}) *sql.Row + Query(query string, args ...interface{}) (*sql.Rows, error) + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) + PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) + QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row + QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) +} + +func exec(e SqlExecutor, query string, args ...interface{}) (sql.Result, error) { + executor, ctx := extractExecutorAndContext(e) + + if ctx != nil { + return executor.ExecContext(ctx, query, args...) + } + + return executor.Exec(query, args...) +} + +func prepare(e SqlExecutor, query string) (*sql.Stmt, error) { + executor, ctx := extractExecutorAndContext(e) + + if ctx != nil { + return executor.PrepareContext(ctx, query) + } + + return executor.Prepare(query) +} + +func queryRow(e SqlExecutor, query string, args ...interface{}) *sql.Row { + executor, ctx := extractExecutorAndContext(e) + + if ctx != nil { + return executor.QueryRowContext(ctx, query, args...) + } + + return executor.QueryRow(query, args...) +} + +func query(e SqlExecutor, query string, args ...interface{}) (*sql.Rows, error) { + executor, ctx := extractExecutorAndContext(e) + + if ctx != nil { + return executor.QueryContext(ctx, query, args...) + } + + return executor.Query(query, args...) +} + +func begin(m *DbMap) (*sql.Tx, error) { + if m.ctx != nil { + return m.Db.BeginTx(m.ctx, nil) + } + + return m.Db.Begin() +} diff --git a/vendor/github.com/go-gorp/gorp/hooks.go b/vendor/github.com/go-gorp/gorp/hooks.go new file mode 100644 index 000000000..192b51f00 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/hooks.go @@ -0,0 +1,49 @@ +// Copyright 2012 James Cooper. All rights reserved. 
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+// Package gorp provides a simple way to marshal Go structs to and from
+// SQL databases. It uses the database/sql package, and should work with any
+// compliant database/sql driver.
+//
+// Source code and project home:
+// https://github.com/go-gorp/gorp
+
+package gorp
+
+//++ TODO v2-phase3: HasPostGet => PostGetter, HasPostDelete => PostDeleter, etc.
+
+// PostGet() will be executed after the GET statement.
+type HasPostGet interface {
+	PostGet(SqlExecutor) error
+}
+
+// PostDelete() will be executed after the DELETE statement.
+type HasPostDelete interface {
+	PostDelete(SqlExecutor) error
+}
+
+// PostUpdate() will be executed after the UPDATE statement.
+type HasPostUpdate interface {
+	PostUpdate(SqlExecutor) error
+}
+
+// PostInsert() will be executed after the INSERT statement.
+type HasPostInsert interface {
+	PostInsert(SqlExecutor) error
+}
+
+// PreDelete() will be executed before the DELETE statement.
+type HasPreDelete interface {
+	PreDelete(SqlExecutor) error
+}
+
+// PreUpdate() will be executed before the UPDATE statement.
+type HasPreUpdate interface {
+	PreUpdate(SqlExecutor) error
+}
+
+// PreInsert() will be executed before the INSERT statement.
+type HasPreInsert interface {
+	PreInsert(SqlExecutor) error
+}
diff --git a/vendor/github.com/go-gorp/gorp/index.go b/vendor/github.com/go-gorp/gorp/index.go
new file mode 100644
index 000000000..01ecd9eca
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/index.go
@@ -0,0 +1,56 @@
+// Copyright 2012 James Cooper. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+// Package gorp provides a simple way to marshal Go structs to and from
+// SQL databases. It uses the database/sql package, and should work with any
+// compliant database/sql driver.
+//
+// Source code and project home:
+// https://github.com/go-gorp/gorp
+
+package gorp
+
+// IndexMap represents a mapping between a Go struct field and a single
+// index in a table.
+// Unique and MaxSize only inform the
+// CreateTables() function and are not used by Insert/Update/Delete/Get.
+type IndexMap struct {
+	// Index name in db table
+	IndexName string
+
+	// If true, " unique" is added to create index statements.
+	// Not used elsewhere
+	Unique bool
+
+	// Index type supported by Dialect
+	// Postgres: B-tree, Hash, GiST and GIN.
+	// Mysql: Btree, Hash.
+	// Sqlite: nil.
+	IndexType string
+
+	// Column names for single and multiple indexes
+	columns []string
+}
+
+// Rename allows you to specify the index name in the table
+//
+// Example: table.IndMap("customer_test_idx").Rename("customer_idx")
+//
+func (idx *IndexMap) Rename(indname string) *IndexMap {
+	idx.IndexName = indname
+	return idx
+}
+
+// SetUnique adds "unique" to the create index statements for this
+// index, if b is true.
+func (idx *IndexMap) SetUnique(b bool) *IndexMap {
+	idx.Unique = b
+	return idx
+}
+
+// SetIndexType specifies the index type supported by the chosen SQL Dialect
+func (idx *IndexMap) SetIndexType(indtype string) *IndexMap {
+	idx.IndexType = indtype
+	return idx
+}
diff --git a/vendor/github.com/go-gorp/gorp/lockerror.go b/vendor/github.com/go-gorp/gorp/lockerror.go
new file mode 100644
index 000000000..07b3047ae
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/lockerror.go
@@ -0,0 +1,63 @@
+// Copyright 2012 James Cooper. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+// Package gorp provides a simple way to marshal Go structs to and from
+// SQL databases. It uses the database/sql package, and should work with any
+// compliant database/sql driver.
+// +// Source code and project home: +// https://github.com/go-gorp/gorp + +package gorp + +import ( + "fmt" + "reflect" +) + +// OptimisticLockError is returned by Update() or Delete() if the +// struct being modified has a Version field and the value is not equal to +// the current value in the database +type OptimisticLockError struct { + // Table name where the lock error occurred + TableName string + + // Primary key values of the row being updated/deleted + Keys []interface{} + + // true if a row was found with those keys, indicating the + // LocalVersion is stale. false if no value was found with those + // keys, suggesting the row has been deleted since loaded, or + // was never inserted to begin with + RowExists bool + + // Version value on the struct passed to Update/Delete. This value is + // out of sync with the database. + LocalVersion int64 +} + +// Error returns a description of the cause of the lock error +func (e OptimisticLockError) Error() string { + if e.RowExists { + return fmt.Sprintf("gorp: OptimisticLockError table=%s keys=%v out of date version=%d", e.TableName, e.Keys, e.LocalVersion) + } + + return fmt.Sprintf("gorp: OptimisticLockError no row found for table=%s keys=%v", e.TableName, e.Keys) +} + +func lockError(m *DbMap, exec SqlExecutor, tableName string, + existingVer int64, elem reflect.Value, + keys ...interface{}) (int64, error) { + + existing, err := get(m, exec, elem.Interface(), keys...) + if err != nil { + return -1, err + } + + ole := OptimisticLockError{tableName, keys, true, existingVer} + if existing == nil { + ole.RowExists = false + } + return -1, ole +} diff --git a/vendor/github.com/go-gorp/gorp/logging.go b/vendor/github.com/go-gorp/gorp/logging.go new file mode 100644 index 000000000..89d6c0e79 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/logging.go @@ -0,0 +1,44 @@ +// Copyright 2012 James Cooper. All rights reserved. 
+// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp + +package gorp + +import "fmt" + +type GorpLogger interface { + Printf(format string, v ...interface{}) +} + +// TraceOn turns on SQL statement logging for this DbMap. After this is +// called, all SQL statements will be sent to the logger. If prefix is +// a non-empty string, it will be written to the front of all logged +// strings, which can aid in filtering log lines. +// +// Use TraceOn if you want to spy on the SQL statements that gorp +// generates. +// +// Note that the base log.Logger type satisfies GorpLogger, but adapters can +// easily be written for other logging packages (e.g., the golang-sanctioned +// glog framework). +func (m *DbMap) TraceOn(prefix string, logger GorpLogger) { + m.logger = logger + if prefix == "" { + m.logPrefix = prefix + } else { + m.logPrefix = fmt.Sprintf("%s ", prefix) + } +} + +// TraceOff turns off tracing. It is idempotent. +func (m *DbMap) TraceOff() { + m.logger = nil + m.logPrefix = "" +} diff --git a/vendor/github.com/go-gorp/gorp/nulltypes.go b/vendor/github.com/go-gorp/gorp/nulltypes.go new file mode 100644 index 000000000..870770372 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/nulltypes.go @@ -0,0 +1,58 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. 
+// +// Source code and project home: +// https://github.com/go-gorp/gorp + +package gorp + +import ( + "database/sql/driver" + "time" +) + +// A nullable Time value +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. +func (nt *NullTime) Scan(value interface{}) error { + switch t := value.(type) { + case time.Time: + nt.Time, nt.Valid = t, true + case []byte: + nt.Valid = false + for _, dtfmt := range []string{ + "2006-01-02 15:04:05.999999999", + "2006-01-02T15:04:05.999999999", + "2006-01-02 15:04:05", + "2006-01-02T15:04:05", + "2006-01-02 15:04", + "2006-01-02T15:04", + "2006-01-02", + "2006-01-02 15:04:05-07:00", + } { + var err error + if nt.Time, err = time.Parse(dtfmt, string(t)); err == nil { + nt.Valid = true + break + } + } + } + return nil +} + +// Value implements the driver Valuer interface. +func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} diff --git a/vendor/github.com/go-gorp/gorp/select.go b/vendor/github.com/go-gorp/gorp/select.go new file mode 100644 index 000000000..fa9cae8da --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/select.go @@ -0,0 +1,366 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp + +package gorp + +import ( + "database/sql" + "fmt" + "reflect" +) + +// SelectInt executes the given query, which should be a SELECT statement for a single +// integer column, and returns the value of the first row returned. If no rows are +// found, zero is returned. 
+func SelectInt(e SqlExecutor, query string, args ...interface{}) (int64, error) { + var h int64 + err := selectVal(e, &h, query, args...) + if err != nil && err != sql.ErrNoRows { + return 0, err + } + return h, nil +} + +// SelectNullInt executes the given query, which should be a SELECT statement for a single +// integer column, and returns the value of the first row returned. If no rows are +// found, the empty sql.NullInt64 value is returned. +func SelectNullInt(e SqlExecutor, query string, args ...interface{}) (sql.NullInt64, error) { + var h sql.NullInt64 + err := selectVal(e, &h, query, args...) + if err != nil && err != sql.ErrNoRows { + return h, err + } + return h, nil +} + +// SelectFloat executes the given query, which should be a SELECT statement for a single +// float column, and returns the value of the first row returned. If no rows are +// found, zero is returned. +func SelectFloat(e SqlExecutor, query string, args ...interface{}) (float64, error) { + var h float64 + err := selectVal(e, &h, query, args...) + if err != nil && err != sql.ErrNoRows { + return 0, err + } + return h, nil +} + +// SelectNullFloat executes the given query, which should be a SELECT statement for a single +// float column, and returns the value of the first row returned. If no rows are +// found, the empty sql.NullInt64 value is returned. +func SelectNullFloat(e SqlExecutor, query string, args ...interface{}) (sql.NullFloat64, error) { + var h sql.NullFloat64 + err := selectVal(e, &h, query, args...) + if err != nil && err != sql.ErrNoRows { + return h, err + } + return h, nil +} + +// SelectStr executes the given query, which should be a SELECT statement for a single +// char/varchar column, and returns the value of the first row returned. If no rows are +// found, an empty string is returned. +func SelectStr(e SqlExecutor, query string, args ...interface{}) (string, error) { + var h string + err := selectVal(e, &h, query, args...) 
+ if err != nil && err != sql.ErrNoRows { + return "", err + } + return h, nil +} + +// SelectNullStr executes the given query, which should be a SELECT +// statement for a single char/varchar column, and returns the value +// of the first row returned. If no rows are found, the empty +// sql.NullString is returned. +func SelectNullStr(e SqlExecutor, query string, args ...interface{}) (sql.NullString, error) { + var h sql.NullString + err := selectVal(e, &h, query, args...) + if err != nil && err != sql.ErrNoRows { + return h, err + } + return h, nil +} + +// SelectOne executes the given query (which should be a SELECT statement) +// and binds the result to holder, which must be a pointer. +// +// If no row is found, an error (sql.ErrNoRows specifically) will be returned +// +// If more than one row is found, an error will be returned. +// +func SelectOne(m *DbMap, e SqlExecutor, holder interface{}, query string, args ...interface{}) error { + t := reflect.TypeOf(holder) + if t.Kind() == reflect.Ptr { + t = t.Elem() + } else { + return fmt.Errorf("gorp: SelectOne holder must be a pointer, but got: %t", holder) + } + + // Handle pointer to pointer + isptr := false + if t.Kind() == reflect.Ptr { + isptr = true + t = t.Elem() + } + + if t.Kind() == reflect.Struct { + var nonFatalErr error + + list, err := hookedselect(m, e, holder, query, args...) 
+ if err != nil { + if !NonFatalError(err) { // FIXME: double negative, rename NonFatalError to FatalError + return err + } + nonFatalErr = err + } + + dest := reflect.ValueOf(holder) + if isptr { + dest = dest.Elem() + } + + if list != nil && len(list) > 0 { // FIXME: invert if/else + // check for multiple rows + if len(list) > 1 { + return fmt.Errorf("gorp: multiple rows returned for: %s - %v", query, args) + } + + // Initialize if nil + if dest.IsNil() { + dest.Set(reflect.New(t)) + } + + // only one row found + src := reflect.ValueOf(list[0]) + dest.Elem().Set(src.Elem()) + } else { + // No rows found, return a proper error. + return sql.ErrNoRows + } + + return nonFatalErr + } + + return selectVal(e, holder, query, args...) +} + +func selectVal(e SqlExecutor, holder interface{}, query string, args ...interface{}) error { + if len(args) == 1 { + switch m := e.(type) { + case *DbMap: + query, args = maybeExpandNamedQuery(m, query, args) + case *Transaction: + query, args = maybeExpandNamedQuery(m.dbmap, query, args) + } + } + rows, err := e.Query(query, args...) + if err != nil { + return err + } + defer rows.Close() + + if !rows.Next() { + if err := rows.Err(); err != nil { + return err + } + return sql.ErrNoRows + } + + return rows.Scan(holder) +} + +func hookedselect(m *DbMap, exec SqlExecutor, i interface{}, query string, + args ...interface{}) ([]interface{}, error) { + + var nonFatalErr error + + list, err := rawselect(m, exec, i, query, args...) 
+ if err != nil { + if !NonFatalError(err) { + return nil, err + } + nonFatalErr = err + } + + // Determine where the results are: written to i, or returned in list + if t, _ := toSliceType(i); t == nil { + for _, v := range list { + if v, ok := v.(HasPostGet); ok { + err := v.PostGet(exec) + if err != nil { + return nil, err + } + } + } + } else { + resultsValue := reflect.Indirect(reflect.ValueOf(i)) + for i := 0; i < resultsValue.Len(); i++ { + if v, ok := resultsValue.Index(i).Interface().(HasPostGet); ok { + err := v.PostGet(exec) + if err != nil { + return nil, err + } + } + } + } + return list, nonFatalErr +} + +func rawselect(m *DbMap, exec SqlExecutor, i interface{}, query string, + args ...interface{}) ([]interface{}, error) { + var ( + appendToSlice = false // Write results to i directly? + intoStruct = true // Selecting into a struct? + pointerElements = true // Are the slice elements pointers (vs values)? + ) + + var nonFatalErr error + + tableName := "" + var dynObj DynamicTable + isDynamic := false + if dynObj, isDynamic = i.(DynamicTable); isDynamic { + tableName = dynObj.TableName() + } + + // get type for i, verifying it's a supported destination + t, err := toType(i) + if err != nil { + var err2 error + if t, err2 = toSliceType(i); t == nil { + if err2 != nil { + return nil, err2 + } + return nil, err + } + pointerElements = t.Kind() == reflect.Ptr + if pointerElements { + t = t.Elem() + } + appendToSlice = true + intoStruct = t.Kind() == reflect.Struct + } + + // If the caller supplied a single struct/map argument, assume a "named + // parameter" query. Extract the named arguments from the struct/map, create + // the flat arg slice, and rewrite the query to use the dialect's placeholder. + if len(args) == 1 { + query, args = maybeExpandNamedQuery(m, query, args) + } + + // Run the query + rows, err := exec.Query(query, args...) 
+ if err != nil { + return nil, err + } + defer rows.Close() + + // Fetch the column names as returned from db + cols, err := rows.Columns() + if err != nil { + return nil, err + } + + if !intoStruct && len(cols) > 1 { + return nil, fmt.Errorf("gorp: select into non-struct slice requires 1 column, got %d", len(cols)) + } + + var colToFieldIndex [][]int + if intoStruct { + colToFieldIndex, err = columnToFieldIndex(m, t, tableName, cols) + if err != nil { + if !NonFatalError(err) { + return nil, err + } + nonFatalErr = err + } + } + + conv := m.TypeConverter + + // Add results to one of these two slices. + var ( + list = make([]interface{}, 0) + sliceValue = reflect.Indirect(reflect.ValueOf(i)) + ) + + for { + if !rows.Next() { + // if error occured return rawselect + if rows.Err() != nil { + return nil, rows.Err() + } + // time to exit from outer "for" loop + break + } + v := reflect.New(t) + + if isDynamic { + v.Interface().(DynamicTable).SetTableName(tableName) + } + + dest := make([]interface{}, len(cols)) + + custScan := make([]CustomScanner, 0) + + for x := range cols { + f := v.Elem() + if intoStruct { + index := colToFieldIndex[x] + if index == nil { + // this field is not present in the struct, so create a dummy + // value for rows.Scan to scan into + var dummy dummyField + dest[x] = &dummy + continue + } + f = f.FieldByIndex(index) + } + target := f.Addr().Interface() + if conv != nil { + scanner, ok := conv.FromDb(target) + if ok { + target = scanner.Holder + custScan = append(custScan, scanner) + } + } + dest[x] = target + } + + err = rows.Scan(dest...) 
+ if err != nil { + return nil, err + } + + for _, c := range custScan { + err = c.Bind() + if err != nil { + return nil, err + } + } + + if appendToSlice { + if !pointerElements { + v = v.Elem() + } + sliceValue.Set(reflect.Append(sliceValue, v)) + } else { + list = append(list, v.Interface()) + } + } + + if appendToSlice && sliceValue.IsNil() { + sliceValue.Set(reflect.MakeSlice(sliceValue.Type(), 0, 0)) + } + + return list, nonFatalErr +} diff --git a/vendor/github.com/go-gorp/gorp/table.go b/vendor/github.com/go-gorp/gorp/table.go new file mode 100644 index 000000000..5c513909a --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/table.go @@ -0,0 +1,247 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp + +package gorp + +import ( + "bytes" + "fmt" + "reflect" + "strings" +) + +// TableMap represents a mapping between a Go struct and a database table +// Use dbmap.AddTable() or dbmap.AddTableWithName() to create these +type TableMap struct { + // Name of database table. + TableName string + SchemaName string + gotype reflect.Type + Columns []*ColumnMap + keys []*ColumnMap + indexes []*IndexMap + uniqueTogether [][]string + version *ColumnMap + insertPlan bindPlan + updatePlan bindPlan + deletePlan bindPlan + getPlan bindPlan + dbmap *DbMap +} + +// ResetSql removes cached insert/update/select/delete SQL strings +// associated with this TableMap. Call this if you've modified +// any column names or the table name itself. 
+func (t *TableMap) ResetSql() { + t.insertPlan = bindPlan{} + t.updatePlan = bindPlan{} + t.deletePlan = bindPlan{} + t.getPlan = bindPlan{} +} + +// SetKeys lets you specify the fields on a struct that map to primary +// key columns on the table. If isAutoIncr is set, result.LastInsertId() +// will be used after INSERT to bind the generated id to the Go struct. +// +// Automatically calls ResetSql() to ensure SQL statements are regenerated. +// +// Panics if isAutoIncr is true, and fieldNames length != 1 +// +func (t *TableMap) SetKeys(isAutoIncr bool, fieldNames ...string) *TableMap { + if isAutoIncr && len(fieldNames) != 1 { + panic(fmt.Sprintf( + "gorp: SetKeys: fieldNames length must be 1 if key is auto-increment. (Saw %v fieldNames)", + len(fieldNames))) + } + t.keys = make([]*ColumnMap, 0) + for _, name := range fieldNames { + colmap := t.ColMap(name) + colmap.isPK = true + colmap.isAutoIncr = isAutoIncr + t.keys = append(t.keys, colmap) + } + t.ResetSql() + + return t +} + +// SetUniqueTogether lets you specify uniqueness constraints across multiple +// columns on the table. Each call adds an additional constraint for the +// specified columns. +// +// Automatically calls ResetSql() to ensure SQL statements are regenerated. +// +// Panics if fieldNames length < 2. +// +func (t *TableMap) SetUniqueTogether(fieldNames ...string) *TableMap { + if len(fieldNames) < 2 { + panic(fmt.Sprintf( + "gorp: SetUniqueTogether: must provide at least two fieldNames to set uniqueness constraint.")) + } + + columns := make([]string, 0) + for _, name := range fieldNames { + columns = append(columns, name) + } + t.uniqueTogether = append(t.uniqueTogether, columns) + t.ResetSql() + + return t +} + +// ColMap returns the ColumnMap pointer matching the given struct field +// name. It panics if the struct does not contain a field matching this +// name. 
+func (t *TableMap) ColMap(field string) *ColumnMap { + col := colMapOrNil(t, field) + if col == nil { + e := fmt.Sprintf("No ColumnMap in table %s type %s with field %s", + t.TableName, t.gotype.Name(), field) + + panic(e) + } + return col +} + +func colMapOrNil(t *TableMap, field string) *ColumnMap { + for _, col := range t.Columns { + if col.fieldName == field || col.ColumnName == field { + return col + } + } + return nil +} + +// IdxMap returns the IndexMap pointer matching the given index name. +func (t *TableMap) IdxMap(field string) *IndexMap { + for _, idx := range t.indexes { + if idx.IndexName == field { + return idx + } + } + return nil +} + +// AddIndex registers the index with gorp for specified table with given parameters. +// This operation is idempotent. If index is already mapped, the +// existing *IndexMap is returned +// Function will panic if one of the given for index columns does not exists +// +// Automatically calls ResetSql() to ensure SQL statements are regenerated. +// +func (t *TableMap) AddIndex(name string, idxtype string, columns []string) *IndexMap { + // check if we have a index with this name already + for _, idx := range t.indexes { + if idx.IndexName == name { + return idx + } + } + for _, icol := range columns { + if res := t.ColMap(icol); res == nil { + e := fmt.Sprintf("No ColumnName in table %s to create index on", t.TableName) + panic(e) + } + } + + idx := &IndexMap{IndexName: name, Unique: false, IndexType: idxtype, columns: columns} + t.indexes = append(t.indexes, idx) + t.ResetSql() + return idx +} + +// SetVersionCol sets the column to use as the Version field. By default +// the "Version" field is used. Returns the column found, or panics +// if the struct does not contain a field matching this name. +// +// Automatically calls ResetSql() to ensure SQL statements are regenerated. 
+func (t *TableMap) SetVersionCol(field string) *ColumnMap { + c := t.ColMap(field) + t.version = c + t.ResetSql() + return c +} + +// SqlForCreateTable gets a sequence of SQL commands that will create +// the specified table and any associated schema +func (t *TableMap) SqlForCreate(ifNotExists bool) string { + s := bytes.Buffer{} + dialect := t.dbmap.Dialect + + if strings.TrimSpace(t.SchemaName) != "" { + schemaCreate := "create schema" + if ifNotExists { + s.WriteString(dialect.IfSchemaNotExists(schemaCreate, t.SchemaName)) + } else { + s.WriteString(schemaCreate) + } + s.WriteString(fmt.Sprintf(" %s;", t.SchemaName)) + } + + tableCreate := "create table" + if ifNotExists { + s.WriteString(dialect.IfTableNotExists(tableCreate, t.SchemaName, t.TableName)) + } else { + s.WriteString(tableCreate) + } + s.WriteString(fmt.Sprintf(" %s (", dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) + + x := 0 + for _, col := range t.Columns { + if !col.Transient { + if x > 0 { + s.WriteString(", ") + } + stype := dialect.ToSqlType(col.gotype, col.MaxSize, col.isAutoIncr) + s.WriteString(fmt.Sprintf("%s %s", dialect.QuoteField(col.ColumnName), stype)) + + if col.isPK || col.isNotNull { + s.WriteString(" not null") + } + if col.isPK && len(t.keys) == 1 { + s.WriteString(" primary key") + } + if col.Unique { + s.WriteString(" unique") + } + if col.isAutoIncr { + s.WriteString(fmt.Sprintf(" %s", dialect.AutoIncrStr())) + } + + x++ + } + } + if len(t.keys) > 1 { + s.WriteString(", primary key (") + for x := range t.keys { + if x > 0 { + s.WriteString(", ") + } + s.WriteString(dialect.QuoteField(t.keys[x].ColumnName)) + } + s.WriteString(")") + } + if len(t.uniqueTogether) > 0 { + for _, columns := range t.uniqueTogether { + s.WriteString(", unique (") + for i, column := range columns { + if i > 0 { + s.WriteString(", ") + } + s.WriteString(dialect.QuoteField(column)) + } + s.WriteString(")") + } + } + s.WriteString(") ") + s.WriteString(dialect.CreateTableSuffix()) + 
s.WriteString(dialect.QuerySuffix()) + return s.String() +} diff --git a/vendor/github.com/go-gorp/gorp/table_bindings.go b/vendor/github.com/go-gorp/gorp/table_bindings.go new file mode 100644 index 000000000..5b049a360 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/table_bindings.go @@ -0,0 +1,312 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp + +package gorp + +import ( + "bytes" + "fmt" + "reflect" + "sync" +) + +// CustomScanner binds a database column value to a Go type +type CustomScanner struct { + // After a row is scanned, Holder will contain the value from the database column. + // Initialize the CustomScanner with the concrete Go type you wish the database + // driver to scan the raw column into. + Holder interface{} + // Target typically holds a pointer to the target struct field to bind the Holder + // value to. + Target interface{} + // Binder is a custom function that converts the holder value to the target type + // and sets target accordingly. This function should return error if a problem + // occurs converting the holder to the target. 
+ Binder func(holder interface{}, target interface{}) error +} + +// Used to filter columns when selectively updating +type ColumnFilter func(*ColumnMap) bool + +func acceptAllFilter(col *ColumnMap) bool { + return true +} + +// Bind is called automatically by gorp after Scan() +func (me CustomScanner) Bind() error { + return me.Binder(me.Holder, me.Target) +} + +type bindPlan struct { + query string + argFields []string + keyFields []string + versField string + autoIncrIdx int + autoIncrFieldName string + once sync.Once +} + +func (plan *bindPlan) createBindInstance(elem reflect.Value, conv TypeConverter) (bindInstance, error) { + bi := bindInstance{query: plan.query, autoIncrIdx: plan.autoIncrIdx, autoIncrFieldName: plan.autoIncrFieldName, versField: plan.versField} + if plan.versField != "" { + bi.existingVersion = elem.FieldByName(plan.versField).Int() + } + + var err error + + for i := 0; i < len(plan.argFields); i++ { + k := plan.argFields[i] + if k == versFieldConst { + newVer := bi.existingVersion + 1 + bi.args = append(bi.args, newVer) + if bi.existingVersion == 0 { + elem.FieldByName(plan.versField).SetInt(int64(newVer)) + } + } else { + val := elem.FieldByName(k).Interface() + if conv != nil { + val, err = conv.ToDb(val) + if err != nil { + return bindInstance{}, err + } + } + bi.args = append(bi.args, val) + } + } + + for i := 0; i < len(plan.keyFields); i++ { + k := plan.keyFields[i] + val := elem.FieldByName(k).Interface() + if conv != nil { + val, err = conv.ToDb(val) + if err != nil { + return bindInstance{}, err + } + } + bi.keys = append(bi.keys, val) + } + + return bi, nil +} + +type bindInstance struct { + query string + args []interface{} + keys []interface{} + existingVersion int64 + versField string + autoIncrIdx int + autoIncrFieldName string +} + +func (t *TableMap) bindInsert(elem reflect.Value) (bindInstance, error) { + plan := &t.insertPlan + plan.once.Do(func() { + plan.autoIncrIdx = -1 + + s := bytes.Buffer{} + s2 := bytes.Buffer{} + 
s.WriteString(fmt.Sprintf("insert into %s (", t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) + + x := 0 + first := true + for y := range t.Columns { + col := t.Columns[y] + if !(col.isAutoIncr && t.dbmap.Dialect.AutoIncrBindValue() == "") { + if !col.Transient { + if !first { + s.WriteString(",") + s2.WriteString(",") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + + if col.isAutoIncr { + s2.WriteString(t.dbmap.Dialect.AutoIncrBindValue()) + plan.autoIncrIdx = y + plan.autoIncrFieldName = col.fieldName + } else { + if col.DefaultValue == "" { + s2.WriteString(t.dbmap.Dialect.BindVar(x)) + if col == t.version { + plan.versField = col.fieldName + plan.argFields = append(plan.argFields, versFieldConst) + } else { + plan.argFields = append(plan.argFields, col.fieldName) + } + x++ + } else { + s2.WriteString(col.DefaultValue) + } + } + first = false + } + } else { + plan.autoIncrIdx = y + plan.autoIncrFieldName = col.fieldName + } + } + s.WriteString(") values (") + s.WriteString(s2.String()) + s.WriteString(")") + if plan.autoIncrIdx > -1 { + s.WriteString(t.dbmap.Dialect.AutoIncrInsertSuffix(t.Columns[plan.autoIncrIdx])) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + plan.query = s.String() + }) + + return plan.createBindInstance(elem, t.dbmap.TypeConverter) +} + +func (t *TableMap) bindUpdate(elem reflect.Value, colFilter ColumnFilter) (bindInstance, error) { + if colFilter == nil { + colFilter = acceptAllFilter + } + + plan := &t.updatePlan + plan.once.Do(func() { + s := bytes.Buffer{} + s.WriteString(fmt.Sprintf("update %s set ", t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) + x := 0 + + for y := range t.Columns { + col := t.Columns[y] + if !col.isAutoIncr && !col.Transient && colFilter(col) { + if x > 0 { + s.WriteString(", ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + if col == t.version { + plan.versField = 
col.fieldName + plan.argFields = append(plan.argFields, versFieldConst) + } else { + plan.argFields = append(plan.argFields, col.fieldName) + } + x++ + } + } + + s.WriteString(" where ") + for y := range t.keys { + col := t.keys[y] + if y > 0 { + s.WriteString(" and ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + plan.argFields = append(plan.argFields, col.fieldName) + plan.keyFields = append(plan.keyFields, col.fieldName) + x++ + } + if plan.versField != "" { + s.WriteString(" and ") + s.WriteString(t.dbmap.Dialect.QuoteField(t.version.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + plan.argFields = append(plan.argFields, plan.versField) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + plan.query = s.String() + }) + + return plan.createBindInstance(elem, t.dbmap.TypeConverter) +} + +func (t *TableMap) bindDelete(elem reflect.Value) (bindInstance, error) { + plan := &t.deletePlan + plan.once.Do(func() { + s := bytes.Buffer{} + s.WriteString(fmt.Sprintf("delete from %s", t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) + + for y := range t.Columns { + col := t.Columns[y] + if !col.Transient { + if col == t.version { + plan.versField = col.fieldName + } + } + } + + s.WriteString(" where ") + for x := range t.keys { + k := t.keys[x] + if x > 0 { + s.WriteString(" and ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(k.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + plan.keyFields = append(plan.keyFields, k.fieldName) + plan.argFields = append(plan.argFields, k.fieldName) + } + if plan.versField != "" { + s.WriteString(" and ") + s.WriteString(t.dbmap.Dialect.QuoteField(t.version.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(len(plan.argFields))) + + plan.argFields = append(plan.argFields, plan.versField) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + 
plan.query = s.String() + }) + + return plan.createBindInstance(elem, t.dbmap.TypeConverter) +} + +func (t *TableMap) bindGet() *bindPlan { + plan := &t.getPlan + plan.once.Do(func() { + s := bytes.Buffer{} + s.WriteString("select ") + + x := 0 + for _, col := range t.Columns { + if !col.Transient { + if x > 0 { + s.WriteString(",") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + plan.argFields = append(plan.argFields, col.fieldName) + x++ + } + } + s.WriteString(" from ") + s.WriteString(t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName)) + s.WriteString(" where ") + for x := range t.keys { + col := t.keys[x] + if x > 0 { + s.WriteString(" and ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + plan.keyFields = append(plan.keyFields, col.fieldName) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + plan.query = s.String() + }) + + return plan +} diff --git a/vendor/github.com/go-gorp/gorp/test_all.sh b/vendor/github.com/go-gorp/gorp/test_all.sh new file mode 100755 index 000000000..4c99584ef --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/test_all.sh @@ -0,0 +1,41 @@ +#!/bin/bash -e + +# on macs, you may need to: +# export GOBUILDFLAG=-ldflags -linkmode=external + +coveralls_testflags="-v -covermode=count -coverprofile=coverage.out" + +echo "Running unit tests" +ginkgo -r -race -randomizeAllSpecs -keepGoing -- -test.run TestGorp + +echo "Testing against mysql" +export GORP_TEST_DSN=gorptest/gorptest/gorptest +export GORP_TEST_DIALECT=mysql +go test $coveralls_testflags $GOBUILDFLAG $@ . + +echo "Testing against gomysql" +export GORP_TEST_DSN=gorptest:gorptest@/gorptest +export GORP_TEST_DIALECT=gomysql +go test $coveralls_testflags $GOBUILDFLAG $@ . 
+ +echo "Testing against postgres" +export GORP_TEST_DSN="user=gorptest password=gorptest dbname=gorptest sslmode=disable" +export GORP_TEST_DIALECT=postgres +go test $coveralls_testflags $GOBUILDFLAG $@ . + +echo "Testing against sqlite" +export GORP_TEST_DSN=/tmp/gorptest.bin +export GORP_TEST_DIALECT=sqlite +go test $coveralls_testflags $GOBUILDFLAG $@ . +rm -f /tmp/gorptest.bin + +case $(go version) in + *go1.4*) + if [ "$(type -p goveralls)" != "" ]; then + goveralls -covermode=count -coverprofile=coverage.out -service=travis-ci + elif [ -x $HOME/gopath/bin/goveralls ]; then + $HOME/gopath/bin/goveralls -covermode=count -coverprofile=coverage.out -service=travis-ci + fi + ;; + *) ;; +esac diff --git a/vendor/github.com/go-gorp/gorp/transaction.go b/vendor/github.com/go-gorp/gorp/transaction.go new file mode 100644 index 000000000..4a4486fcd --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/transaction.go @@ -0,0 +1,202 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp + +package gorp + +import ( + "context" + "database/sql" + "time" +) + +// Transaction represents a database transaction. +// Insert/Update/Delete/Get/Exec operations will be run in the context +// of that transaction. Transactions should be terminated with +// a call to Commit() or Rollback() +type Transaction struct { + ctx context.Context + dbmap *DbMap + tx *sql.Tx + closed bool +} + +func (t *Transaction) WithContext(ctx context.Context) SqlExecutor { + copy := &Transaction{} + *copy = *t + copy.ctx = ctx + return copy +} + +// Insert has the same behavior as DbMap.Insert(), but runs in a transaction. 
+func (t *Transaction) Insert(list ...interface{}) error { + return insert(t.dbmap, t, list...) +} + +// Update had the same behavior as DbMap.Update(), but runs in a transaction. +func (t *Transaction) Update(list ...interface{}) (int64, error) { + return update(t.dbmap, t, nil, list...) +} + +// UpdateColumns had the same behavior as DbMap.UpdateColumns(), but runs in a transaction. +func (t *Transaction) UpdateColumns(filter ColumnFilter, list ...interface{}) (int64, error) { + return update(t.dbmap, t, filter, list...) +} + +// Delete has the same behavior as DbMap.Delete(), but runs in a transaction. +func (t *Transaction) Delete(list ...interface{}) (int64, error) { + return delete(t.dbmap, t, list...) +} + +// Get has the same behavior as DbMap.Get(), but runs in a transaction. +func (t *Transaction) Get(i interface{}, keys ...interface{}) (interface{}, error) { + return get(t.dbmap, t, i, keys...) +} + +// Select has the same behavior as DbMap.Select(), but runs in a transaction. +func (t *Transaction) Select(i interface{}, query string, args ...interface{}) ([]interface{}, error) { + return hookedselect(t.dbmap, t, i, query, args...) +} + +// Exec has the same behavior as DbMap.Exec(), but runs in a transaction. +func (t *Transaction) Exec(query string, args ...interface{}) (sql.Result, error) { + if t.dbmap.logger != nil { + now := time.Now() + defer t.dbmap.trace(now, query, args...) + } + return exec(t, query, args...) +} + +// SelectInt is a convenience wrapper around the gorp.SelectInt function. +func (t *Transaction) SelectInt(query string, args ...interface{}) (int64, error) { + return SelectInt(t, query, args...) +} + +// SelectNullInt is a convenience wrapper around the gorp.SelectNullInt function. +func (t *Transaction) SelectNullInt(query string, args ...interface{}) (sql.NullInt64, error) { + return SelectNullInt(t, query, args...) +} + +// SelectFloat is a convenience wrapper around the gorp.SelectFloat function. 
+func (t *Transaction) SelectFloat(query string, args ...interface{}) (float64, error) { + return SelectFloat(t, query, args...) +} + +// SelectNullFloat is a convenience wrapper around the gorp.SelectNullFloat function. +func (t *Transaction) SelectNullFloat(query string, args ...interface{}) (sql.NullFloat64, error) { + return SelectNullFloat(t, query, args...) +} + +// SelectStr is a convenience wrapper around the gorp.SelectStr function. +func (t *Transaction) SelectStr(query string, args ...interface{}) (string, error) { + return SelectStr(t, query, args...) +} + +// SelectNullStr is a convenience wrapper around the gorp.SelectNullStr function. +func (t *Transaction) SelectNullStr(query string, args ...interface{}) (sql.NullString, error) { + return SelectNullStr(t, query, args...) +} + +// SelectOne is a convenience wrapper around the gorp.SelectOne function. +func (t *Transaction) SelectOne(holder interface{}, query string, args ...interface{}) error { + return SelectOne(t.dbmap, t, holder, query, args...) +} + +// Commit commits the underlying database transaction. +func (t *Transaction) Commit() error { + if !t.closed { + t.closed = true + if t.dbmap.logger != nil { + now := time.Now() + defer t.dbmap.trace(now, "commit;") + } + return t.tx.Commit() + } + + return sql.ErrTxDone +} + +// Rollback rolls back the underlying database transaction. +func (t *Transaction) Rollback() error { + if !t.closed { + t.closed = true + if t.dbmap.logger != nil { + now := time.Now() + defer t.dbmap.trace(now, "rollback;") + } + return t.tx.Rollback() + } + + return sql.ErrTxDone +} + +// Savepoint creates a savepoint with the given name. The name is interpolated +// directly into the SQL SAVEPOINT statement, so you must sanitize it if it is +// derived from user input. 
+func (t *Transaction) Savepoint(name string) error { + query := "savepoint " + t.dbmap.Dialect.QuoteField(name) + if t.dbmap.logger != nil { + now := time.Now() + defer t.dbmap.trace(now, query, nil) + } + _, err := exec(t, query) + return err +} + +// RollbackToSavepoint rolls back to the savepoint with the given name. The +// name is interpolated directly into the SQL SAVEPOINT statement, so you must +// sanitize it if it is derived from user input. +func (t *Transaction) RollbackToSavepoint(savepoint string) error { + query := "rollback to savepoint " + t.dbmap.Dialect.QuoteField(savepoint) + if t.dbmap.logger != nil { + now := time.Now() + defer t.dbmap.trace(now, query, nil) + } + _, err := exec(t, query) + return err +} + +// ReleaseSavepint releases the savepoint with the given name. The name is +// interpolated directly into the SQL SAVEPOINT statement, so you must sanitize +// it if it is derived from user input. +func (t *Transaction) ReleaseSavepoint(savepoint string) error { + query := "release savepoint " + t.dbmap.Dialect.QuoteField(savepoint) + if t.dbmap.logger != nil { + now := time.Now() + defer t.dbmap.trace(now, query, nil) + } + _, err := exec(t, query) + return err +} + +// Prepare has the same behavior as DbMap.Prepare(), but runs in a transaction. +func (t *Transaction) Prepare(query string) (*sql.Stmt, error) { + if t.dbmap.logger != nil { + now := time.Now() + defer t.dbmap.trace(now, query, nil) + } + return prepare(t, query) +} + +func (t *Transaction) QueryRow(query string, args ...interface{}) *sql.Row { + if t.dbmap.logger != nil { + now := time.Now() + defer t.dbmap.trace(now, query, args...) + } + return queryRow(t, query, args...) +} + +func (t *Transaction) Query(q string, args ...interface{}) (*sql.Rows, error) { + if t.dbmap.logger != nil { + now := time.Now() + defer t.dbmap.trace(now, q, args...) + } + return query(t, q, args...) 
+} diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS new file mode 100644 index 000000000..1931f4006 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/AUTHORS @@ -0,0 +1,9 @@ +# This is the official list of Gorilla WebSocket authors for copyright +# purposes. +# +# Please keep the list sorted. + +Gary Burd +Google LLC (https://opensource.google.com/) +Joachim Bauch + diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE new file mode 100644 index 000000000..9171c9722 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md new file mode 100644 index 000000000..20e391f86 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/README.md @@ -0,0 +1,64 @@ +# Gorilla WebSocket + +Gorilla WebSocket is a [Go](http://golang.org/) implementation of the +[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. + +[![Build Status](https://travis-ci.org/gorilla/websocket.svg?branch=master)](https://travis-ci.org/gorilla/websocket) +[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) + +### Documentation + +* [API Reference](http://godoc.org/github.com/gorilla/websocket) +* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) +* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) +* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) +* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) + +### Status + +The Gorilla WebSocket package provides a complete and tested implementation of +the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The +package API is stable. + +### Installation + + go get github.com/gorilla/websocket + +### Protocol Compliance + +The Gorilla WebSocket package passes the server tests in the [Autobahn Test +Suite](http://autobahn.ws/testsuite) using the application in the [examples/autobahn +subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). + +### Gorilla WebSocket compared with other packages + +
+ + + + + + + + + + + + + + + + +
github.com/gorillagolang.org/x/net
RFC 6455 Features
Passes Autobahn Test SuiteYesNo
Receive fragmented messageYesNo, see note 1
Send close messageYesNo
Send pings and receive pongsYesNo
Get the type of a received data messageYesYes, see note 2
Other Features
Compression ExtensionsExperimentalNo
Read message using io.ReaderYesNo, see note 3
Write message using io.WriteCloserYesNo, see note 3
+ +Notes: + +1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html). +2. The application can get the type of a received data message by implementing + a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal) + function. +3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries. + Read returns when the input buffer is full or a frame boundary is + encountered. Each call to Write sends a single frame message. The Gorilla + io.Reader and io.WriteCloser operate on a single WebSocket message. + diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go new file mode 100644 index 000000000..2e32fd506 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client.go @@ -0,0 +1,395 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "context" + "crypto/tls" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptrace" + "net/url" + "strings" + "time" +) + +// ErrBadHandshake is returned when the server response to opening handshake is +// invalid. +var ErrBadHandshake = errors.New("websocket: bad handshake") + +var errInvalidCompression = errors.New("websocket: invalid compression negotiation") + +// NewClient creates a new client connection using the given net connection. +// The URL u specifies the host and request URI. Use requestHeader to specify +// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies +// (Cookie). Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etc. 
+// +// Deprecated: Use Dialer instead. +func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { + d := Dialer{ + ReadBufferSize: readBufSize, + WriteBufferSize: writeBufSize, + NetDial: func(net, addr string) (net.Conn, error) { + return netConn, nil + }, + } + return d.Dial(u.String(), requestHeader) +} + +// A Dialer contains options for connecting to WebSocket server. +type Dialer struct { + // NetDial specifies the dial function for creating TCP connections. If + // NetDial is nil, net.Dial is used. + NetDial func(network, addr string) (net.Conn, error) + + // NetDialContext specifies the dial function for creating TCP connections. If + // NetDialContext is nil, net.DialContext is used. + NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error) + + // Proxy specifies a function to return a proxy for a given + // Request. If the function returns a non-nil error, the + // request is aborted with the provided error. + // If Proxy is nil or returns a nil *URL, no proxy is used. + Proxy func(*http.Request) (*url.URL, error) + + // TLSClientConfig specifies the TLS configuration to use with tls.Client. + // If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer + // size is zero, then a useful default size is used. The I/O buffer sizes + // do not limit the size of the messages that can be sent or received. + ReadBufferSize, WriteBufferSize int + + // WriteBufferPool is a pool of buffers for write operations. If the value + // is not set, then write buffers are allocated to the connection for the + // lifetime of the connection. 
+ // + // A pool is most useful when the application has a modest volume of writes + // across a large number of connections. + // + // Applications should use a single pool for each unique value of + // WriteBufferSize. + WriteBufferPool BufferPool + + // Subprotocols specifies the client's requested subprotocols. + Subprotocols []string + + // EnableCompression specifies if the client should attempt to negotiate + // per message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool + + // Jar specifies the cookie jar. + // If Jar is nil, cookies are not sent in requests and ignored + // in responses. + Jar http.CookieJar +} + +// Dial creates a new client connection by calling DialContext with a background context. +func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + return d.DialContext(context.Background(), urlStr, requestHeader) +} + +var errMalformedURL = errors.New("malformed ws or wss URL") + +func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { + hostPort = u.Host + hostNoPort = u.Host + if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { + hostNoPort = hostNoPort[:i] + } else { + switch u.Scheme { + case "wss": + hostPort += ":443" + case "https": + hostPort += ":443" + default: + hostPort += ":80" + } + } + return hostPort, hostNoPort +} + +// DefaultDialer is a dialer with all fields set to the default values. +var DefaultDialer = &Dialer{ + Proxy: http.ProxyFromEnvironment, + HandshakeTimeout: 45 * time.Second, +} + +// nilDialer is dialer to use when receiver is nil. +var nilDialer = *DefaultDialer + +// DialContext creates a new client connection. Use requestHeader to specify the +// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). 
+// Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// The context will be used in the request and in the Dialer +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etcetera. The response body may not contain the entire response and does not +// need to be closed by the application. +func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + if d == nil { + d = &nilDialer + } + + challengeKey, err := generateChallengeKey() + if err != nil { + return nil, nil, err + } + + u, err := url.Parse(urlStr) + if err != nil { + return nil, nil, err + } + + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + default: + return nil, nil, errMalformedURL + } + + if u.User != nil { + // User name and password are not allowed in websocket URIs. + return nil, nil, errMalformedURL + } + + req := &http.Request{ + Method: "GET", + URL: u, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: u.Host, + } + req = req.WithContext(ctx) + + // Set the cookies present in the cookie jar of the dialer + if d.Jar != nil { + for _, cookie := range d.Jar.Cookies(u) { + req.AddCookie(cookie) + } + } + + // Set the request headers using the capitalization for names and values in + // RFC examples. Although the capitalization shouldn't matter, there are + // servers that depend on it. The Header.Set method is not used because the + // method canonicalizes the header names. 
+ req.Header["Upgrade"] = []string{"websocket"} + req.Header["Connection"] = []string{"Upgrade"} + req.Header["Sec-WebSocket-Key"] = []string{challengeKey} + req.Header["Sec-WebSocket-Version"] = []string{"13"} + if len(d.Subprotocols) > 0 { + req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")} + } + for k, vs := range requestHeader { + switch { + case k == "Host": + if len(vs) > 0 { + req.Host = vs[0] + } + case k == "Upgrade" || + k == "Connection" || + k == "Sec-Websocket-Key" || + k == "Sec-Websocket-Version" || + k == "Sec-Websocket-Extensions" || + (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): + return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) + case k == "Sec-Websocket-Protocol": + req.Header["Sec-WebSocket-Protocol"] = vs + default: + req.Header[k] = vs + } + } + + if d.EnableCompression { + req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"} + } + + if d.HandshakeTimeout != 0 { + var cancel func() + ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout) + defer cancel() + } + + // Get network dial function. + var netDial func(network, add string) (net.Conn, error) + + if d.NetDialContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialContext(ctx, network, addr) + } + } else if d.NetDial != nil { + netDial = d.NetDial + } else { + netDialer := &net.Dialer{} + netDial = func(network, addr string) (net.Conn, error) { + return netDialer.DialContext(ctx, network, addr) + } + } + + // If needed, wrap the dial function to set the connection deadline. 
+ if deadline, ok := ctx.Deadline(); ok { + forwardDial := netDial + netDial = func(network, addr string) (net.Conn, error) { + c, err := forwardDial(network, addr) + if err != nil { + return nil, err + } + err = c.SetDeadline(deadline) + if err != nil { + c.Close() + return nil, err + } + return c, nil + } + } + + // If needed, wrap the dial function to connect through a proxy. + if d.Proxy != nil { + proxyURL, err := d.Proxy(req) + if err != nil { + return nil, nil, err + } + if proxyURL != nil { + dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial)) + if err != nil { + return nil, nil, err + } + netDial = dialer.Dial + } + } + + hostPort, hostNoPort := hostPortNoPort(u) + trace := httptrace.ContextClientTrace(ctx) + if trace != nil && trace.GetConn != nil { + trace.GetConn(hostPort) + } + + netConn, err := netDial("tcp", hostPort) + if trace != nil && trace.GotConn != nil { + trace.GotConn(httptrace.GotConnInfo{ + Conn: netConn, + }) + } + if err != nil { + return nil, nil, err + } + + defer func() { + if netConn != nil { + netConn.Close() + } + }() + + if u.Scheme == "https" { + cfg := cloneTLSConfig(d.TLSClientConfig) + if cfg.ServerName == "" { + cfg.ServerName = hostNoPort + } + tlsConn := tls.Client(netConn, cfg) + netConn = tlsConn + + var err error + if trace != nil { + err = doHandshakeWithTrace(trace, tlsConn, cfg) + } else { + err = doHandshake(tlsConn, cfg) + } + + if err != nil { + return nil, nil, err + } + } + + conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil) + + if err := req.Write(netConn); err != nil { + return nil, nil, err + } + + if trace != nil && trace.GotFirstResponseByte != nil { + if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 { + trace.GotFirstResponseByte() + } + } + + resp, err := http.ReadResponse(conn.br, req) + if err != nil { + return nil, nil, err + } + + if d.Jar != nil { + if rc := resp.Cookies(); len(rc) > 0 { + d.Jar.SetCookies(u, rc) + } + } + + if 
// doHandshake runs the client side of the TLS handshake on tlsConn and,
// unless certificate verification is disabled in cfg, checks that the peer
// certificate chain is valid for cfg.ServerName.
func doHandshake(tlsConn *tls.Conn, cfg *tls.Config) error {
	err := tlsConn.Handshake()
	if err != nil {
		return err
	}
	if cfg.InsecureSkipVerify {
		return nil
	}
	return tlsConn.VerifyHostname(cfg.ServerName)
}
// cloneTLSConfig returns a copy of cfg that the caller may mutate without
// affecting the original. A nil cfg yields a fresh zero-value config.
func cloneTLSConfig(cfg *tls.Config) *tls.Config {
	if cfg != nil {
		return cfg.Clone()
	}
	return &tls.Config{}
}
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "compress/flate" + "errors" + "io" + "strings" + "sync" +) + +const ( + minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6 + maxCompressionLevel = flate.BestCompression + defaultCompressionLevel = 1 +) + +var ( + flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool + flateReaderPool = sync.Pool{New: func() interface{} { + return flate.NewReader(nil) + }} +) + +func decompressNoContextTakeover(r io.Reader) io.ReadCloser { + const tail = + // Add four bytes as specified in RFC + "\x00\x00\xff\xff" + + // Add final block to squelch unexpected EOF error from flate reader. + "\x01\x00\x00\xff\xff" + + fr, _ := flateReaderPool.Get().(io.ReadCloser) + fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) + return &flateReadWrapper{fr} +} + +func isValidCompressionLevel(level int) bool { + return minCompressionLevel <= level && level <= maxCompressionLevel +} + +func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser { + p := &flateWriterPools[level-minCompressionLevel] + tw := &truncWriter{w: w} + fw, _ := p.Get().(*flate.Writer) + if fw == nil { + fw, _ = flate.NewWriter(tw, level) + } else { + fw.Reset(tw) + } + return &flateWriteWrapper{fw: fw, tw: tw, p: p} +} + +// truncWriter is an io.Writer that writes all but the last four bytes of the +// stream to another io.Writer. +type truncWriter struct { + w io.WriteCloser + n int + p [4]byte +} + +func (w *truncWriter) Write(p []byte) (int, error) { + n := 0 + + // fill buffer first for simplicity. 
+ if w.n < len(w.p) { + n = copy(w.p[w.n:], p) + p = p[n:] + w.n += n + if len(p) == 0 { + return n, nil + } + } + + m := len(p) + if m > len(w.p) { + m = len(w.p) + } + + if nn, err := w.w.Write(w.p[:m]); err != nil { + return n + nn, err + } + + copy(w.p[:], w.p[m:]) + copy(w.p[len(w.p)-m:], p[len(p)-m:]) + nn, err := w.w.Write(p[:len(p)-m]) + return n + nn, err +} + +type flateWriteWrapper struct { + fw *flate.Writer + tw *truncWriter + p *sync.Pool +} + +func (w *flateWriteWrapper) Write(p []byte) (int, error) { + if w.fw == nil { + return 0, errWriteClosed + } + return w.fw.Write(p) +} + +func (w *flateWriteWrapper) Close() error { + if w.fw == nil { + return errWriteClosed + } + err1 := w.fw.Flush() + w.p.Put(w.fw) + w.fw = nil + if w.tw.p != [4]byte{0, 0, 0xff, 0xff} { + return errors.New("websocket: internal error, unexpected bytes at end of flate stream") + } + err2 := w.tw.w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +type flateReadWrapper struct { + fr io.ReadCloser +} + +func (r *flateReadWrapper) Read(p []byte) (int, error) { + if r.fr == nil { + return 0, io.ErrClosedPipe + } + n, err := r.fr.Read(p) + if err == io.EOF { + // Preemptively place the reader back in the pool. This helps with + // scenarios where the application does not call NextReader() soon after + // this final read. + r.Close() + } + return n, err +} + +func (r *flateReadWrapper) Close() error { + if r.fr == nil { + return io.ErrClosedPipe + } + err := r.fr.Close() + flateReaderPool.Put(r.fr) + r.fr = nil + return err +} diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go new file mode 100644 index 000000000..d2a21c148 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -0,0 +1,1165 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "bufio" + "encoding/binary" + "errors" + "io" + "io/ioutil" + "math/rand" + "net" + "strconv" + "sync" + "time" + "unicode/utf8" +) + +const ( + // Frame header byte 0 bits from Section 5.2 of RFC 6455 + finalBit = 1 << 7 + rsv1Bit = 1 << 6 + rsv2Bit = 1 << 5 + rsv3Bit = 1 << 4 + + // Frame header byte 1 bits from Section 5.2 of RFC 6455 + maskBit = 1 << 7 + + maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask + maxControlFramePayloadSize = 125 + + writeWait = time.Second + + defaultReadBufferSize = 4096 + defaultWriteBufferSize = 4096 + + continuationFrame = 0 + noFrame = -1 +) + +// Close codes defined in RFC 6455, section 11.7. +const ( + CloseNormalClosure = 1000 + CloseGoingAway = 1001 + CloseProtocolError = 1002 + CloseUnsupportedData = 1003 + CloseNoStatusReceived = 1005 + CloseAbnormalClosure = 1006 + CloseInvalidFramePayloadData = 1007 + ClosePolicyViolation = 1008 + CloseMessageTooBig = 1009 + CloseMandatoryExtension = 1010 + CloseInternalServerErr = 1011 + CloseServiceRestart = 1012 + CloseTryAgainLater = 1013 + CloseTLSHandshake = 1015 +) + +// The message types are defined in RFC 6455, section 11.8. +const ( + // TextMessage denotes a text data message. The text message payload is + // interpreted as UTF-8 encoded text data. + TextMessage = 1 + + // BinaryMessage denotes a binary data message. + BinaryMessage = 2 + + // CloseMessage denotes a close control message. The optional message + // payload contains a numeric code and text. Use the FormatCloseMessage + // function to format a close message payload. + CloseMessage = 8 + + // PingMessage denotes a ping control message. The optional message payload + // is UTF-8 encoded text. + PingMessage = 9 + + // PongMessage denotes a pong control message. The optional message payload + // is UTF-8 encoded text. + PongMessage = 10 +) + +// ErrCloseSent is returned when the application writes a message to the +// connection after sending a close message. 
// netError satisfies the net.Error interface.
type netError struct {
	msg       string // human-readable description
	temporary bool   // whether retrying may succeed
	timeout   bool   // whether the failure was a timeout
}

// Compile-time check that *netError implements net.Error.
var _ net.Error = (*netError)(nil)

func (e *netError) Error() string   { return e.msg }
func (e *netError) Timeout() bool   { return e.timeout }
func (e *netError) Temporary() bool { return e.temporary }
+func IsCloseError(err error, codes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range codes { + if e.Code == code { + return true + } + } + } + return false +} + +// IsUnexpectedCloseError returns boolean indicating whether the error is a +// *CloseError with a code not in the list of expected codes. +func IsUnexpectedCloseError(err error, expectedCodes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range expectedCodes { + if e.Code == code { + return false + } + } + return true + } + return false +} + +var ( + errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true} + errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()} + errBadWriteOpCode = errors.New("websocket: bad write message type") + errWriteClosed = errors.New("websocket: write closed") + errInvalidControlFrame = errors.New("websocket: invalid control frame") +) + +func newMaskKey() [4]byte { + n := rand.Uint32() + return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} +} + +func hideTempErr(err error) error { + if e, ok := err.(net.Error); ok && e.Temporary() { + err = &netError{msg: e.Error(), timeout: e.Timeout()} + } + return err +} + +func isControl(frameType int) bool { + return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage +} + +func isData(frameType int) bool { + return frameType == TextMessage || frameType == BinaryMessage +} + +var validReceivedCloseCodes = map[int]bool{ + // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number + + CloseNormalClosure: true, + CloseGoingAway: true, + CloseProtocolError: true, + CloseUnsupportedData: true, + CloseNoStatusReceived: false, + CloseAbnormalClosure: false, + CloseInvalidFramePayloadData: true, + ClosePolicyViolation: true, + CloseMessageTooBig: true, + CloseMandatoryExtension: true, + CloseInternalServerErr: true, + CloseServiceRestart: true, + 
CloseTryAgainLater: true, + CloseTLSHandshake: false, +} + +func isValidReceivedCloseCode(code int) bool { + return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999) +} + +// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this +// interface. The type of the value stored in a pool is not specified. +type BufferPool interface { + // Get gets a value from the pool or returns nil if the pool is empty. + Get() interface{} + // Put adds a value to the pool. + Put(interface{}) +} + +// writePoolData is the type added to the write buffer pool. This wrapper is +// used to prevent applications from peeking at and depending on the values +// added to the pool. +type writePoolData struct{ buf []byte } + +// The Conn type represents a WebSocket connection. +type Conn struct { + conn net.Conn + isServer bool + subprotocol string + + // Write fields + mu chan bool // used as mutex to protect write to conn + writeBuf []byte // frame is constructed in this buffer. + writePool BufferPool + writeBufSize int + writeDeadline time.Time + writer io.WriteCloser // the current writer returned to the application + isWriting bool // for best-effort concurrent write detection + + writeErrMu sync.Mutex + writeErr error + + enableWriteCompression bool + compressionLevel int + newCompressionWriter func(io.WriteCloser, int) io.WriteCloser + + // Read fields + reader io.ReadCloser // the current reader returned to the application + readErr error + br *bufio.Reader + readRemaining int64 // bytes remaining in current frame. + readFinal bool // true the current message has more frames. + readLength int64 // Message size. + readLimit int64 // Maximum message size. 
+ readMaskPos int + readMaskKey [4]byte + handlePong func(string) error + handlePing func(string) error + handleClose func(int, string) error + readErrCount int + messageReader *messageReader // the current low-level reader + + readDecompress bool // whether last read frame had RSV1 set + newDecompressionReader func(io.Reader) io.ReadCloser +} + +func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn { + + if br == nil { + if readBufferSize == 0 { + readBufferSize = defaultReadBufferSize + } else if readBufferSize < maxControlFramePayloadSize { + // must be large enough for control frame + readBufferSize = maxControlFramePayloadSize + } + br = bufio.NewReaderSize(conn, readBufferSize) + } + + if writeBufferSize <= 0 { + writeBufferSize = defaultWriteBufferSize + } + writeBufferSize += maxFrameHeaderSize + + if writeBuf == nil && writeBufferPool == nil { + writeBuf = make([]byte, writeBufferSize) + } + + mu := make(chan bool, 1) + mu <- true + c := &Conn{ + isServer: isServer, + br: br, + conn: conn, + mu: mu, + readFinal: true, + writeBuf: writeBuf, + writePool: writeBufferPool, + writeBufSize: writeBufferSize, + enableWriteCompression: true, + compressionLevel: defaultCompressionLevel, + } + c.SetCloseHandler(nil) + c.SetPingHandler(nil) + c.SetPongHandler(nil) + return c +} + +// Subprotocol returns the negotiated protocol for the connection. +func (c *Conn) Subprotocol() string { + return c.subprotocol +} + +// Close closes the underlying network connection without sending or waiting +// for a close message. +func (c *Conn) Close() error { + return c.conn.Close() +} + +// LocalAddr returns the local network address. +func (c *Conn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +// RemoteAddr returns the remote network address. 
+func (c *Conn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +// Write methods + +func (c *Conn) writeFatal(err error) error { + err = hideTempErr(err) + c.writeErrMu.Lock() + if c.writeErr == nil { + c.writeErr = err + } + c.writeErrMu.Unlock() + return err +} + +func (c *Conn) read(n int) ([]byte, error) { + p, err := c.br.Peek(n) + if err == io.EOF { + err = errUnexpectedEOF + } + c.br.Discard(len(p)) + return p, err +} + +func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error { + <-c.mu + defer func() { c.mu <- true }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + c.conn.SetWriteDeadline(deadline) + if len(buf1) == 0 { + _, err = c.conn.Write(buf0) + } else { + err = c.writeBufs(buf0, buf1) + } + if err != nil { + return c.writeFatal(err) + } + if frameType == CloseMessage { + c.writeFatal(ErrCloseSent) + } + return nil +} + +// WriteControl writes a control message with the given deadline. The allowed +// message types are CloseMessage, PingMessage and PongMessage. +func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { + if !isControl(messageType) { + return errBadWriteOpCode + } + if len(data) > maxControlFramePayloadSize { + return errInvalidControlFrame + } + + b0 := byte(messageType) | finalBit + b1 := byte(len(data)) + if !c.isServer { + b1 |= maskBit + } + + buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) + buf = append(buf, b0, b1) + + if c.isServer { + buf = append(buf, data...) + } else { + key := newMaskKey() + buf = append(buf, key[:]...) + buf = append(buf, data...) 
+ maskBytes(key, 0, buf[6:]) + } + + d := time.Hour * 1000 + if !deadline.IsZero() { + d = deadline.Sub(time.Now()) + if d < 0 { + return errWriteTimeout + } + } + + timer := time.NewTimer(d) + select { + case <-c.mu: + timer.Stop() + case <-timer.C: + return errWriteTimeout + } + defer func() { c.mu <- true }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + c.conn.SetWriteDeadline(deadline) + _, err = c.conn.Write(buf) + if err != nil { + return c.writeFatal(err) + } + if messageType == CloseMessage { + c.writeFatal(ErrCloseSent) + } + return err +} + +func (c *Conn) prepWrite(messageType int) error { + // Close previous writer if not already closed by the application. It's + // probably better to return an error in this situation, but we cannot + // change this without breaking existing applications. + if c.writer != nil { + c.writer.Close() + c.writer = nil + } + + if !isControl(messageType) && !isData(messageType) { + return errBadWriteOpCode + } + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + if c.writeBuf == nil { + wpd, ok := c.writePool.Get().(writePoolData) + if ok { + c.writeBuf = wpd.buf + } else { + c.writeBuf = make([]byte, c.writeBufSize) + } + } + return nil +} + +// NextWriter returns a writer for the next message to send. The writer's Close +// method flushes the complete message to the network. +// +// There can be at most one open writer on a connection. NextWriter closes the +// previous writer if the application has not already done so. +// +// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and +// PongMessage) are supported. 
+func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { + if err := c.prepWrite(messageType); err != nil { + return nil, err + } + + mw := &messageWriter{ + c: c, + frameType: messageType, + pos: maxFrameHeaderSize, + } + c.writer = mw + if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { + w := c.newCompressionWriter(c.writer, c.compressionLevel) + mw.compress = true + c.writer = w + } + return c.writer, nil +} + +type messageWriter struct { + c *Conn + compress bool // whether next call to flushFrame should set RSV1 + pos int // end of data in writeBuf. + frameType int // type of the current frame. + err error +} + +func (w *messageWriter) fatal(err error) error { + if w.err != nil { + w.err = err + w.c.writer = nil + } + return err +} + +// flushFrame writes buffered data and extra as a frame to the network. The +// final argument indicates that this is the last frame in the message. +func (w *messageWriter) flushFrame(final bool, extra []byte) error { + c := w.c + length := w.pos - maxFrameHeaderSize + len(extra) + + // Check for invalid control frames. + if isControl(w.frameType) && + (!final || length > maxControlFramePayloadSize) { + return w.fatal(errInvalidControlFrame) + } + + b0 := byte(w.frameType) + if final { + b0 |= finalBit + } + if w.compress { + b0 |= rsv1Bit + } + w.compress = false + + b1 := byte(0) + if !c.isServer { + b1 |= maskBit + } + + // Assume that the frame starts at beginning of c.writeBuf. + framePos := 0 + if c.isServer { + // Adjust up if mask not included in the header. 
+ framePos = 4 + } + + switch { + case length >= 65536: + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 127 + binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) + case length > 125: + framePos += 6 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 126 + binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) + default: + framePos += 8 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | byte(length) + } + + if !c.isServer { + key := newMaskKey() + copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) + maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) + if len(extra) > 0 { + return c.writeFatal(errors.New("websocket: internal error, extra used in client mode")) + } + } + + // Write the buffers to the connection with best-effort detection of + // concurrent writes. See the concurrency section in the package + // documentation for more info. + + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + + err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra) + + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + + if err != nil { + return w.fatal(err) + } + + if final { + c.writer = nil + if c.writePool != nil { + c.writePool.Put(writePoolData{buf: c.writeBuf}) + c.writeBuf = nil + } + return nil + } + + // Setup for next frame. + w.pos = maxFrameHeaderSize + w.frameType = continuationFrame + return nil +} + +func (w *messageWriter) ncopy(max int) (int, error) { + n := len(w.c.writeBuf) - w.pos + if n <= 0 { + if err := w.flushFrame(false, nil); err != nil { + return 0, err + } + n = len(w.c.writeBuf) - w.pos + } + if n > max { + n = max + } + return n, nil +} + +func (w *messageWriter) Write(p []byte) (int, error) { + if w.err != nil { + return 0, w.err + } + + if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { + // Don't buffer large messages. 
+ err := w.flushFrame(false, p) + if err != nil { + return 0, err + } + return len(p), nil + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) WriteString(p string) (int, error) { + if w.err != nil { + return 0, w.err + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { + if w.err != nil { + return 0, w.err + } + for { + if w.pos == len(w.c.writeBuf) { + err = w.flushFrame(false, nil) + if err != nil { + break + } + } + var n int + n, err = r.Read(w.c.writeBuf[w.pos:]) + w.pos += n + nn += int64(n) + if err != nil { + if err == io.EOF { + err = nil + } + break + } + } + return nn, err +} + +func (w *messageWriter) Close() error { + if w.err != nil { + return w.err + } + if err := w.flushFrame(true, nil); err != nil { + return err + } + w.err = errWriteClosed + return nil +} + +// WritePreparedMessage writes prepared message into connection. +func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error { + frameType, frameData, err := pm.frame(prepareKey{ + isServer: c.isServer, + compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType), + compressionLevel: c.compressionLevel, + }) + if err != nil { + return err + } + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + err = c.write(frameType, c.writeDeadline, frameData, nil) + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + return err +} + +// WriteMessage is a helper method for getting a writer using NextWriter, +// writing the message and closing the writer. 
+func (c *Conn) WriteMessage(messageType int, data []byte) error { + + if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { + // Fast path with no allocations and single frame. + + if err := c.prepWrite(messageType); err != nil { + return err + } + mw := messageWriter{c: c, frameType: messageType, pos: maxFrameHeaderSize} + n := copy(c.writeBuf[mw.pos:], data) + mw.pos += n + data = data[n:] + return mw.flushFrame(true, data) + } + + w, err := c.NextWriter(messageType) + if err != nil { + return err + } + if _, err = w.Write(data); err != nil { + return err + } + return w.Close() +} + +// SetWriteDeadline sets the write deadline on the underlying network +// connection. After a write has timed out, the websocket state is corrupt and +// all future writes will return an error. A zero value for t means writes will +// not time out. +func (c *Conn) SetWriteDeadline(t time.Time) error { + c.writeDeadline = t + return nil +} + +// Read methods + +func (c *Conn) advanceFrame() (int, error) { + // 1. Skip remainder of previous frame. + + if c.readRemaining > 0 { + if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { + return noFrame, err + } + } + + // 2. Read and parse first two bytes of frame header. 
+ + p, err := c.read(2) + if err != nil { + return noFrame, err + } + + final := p[0]&finalBit != 0 + frameType := int(p[0] & 0xf) + mask := p[1]&maskBit != 0 + c.readRemaining = int64(p[1] & 0x7f) + + c.readDecompress = false + if c.newDecompressionReader != nil && (p[0]&rsv1Bit) != 0 { + c.readDecompress = true + p[0] &^= rsv1Bit + } + + if rsv := p[0] & (rsv1Bit | rsv2Bit | rsv3Bit); rsv != 0 { + return noFrame, c.handleProtocolError("unexpected reserved bits 0x" + strconv.FormatInt(int64(rsv), 16)) + } + + switch frameType { + case CloseMessage, PingMessage, PongMessage: + if c.readRemaining > maxControlFramePayloadSize { + return noFrame, c.handleProtocolError("control frame length > 125") + } + if !final { + return noFrame, c.handleProtocolError("control frame not final") + } + case TextMessage, BinaryMessage: + if !c.readFinal { + return noFrame, c.handleProtocolError("message start before final message frame") + } + c.readFinal = final + case continuationFrame: + if c.readFinal { + return noFrame, c.handleProtocolError("continuation after final message frame") + } + c.readFinal = final + default: + return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType)) + } + + // 3. Read and parse frame length. + + switch c.readRemaining { + case 126: + p, err := c.read(2) + if err != nil { + return noFrame, err + } + c.readRemaining = int64(binary.BigEndian.Uint16(p)) + case 127: + p, err := c.read(8) + if err != nil { + return noFrame, err + } + c.readRemaining = int64(binary.BigEndian.Uint64(p)) + } + + // 4. Handle frame masking. + + if mask != c.isServer { + return noFrame, c.handleProtocolError("incorrect mask flag") + } + + if mask { + c.readMaskPos = 0 + p, err := c.read(len(c.readMaskKey)) + if err != nil { + return noFrame, err + } + copy(c.readMaskKey[:], p) + } + + // 5. For text and binary messages, enforce read limit and return. 
+ + if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { + + c.readLength += c.readRemaining + if c.readLimit > 0 && c.readLength > c.readLimit { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) + return noFrame, ErrReadLimit + } + + return frameType, nil + } + + // 6. Read control frame payload. + + var payload []byte + if c.readRemaining > 0 { + payload, err = c.read(int(c.readRemaining)) + c.readRemaining = 0 + if err != nil { + return noFrame, err + } + if c.isServer { + maskBytes(c.readMaskKey, 0, payload) + } + } + + // 7. Process control frame payload. + + switch frameType { + case PongMessage: + if err := c.handlePong(string(payload)); err != nil { + return noFrame, err + } + case PingMessage: + if err := c.handlePing(string(payload)); err != nil { + return noFrame, err + } + case CloseMessage: + closeCode := CloseNoStatusReceived + closeText := "" + if len(payload) >= 2 { + closeCode = int(binary.BigEndian.Uint16(payload)) + if !isValidReceivedCloseCode(closeCode) { + return noFrame, c.handleProtocolError("invalid close code") + } + closeText = string(payload[2:]) + if !utf8.ValidString(closeText) { + return noFrame, c.handleProtocolError("invalid utf8 payload in close frame") + } + } + if err := c.handleClose(closeCode, closeText); err != nil { + return noFrame, err + } + return noFrame, &CloseError{Code: closeCode, Text: closeText} + } + + return frameType, nil +} + +func (c *Conn) handleProtocolError(message string) error { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait)) + return errors.New("websocket: " + message) +} + +// NextReader returns the next data message received from the peer. The +// returned messageType is either TextMessage or BinaryMessage. +// +// There can be at most one open reader on a connection. 
NextReader discards +// the previous message if the application has not already consumed it. +// +// Applications must break out of the application's read loop when this method +// returns a non-nil error value. Errors returned from this method are +// permanent. Once this method returns a non-nil error, all subsequent calls to +// this method return the same error. +func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { + // Close previous reader, only relevant for decompression. + if c.reader != nil { + c.reader.Close() + c.reader = nil + } + + c.messageReader = nil + c.readLength = 0 + + for c.readErr == nil { + frameType, err := c.advanceFrame() + if err != nil { + c.readErr = hideTempErr(err) + break + } + if frameType == TextMessage || frameType == BinaryMessage { + c.messageReader = &messageReader{c} + c.reader = c.messageReader + if c.readDecompress { + c.reader = c.newDecompressionReader(c.reader) + } + return frameType, c.reader, nil + } + } + + // Applications that do handle the error returned from this method spin in + // tight loop on connection failure. To help application developers detect + // this error, panic on repeated reads to the failed connection. 
+ c.readErrCount++ + if c.readErrCount >= 1000 { + panic("repeated read on failed websocket connection") + } + + return noFrame, nil, c.readErr +} + +type messageReader struct{ c *Conn } + +func (r *messageReader) Read(b []byte) (int, error) { + c := r.c + if c.messageReader != r { + return 0, io.EOF + } + + for c.readErr == nil { + + if c.readRemaining > 0 { + if int64(len(b)) > c.readRemaining { + b = b[:c.readRemaining] + } + n, err := c.br.Read(b) + c.readErr = hideTempErr(err) + if c.isServer { + c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) + } + c.readRemaining -= int64(n) + if c.readRemaining > 0 && c.readErr == io.EOF { + c.readErr = errUnexpectedEOF + } + return n, c.readErr + } + + if c.readFinal { + c.messageReader = nil + return 0, io.EOF + } + + frameType, err := c.advanceFrame() + switch { + case err != nil: + c.readErr = hideTempErr(err) + case frameType == TextMessage || frameType == BinaryMessage: + c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") + } + } + + err := c.readErr + if err == io.EOF && c.messageReader == r { + err = errUnexpectedEOF + } + return 0, err +} + +func (r *messageReader) Close() error { + return nil +} + +// ReadMessage is a helper method for getting a reader using NextReader and +// reading from that reader to a buffer. +func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { + var r io.Reader + messageType, r, err = c.NextReader() + if err != nil { + return messageType, nil, err + } + p, err = ioutil.ReadAll(r) + return messageType, p, err +} + +// SetReadDeadline sets the read deadline on the underlying network connection. +// After a read has timed out, the websocket connection state is corrupt and +// all future reads will return an error. A zero value for t means reads will +// not time out. 
+func (c *Conn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +// SetReadLimit sets the maximum size for a message read from the peer. If a +// message exceeds the limit, the connection sends a close message to the peer +// and returns ErrReadLimit to the application. +func (c *Conn) SetReadLimit(limit int64) { + c.readLimit = limit +} + +// CloseHandler returns the current close handler +func (c *Conn) CloseHandler() func(code int, text string) error { + return c.handleClose +} + +// SetCloseHandler sets the handler for close messages received from the peer. +// The code argument to h is the received close code or CloseNoStatusReceived +// if the close message is empty. The default close handler sends a close +// message back to the peer. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// close messages as described in the section on Control Messages above. +// +// The connection read methods return a CloseError when a close message is +// received. Most applications should handle close messages as part of their +// normal error handling. Applications should only set a close handler when the +// application must perform some action before sending a close message back to +// the peer. +func (c *Conn) SetCloseHandler(h func(code int, text string) error) { + if h == nil { + h = func(code int, text string) error { + message := FormatCloseMessage(code, "") + c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) + return nil + } + } + c.handleClose = h +} + +// PingHandler returns the current ping handler +func (c *Conn) PingHandler() func(appData string) error { + return c.handlePing +} + +// SetPingHandler sets the handler for ping messages received from the peer. +// The appData argument to h is the PING message application data. The default +// ping handler sends a pong to the peer. 
+// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// ping messages as described in the section on Control Messages above. +func (c *Conn) SetPingHandler(h func(appData string) error) { + if h == nil { + h = func(message string) error { + err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) + if err == ErrCloseSent { + return nil + } else if e, ok := err.(net.Error); ok && e.Temporary() { + return nil + } + return err + } + } + c.handlePing = h +} + +// PongHandler returns the current pong handler +func (c *Conn) PongHandler() func(appData string) error { + return c.handlePong +} + +// SetPongHandler sets the handler for pong messages received from the peer. +// The appData argument to h is the PONG message application data. The default +// pong handler does nothing. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// pong messages as described in the section on Control Messages above. +func (c *Conn) SetPongHandler(h func(appData string) error) { + if h == nil { + h = func(string) error { return nil } + } + c.handlePong = h +} + +// UnderlyingConn returns the internal net.Conn. This can be used to further +// modifications to connection specific flags. +func (c *Conn) UnderlyingConn() net.Conn { + return c.conn +} + +// EnableWriteCompression enables and disables write compression of +// subsequent text and binary messages. This function is a noop if +// compression was not negotiated with the peer. +func (c *Conn) EnableWriteCompression(enable bool) { + c.enableWriteCompression = enable +} + +// SetCompressionLevel sets the flate compression level for subsequent text and +// binary messages. This function is a noop if compression was not negotiated +// with the peer. 
See the compress/flate package for a description of +// compression levels. +func (c *Conn) SetCompressionLevel(level int) error { + if !isValidCompressionLevel(level) { + return errors.New("websocket: invalid compression level") + } + c.compressionLevel = level + return nil +} + +// FormatCloseMessage formats closeCode and text as a WebSocket close message. +// An empty message is returned for code CloseNoStatusReceived. +func FormatCloseMessage(closeCode int, text string) []byte { + if closeCode == CloseNoStatusReceived { + // Return empty message because it's illegal to send + // CloseNoStatusReceived. Return non-nil value in case application + // checks for nil. + return []byte{} + } + buf := make([]byte, 2+len(text)) + binary.BigEndian.PutUint16(buf, uint16(closeCode)) + copy(buf[2:], text) + return buf +} diff --git a/vendor/github.com/gorilla/websocket/conn_write.go b/vendor/github.com/gorilla/websocket/conn_write.go new file mode 100644 index 000000000..a509a21f8 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn_write.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package websocket + +import "net" + +func (c *Conn) writeBufs(bufs ...[]byte) error { + b := net.Buffers(bufs) + _, err := b.WriteTo(c.conn) + return err +} diff --git a/vendor/github.com/gorilla/websocket/conn_write_legacy.go b/vendor/github.com/gorilla/websocket/conn_write_legacy.go new file mode 100644 index 000000000..37edaff5a --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn_write_legacy.go @@ -0,0 +1,18 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.8 + +package websocket + +func (c *Conn) writeBufs(bufs ...[]byte) error { + for _, buf := range bufs { + if len(buf) > 0 { + if _, err := c.conn.Write(buf); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go new file mode 100644 index 000000000..dcce1a63c --- /dev/null +++ b/vendor/github.com/gorilla/websocket/doc.go @@ -0,0 +1,180 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package websocket implements the WebSocket protocol defined in RFC 6455. +// +// Overview +// +// The Conn type represents a WebSocket connection. A server application calls +// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn: +// +// var upgrader = websocket.Upgrader{ +// ReadBufferSize: 1024, +// WriteBufferSize: 1024, +// } +// +// func handler(w http.ResponseWriter, r *http.Request) { +// conn, err := upgrader.Upgrade(w, r, nil) +// if err != nil { +// log.Println(err) +// return +// } +// ... Use conn to send and receive messages. +// } +// +// Call the connection's WriteMessage and ReadMessage methods to send and +// receive messages as a slice of bytes. This snippet of code shows how to echo +// messages using these methods: +// +// for { +// messageType, p, err := conn.ReadMessage() +// if err != nil { +// log.Println(err) +// return +// } +// if err := conn.WriteMessage(messageType, p); err != nil { +// log.Println(err) +// return +// } +// } +// +// In above snippet of code, p is a []byte and messageType is an int with value +// websocket.BinaryMessage or websocket.TextMessage. +// +// An application can also send and receive messages using the io.WriteCloser +// and io.Reader interfaces. 
To send a message, call the connection NextWriter +// method to get an io.WriteCloser, write the message to the writer and close +// the writer when done. To receive a message, call the connection NextReader +// method to get an io.Reader and read until io.EOF is returned. This snippet +// shows how to echo messages using the NextWriter and NextReader methods: +// +// for { +// messageType, r, err := conn.NextReader() +// if err != nil { +// return +// } +// w, err := conn.NextWriter(messageType) +// if err != nil { +// return err +// } +// if _, err := io.Copy(w, r); err != nil { +// return err +// } +// if err := w.Close(); err != nil { +// return err +// } +// } +// +// Data Messages +// +// The WebSocket protocol distinguishes between text and binary data messages. +// Text messages are interpreted as UTF-8 encoded text. The interpretation of +// binary messages is left to the application. +// +// This package uses the TextMessage and BinaryMessage integer constants to +// identify the two data message types. The ReadMessage and NextReader methods +// return the type of the received message. The messageType argument to the +// WriteMessage and NextWriter methods specifies the type of a sent message. +// +// It is the application's responsibility to ensure that text messages are +// valid UTF-8 encoded text. +// +// Control Messages +// +// The WebSocket protocol defines three types of control messages: close, ping +// and pong. Call the connection WriteControl, WriteMessage or NextWriter +// methods to send a control message to the peer. +// +// Connections handle received close messages by calling the handler function +// set with the SetCloseHandler method and by returning a *CloseError from the +// NextReader, ReadMessage or the message Read method. The default close +// handler sends a close message to the peer. +// +// Connections handle received ping messages by calling the handler function +// set with the SetPingHandler method. 
The default ping handler sends a pong +// message to the peer. +// +// Connections handle received pong messages by calling the handler function +// set with the SetPongHandler method. The default pong handler does nothing. +// If an application sends ping messages, then the application should set a +// pong handler to receive the corresponding pong. +// +// The control message handler functions are called from the NextReader, +// ReadMessage and message reader Read methods. The default close and ping +// handlers can block these methods for a short time when the handler writes to +// the connection. +// +// The application must read the connection to process close, ping and pong +// messages sent from the peer. If the application is not otherwise interested +// in messages from the peer, then the application should start a goroutine to +// read and discard messages from the peer. A simple example is: +// +// func readLoop(c *websocket.Conn) { +// for { +// if _, _, err := c.NextReader(); err != nil { +// c.Close() +// break +// } +// } +// } +// +// Concurrency +// +// Connections support one concurrent reader and one concurrent writer. +// +// Applications are responsible for ensuring that no more than one goroutine +// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, +// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and +// that no more than one goroutine calls the read methods (NextReader, +// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler) +// concurrently. +// +// The Close and WriteControl methods can be called concurrently with all other +// methods. +// +// Origin Considerations +// +// Web browsers allow Javascript applications to open a WebSocket connection to +// any host. It's up to the server to enforce an origin policy using the Origin +// request header sent by the browser. +// +// The Upgrader calls the function specified in the CheckOrigin field to check +// the origin. 
If the CheckOrigin function returns false, then the Upgrade +// method fails the WebSocket handshake with HTTP status 403. +// +// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail +// the handshake if the Origin request header is present and the Origin host is +// not equal to the Host request header. +// +// The deprecated package-level Upgrade function does not perform origin +// checking. The application is responsible for checking the Origin header +// before calling the Upgrade function. +// +// Compression EXPERIMENTAL +// +// Per message compression extensions (RFC 7692) are experimentally supported +// by this package in a limited capacity. Setting the EnableCompression option +// to true in Dialer or Upgrader will attempt to negotiate per message deflate +// support. +// +// var upgrader = websocket.Upgrader{ +// EnableCompression: true, +// } +// +// If compression was successfully negotiated with the connection's peer, any +// message received in compressed form will be automatically decompressed. +// All Read methods will return uncompressed bytes. +// +// Per message compression of messages written to a connection can be enabled +// or disabled by calling the corresponding Conn method: +// +// conn.EnableWriteCompression(false) +// +// Currently this package does not support compression with "context takeover". +// This means that messages must be compressed and decompressed in isolation, +// without retaining sliding window or dictionary state across messages. For +// more details refer to RFC 7692. +// +// Use of compression is experimental and may result in decreased performance. +package websocket diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go new file mode 100644 index 000000000..dc2c1f641 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/json.go @@ -0,0 +1,60 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "encoding/json" + "io" +) + +// WriteJSON writes the JSON encoding of v as a message. +// +// Deprecated: Use c.WriteJSON instead. +func WriteJSON(c *Conn, v interface{}) error { + return c.WriteJSON(v) +} + +// WriteJSON writes the JSON encoding of v as a message. +// +// See the documentation for encoding/json Marshal for details about the +// conversion of Go values to JSON. +func (c *Conn) WriteJSON(v interface{}) error { + w, err := c.NextWriter(TextMessage) + if err != nil { + return err + } + err1 := json.NewEncoder(w).Encode(v) + err2 := w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// Deprecated: Use c.ReadJSON instead. +func ReadJSON(c *Conn, v interface{}) error { + return c.ReadJSON(v) +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// See the documentation for the encoding/json Unmarshal function for details +// about the conversion of JSON to a Go value. +func (c *Conn) ReadJSON(v interface{}) error { + _, r, err := c.NextReader() + if err != nil { + return err + } + err = json.NewDecoder(r).Decode(v) + if err == io.EOF { + // One value is expected in the message. + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go new file mode 100644 index 000000000..577fce9ef --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -0,0 +1,54 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. 
+ +// +build !appengine + +package websocket + +import "unsafe" + +const wordSize = int(unsafe.Sizeof(uintptr(0))) + +func maskBytes(key [4]byte, pos int, b []byte) int { + // Mask one byte at a time for small buffers. + if len(b) < 2*wordSize { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 + } + + // Mask one byte at a time to word boundary. + if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { + n = wordSize - n + for i := range b[:n] { + b[i] ^= key[pos&3] + pos++ + } + b = b[n:] + } + + // Create aligned word size key. + var k [wordSize]byte + for i := range k { + k[i] = key[(pos+i)&3] + } + kw := *(*uintptr)(unsafe.Pointer(&k)) + + // Mask one word at a time. + n := (len(b) / wordSize) * wordSize + for i := 0; i < n; i += wordSize { + *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw + } + + // Mask one byte at a time for remaining bytes. + b = b[n:] + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go new file mode 100644 index 000000000..2aac060e5 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask_safe.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +// +build appengine + +package websocket + +func maskBytes(key [4]byte, pos int, b []byte) int { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go new file mode 100644 index 000000000..74ec565d2 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/prepared.go @@ -0,0 +1,102 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "net" + "sync" + "time" +) + +// PreparedMessage caches on the wire representations of a message payload. +// Use PreparedMessage to efficiently send a message payload to multiple +// connections. PreparedMessage is especially useful when compression is used +// because the CPU and memory expensive compression operation can be executed +// once for a given set of compression options. +type PreparedMessage struct { + messageType int + data []byte + mu sync.Mutex + frames map[prepareKey]*preparedFrame +} + +// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage. +type prepareKey struct { + isServer bool + compress bool + compressionLevel int +} + +// preparedFrame contains data in wire representation. +type preparedFrame struct { + once sync.Once + data []byte +} + +// NewPreparedMessage returns an initialized PreparedMessage. You can then send +// it to connection using WritePreparedMessage method. Valid wire +// representation will be calculated lazily only once for a set of current +// connection options. +func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) { + pm := &PreparedMessage{ + messageType: messageType, + frames: make(map[prepareKey]*preparedFrame), + data: data, + } + + // Prepare a plain server frame. + _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false}) + if err != nil { + return nil, err + } + + // To protect against caller modifying the data argument, remember the data + // copied to the plain server frame. 
+ pm.data = frameData[len(frameData)-len(data):] + return pm, nil +} + +func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) { + pm.mu.Lock() + frame, ok := pm.frames[key] + if !ok { + frame = &preparedFrame{} + pm.frames[key] = frame + } + pm.mu.Unlock() + + var err error + frame.once.Do(func() { + // Prepare a frame using a 'fake' connection. + // TODO: Refactor code in conn.go to allow more direct construction of + // the frame. + mu := make(chan bool, 1) + mu <- true + var nc prepareConn + c := &Conn{ + conn: &nc, + mu: mu, + isServer: key.isServer, + compressionLevel: key.compressionLevel, + enableWriteCompression: true, + writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize), + } + if key.compress { + c.newCompressionWriter = compressNoContextTakeover + } + err = c.WriteMessage(pm.messageType, pm.data) + frame.data = nc.buf.Bytes() + }) + return pm.messageType, frame.data, err +} + +type prepareConn struct { + buf bytes.Buffer + net.Conn +} + +func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) } +func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil } diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go new file mode 100644 index 000000000..bf2478e43 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/proxy.go @@ -0,0 +1,77 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "bufio" + "encoding/base64" + "errors" + "net" + "net/http" + "net/url" + "strings" +) + +type netDialerFunc func(network, addr string) (net.Conn, error) + +func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) { + return fn(network, addr) +} + +func init() { + proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) { + return &httpProxyDialer{proxyURL: proxyURL, fowardDial: forwardDialer.Dial}, nil + }) +} + +type httpProxyDialer struct { + proxyURL *url.URL + fowardDial func(network, addr string) (net.Conn, error) +} + +func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) { + hostPort, _ := hostPortNoPort(hpd.proxyURL) + conn, err := hpd.fowardDial(network, hostPort) + if err != nil { + return nil, err + } + + connectHeader := make(http.Header) + if user := hpd.proxyURL.User; user != nil { + proxyUser := user.Username() + if proxyPassword, passwordSet := user.Password(); passwordSet { + credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword)) + connectHeader.Set("Proxy-Authorization", "Basic "+credential) + } + } + + connectReq := &http.Request{ + Method: "CONNECT", + URL: &url.URL{Opaque: addr}, + Host: addr, + Header: connectHeader, + } + + if err := connectReq.Write(conn); err != nil { + conn.Close() + return nil, err + } + + // Read response. It's OK to use and discard buffered reader here becaue + // the remote server does not speak until spoken to. 
+ br := bufio.NewReader(conn) + resp, err := http.ReadResponse(br, connectReq) + if err != nil { + conn.Close() + return nil, err + } + + if resp.StatusCode != 200 { + conn.Close() + f := strings.SplitN(resp.Status, " ", 2) + return nil, errors.New(f[1]) + } + return conn, nil +} diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go new file mode 100644 index 000000000..a761824b3 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/server.go @@ -0,0 +1,363 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "errors" + "io" + "net/http" + "net/url" + "strings" + "time" +) + +// HandshakeError describes an error with the handshake from the peer. +type HandshakeError struct { + message string +} + +func (e HandshakeError) Error() string { return e.message } + +// Upgrader specifies parameters for upgrading an HTTP connection to a +// WebSocket connection. +type Upgrader struct { + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer + // size is zero, then buffers allocated by the HTTP server are used. The + // I/O buffer sizes do not limit the size of the messages that can be sent + // or received. + ReadBufferSize, WriteBufferSize int + + // WriteBufferPool is a pool of buffers for write operations. If the value + // is not set, then write buffers are allocated to the connection for the + // lifetime of the connection. + // + // A pool is most useful when the application has a modest volume of writes + // across a large number of connections. + // + // Applications should use a single pool for each unique value of + // WriteBufferSize. 
+ WriteBufferPool BufferPool + + // Subprotocols specifies the server's supported protocols in order of + // preference. If this field is not nil, then the Upgrade method negotiates a + // subprotocol by selecting the first match in this list with a protocol + // requested by the client. If there's no match, then no protocol is + // negotiated (the Sec-Websocket-Protocol header is not included in the + // handshake response). + Subprotocols []string + + // Error specifies the function for generating HTTP error responses. If Error + // is nil, then http.Error is used to generate the HTTP response. + Error func(w http.ResponseWriter, r *http.Request, status int, reason error) + + // CheckOrigin returns true if the request Origin header is acceptable. If + // CheckOrigin is nil, then a safe default is used: return false if the + // Origin request header is present and the origin host is not equal to + // request Host header. + // + // A CheckOrigin function should carefully validate the request origin to + // prevent cross-site request forgery. + CheckOrigin func(r *http.Request) bool + + // EnableCompression specify if the server should attempt to negotiate per + // message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool +} + +func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { + err := HandshakeError{reason} + if u.Error != nil { + u.Error(w, r, status, err) + } else { + w.Header().Set("Sec-Websocket-Version", "13") + http.Error(w, http.StatusText(status), status) + } + return nil, err +} + +// checkSameOrigin returns true if the origin is not set or is equal to the request host. 
+func checkSameOrigin(r *http.Request) bool { + origin := r.Header["Origin"] + if len(origin) == 0 { + return true + } + u, err := url.Parse(origin[0]) + if err != nil { + return false + } + return equalASCIIFold(u.Host, r.Host) +} + +func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { + if u.Subprotocols != nil { + clientProtocols := Subprotocols(r) + for _, serverProtocol := range u.Subprotocols { + for _, clientProtocol := range clientProtocols { + if clientProtocol == serverProtocol { + return clientProtocol + } + } + } + } else if responseHeader != nil { + return responseHeader.Get("Sec-Websocket-Protocol") + } + return "" +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// application negotiated subprotocol (Sec-WebSocket-Protocol). +// +// If the upgrade fails, then Upgrade replies to the client with an HTTP error +// response. 
+func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { + const badHandshake = "websocket: the client is not using the websocket protocol: " + + if !tokenListContainsValue(r.Header, "Connection", "upgrade") { + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header") + } + + if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header") + } + + if r.Method != "GET" { + return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET") + } + + if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header") + } + + if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported") + } + + checkOrigin := u.CheckOrigin + if checkOrigin == nil { + checkOrigin = checkSameOrigin + } + if !checkOrigin(r) { + return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin") + } + + challengeKey := r.Header.Get("Sec-Websocket-Key") + if challengeKey == "" { + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-WebSocket-Key' header is missing or blank") + } + + subprotocol := u.selectSubprotocol(r, responseHeader) + + // Negotiate PMCE + var compress bool + if u.EnableCompression { + for _, ext := range parseExtensions(r.Header) { + if ext[""] != "permessage-deflate" { + continue + } + compress = true + break + } + } + + h, ok := w.(http.Hijacker) + if !ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not 
implement http.Hijacker") + } + var brw *bufio.ReadWriter + netConn, brw, err := h.Hijack() + if err != nil { + return u.returnError(w, r, http.StatusInternalServerError, err.Error()) + } + + if brw.Reader.Buffered() > 0 { + netConn.Close() + return nil, errors.New("websocket: client sent data before handshake is complete") + } + + var br *bufio.Reader + if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 { + // Reuse hijacked buffered reader as connection reader. + br = brw.Reader + } + + buf := bufioWriterBuffer(netConn, brw.Writer) + + var writeBuf []byte + if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 { + // Reuse hijacked write buffer as connection buffer. + writeBuf = buf + } + + c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf) + c.subprotocol = subprotocol + + if compress { + c.newCompressionWriter = compressNoContextTakeover + c.newDecompressionReader = decompressNoContextTakeover + } + + // Use larger of hijacked buffer and connection write buffer for header. + p := buf + if len(c.writeBuf) > len(p) { + p = c.writeBuf + } + p = p[:0] + + p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) + p = append(p, computeAcceptKey(challengeKey)...) + p = append(p, "\r\n"...) + if c.subprotocol != "" { + p = append(p, "Sec-WebSocket-Protocol: "...) + p = append(p, c.subprotocol...) + p = append(p, "\r\n"...) + } + if compress { + p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...) + } + for k, vs := range responseHeader { + if k == "Sec-Websocket-Protocol" { + continue + } + for _, v := range vs { + p = append(p, k...) + p = append(p, ": "...) + for i := 0; i < len(v); i++ { + b := v[i] + if b <= 31 { + // prevent response splitting. + b = ' ' + } + p = append(p, b) + } + p = append(p, "\r\n"...) 
+ } + } + p = append(p, "\r\n"...) + + // Clear deadlines set by HTTP server. + netConn.SetDeadline(time.Time{}) + + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) + } + if _, err = netConn.Write(p); err != nil { + netConn.Close() + return nil, err + } + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Time{}) + } + + return c, nil +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// Deprecated: Use websocket.Upgrader instead. +// +// Upgrade does not perform origin checking. The application is responsible for +// checking the Origin header before calling Upgrade. An example implementation +// of the same origin policy check is: +// +// if req.Header.Get("Origin") != "http://"+req.Host { +// http.Error(w, "Origin not allowed", http.StatusForbidden) +// return +// } +// +// If the endpoint supports subprotocols, then the application is responsible +// for negotiating the protocol used on the connection. Use the Subprotocols() +// function to get the subprotocols requested by the client. Use the +// Sec-Websocket-Protocol response header to specify the subprotocol selected +// by the application. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// negotiated subprotocol (Sec-Websocket-Protocol). +// +// The connection buffers IO to the underlying network connection. The +// readBufSize and writeBufSize parameters specify the size of the buffers to +// use. Messages can be larger than the buffers. +// +// If the request is not a valid WebSocket handshake, then Upgrade returns an +// error of type HandshakeError. Applications should handle this error by +// replying to the client with an HTTP error response. 
+func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) { + u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize} + u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) { + // don't return errors to maintain backwards compatibility + } + u.CheckOrigin = func(r *http.Request) bool { + // allow all connections by default + return true + } + return u.Upgrade(w, r, responseHeader) +} + +// Subprotocols returns the subprotocols requested by the client in the +// Sec-Websocket-Protocol header. +func Subprotocols(r *http.Request) []string { + h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol")) + if h == "" { + return nil + } + protocols := strings.Split(h, ",") + for i := range protocols { + protocols[i] = strings.TrimSpace(protocols[i]) + } + return protocols +} + +// IsWebSocketUpgrade returns true if the client requested upgrade to the +// WebSocket protocol. +func IsWebSocketUpgrade(r *http.Request) bool { + return tokenListContainsValue(r.Header, "Connection", "upgrade") && + tokenListContainsValue(r.Header, "Upgrade", "websocket") +} + +// bufioReaderSize size returns the size of a bufio.Reader. +func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int { + // This code assumes that peek on a reset reader returns + // bufio.Reader.buf[:0]. + // TODO: Use bufio.Reader.Size() after Go 1.10 + br.Reset(originalReader) + if p, err := br.Peek(0); err == nil { + return cap(p) + } + return 0 +} + +// writeHook is an io.Writer that records the last slice passed to it vio +// io.Writer.Write. +type writeHook struct { + p []byte +} + +func (wh *writeHook) Write(p []byte) (int, error) { + wh.p = p + return len(p), nil +} + +// bufioWriterBuffer grabs the buffer from a bufio.Writer. 
+func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte { + // This code assumes that bufio.Writer.buf[:1] is passed to the + // bufio.Writer's underlying writer. + var wh writeHook + bw.Reset(&wh) + bw.WriteByte(0) + bw.Flush() + + bw.Reset(originalWriter) + + return wh.p[:cap(wh.p)] +} diff --git a/vendor/github.com/gorilla/websocket/trace.go b/vendor/github.com/gorilla/websocket/trace.go new file mode 100644 index 000000000..834f122a0 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/trace.go @@ -0,0 +1,19 @@ +// +build go1.8 + +package websocket + +import ( + "crypto/tls" + "net/http/httptrace" +) + +func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error { + if trace.TLSHandshakeStart != nil { + trace.TLSHandshakeStart() + } + err := doHandshake(tlsConn, cfg) + if trace.TLSHandshakeDone != nil { + trace.TLSHandshakeDone(tlsConn.ConnectionState(), err) + } + return err +} diff --git a/vendor/github.com/gorilla/websocket/trace_17.go b/vendor/github.com/gorilla/websocket/trace_17.go new file mode 100644 index 000000000..77d05a0b5 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/trace_17.go @@ -0,0 +1,12 @@ +// +build !go1.8 + +package websocket + +import ( + "crypto/tls" + "net/http/httptrace" +) + +func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error { + return doHandshake(tlsConn, cfg) +} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go new file mode 100644 index 000000000..354001e1e --- /dev/null +++ b/vendor/github.com/gorilla/websocket/util.go @@ -0,0 +1,237 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "io" + "net/http" + "strings" + "unicode/utf8" +) + +var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") + +func computeAcceptKey(challengeKey string) string { + h := sha1.New() + h.Write([]byte(challengeKey)) + h.Write(keyGUID) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func generateChallengeKey() (string, error) { + p := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, p); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(p), nil +} + +// Octet types from RFC 2616. +var octetTypes [256]byte + +const ( + isTokenOctet = 1 << iota + isSpaceOctet +) + +func init() { + // From RFC 2616 + // + // OCTET = + // CHAR = + // CTL = + // CR = + // LF = + // SP = + // HT = + // <"> = + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := 0; c < 256; c++ { + var t byte + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 + if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { + t |= isSpaceOctet + } + if isChar && !isCtl && !isSeparator { + t |= isTokenOctet + } + octetTypes[c] = t + } +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpaceOctet == 0 { + break + } + } + return s[i:] +} + +func nextToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isTokenOctet == 0 { + break + } + } + return s[:i], s[i:] +} + +func nextTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return nextToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j 
:= copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} + +// equalASCIIFold returns true if s is equal to t with ASCII case folding. +func equalASCIIFold(s, t string) bool { + for s != "" && t != "" { + sr, size := utf8.DecodeRuneInString(s) + s = s[size:] + tr, size := utf8.DecodeRuneInString(t) + t = t[size:] + if sr == tr { + continue + } + if 'A' <= sr && sr <= 'Z' { + sr = sr + 'a' - 'A' + } + if 'A' <= tr && tr <= 'Z' { + tr = tr + 'a' - 'A' + } + if sr != tr { + return false + } + } + return s == t +} + +// tokenListContainsValue returns true if the 1#token header with the given +// name contains a token equal to value with ASCII case folding. +func tokenListContainsValue(header http.Header, name string, value string) bool { +headers: + for _, s := range header[name] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + s = skipSpace(s) + if s != "" && s[0] != ',' { + continue headers + } + if equalASCIIFold(t, value) { + return true + } + if s == "" { + continue headers + } + s = s[1:] + } + } + return false +} + +// parseExtensions parses WebSocket extensions from a header. +func parseExtensions(header http.Header) []map[string]string { + // From RFC 6455: + // + // Sec-WebSocket-Extensions = extension-list + // extension-list = 1#extension + // extension = extension-token *( ";" extension-param ) + // extension-token = registered-token + // registered-token = token + // extension-param = token [ "=" (token | quoted-string) ] + // ;When using the quoted-string syntax variant, the value + // ;after quoted-string unescaping MUST conform to the + // ;'token' ABNF. 
+ + var result []map[string]string +headers: + for _, s := range header["Sec-Websocket-Extensions"] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + ext := map[string]string{"": t} + for { + s = skipSpace(s) + if !strings.HasPrefix(s, ";") { + break + } + var k string + k, s = nextToken(skipSpace(s[1:])) + if k == "" { + continue headers + } + s = skipSpace(s) + var v string + if strings.HasPrefix(s, "=") { + v, s = nextTokenOrQuoted(skipSpace(s[1:])) + s = skipSpace(s) + } + if s != "" && s[0] != ',' && s[0] != ';' { + continue headers + } + ext[k] = v + } + if s != "" && s[0] != ',' { + continue headers + } + result = append(result, ext) + if s == "" { + continue headers + } + s = s[1:] + } + } + return result +} diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go new file mode 100644 index 000000000..2e668f6b8 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/x_net_proxy.go @@ -0,0 +1,473 @@ +// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. +//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy + +// Package proxy provides support for a variety of protocols to proxy network +// data. +// + +package websocket + +import ( + "errors" + "io" + "net" + "net/url" + "os" + "strconv" + "strings" + "sync" +) + +type proxy_direct struct{} + +// Direct is a direct proxy: one that makes network connections directly. +var proxy_Direct = proxy_direct{} + +func (proxy_direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. 
+type proxy_PerHost struct { + def, bypass proxy_Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. +func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost { + return &proxy_PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. 
+func (p *proxy_PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. +func (p *proxy_PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. +func (p *proxy_PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *proxy_PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. +func (p *proxy_PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} + +// A Dialer is a means to establish a connection. +type proxy_Dialer interface { + // Dial connects to the given address via the proxy. 
+ Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. +type proxy_Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy related variables in +// the environment. +func proxy_FromEnvironment() proxy_Dialer { + allProxy := proxy_allProxyEnv.Get() + if len(allProxy) == 0 { + return proxy_Direct + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return proxy_Direct + } + proxy, err := proxy_FromURL(proxyURL, proxy_Direct) + if err != nil { + return proxy_Direct + } + + noProxy := proxy_noProxyEnv.Get() + if len(noProxy) == 0 { + return proxy + } + + perHost := proxy_NewPerHost(proxy, proxy_Direct) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) { + if proxy_proxySchemes == nil { + proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) + } + proxy_proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. 
+func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) { + var auth *proxy_Auth + if u.User != nil { + auth = new(proxy_Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5": + return proxy_SOCKS5("tcp", u.Host, auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. + if proxy_proxySchemes != nil { + if f, ok := proxy_proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + proxy_allProxyEnv = &proxy_envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + proxy_noProxyEnv = &proxy_envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). +// (Borrowed from net/http/transport.go) +type proxy_envOnce struct { + names []string + once sync.Once + val string +} + +func (e *proxy_envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *proxy_envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address +// with an optional username and password. See RFC 1928 and RFC 1929. 
+func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) { + s := &proxy_socks5{ + network: network, + addr: addr, + forward: forward, + } + if auth != nil { + s.user = auth.User + s.password = auth.Password + } + + return s, nil +} + +type proxy_socks5 struct { + user, password string + network, addr string + forward proxy_Dialer +} + +const proxy_socks5Version = 5 + +const ( + proxy_socks5AuthNone = 0 + proxy_socks5AuthPassword = 2 +) + +const proxy_socks5Connect = 1 + +const ( + proxy_socks5IP4 = 1 + proxy_socks5Domain = 3 + proxy_socks5IP6 = 4 +) + +var proxy_socks5Errors = []string{ + "", + "general failure", + "connection forbidden", + "network unreachable", + "host unreachable", + "connection refused", + "TTL expired", + "command not supported", + "address type not supported", +} + +// Dial connects to the address addr on the given network via the SOCKS5 proxy. +func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) + } + + conn, err := s.forward.Dial(s.network, s.addr) + if err != nil { + return nil, err + } + if err := s.connect(conn, addr); err != nil { + conn.Close() + return nil, err + } + return conn, nil +} + +// connect takes an existing connection to a socks5 proxy server, +// and commands the server to extend that connection to target, +// which must be a canonical address with a host and port. 
+func (s *proxy_socks5) connect(conn net.Conn, target string) error { + host, portStr, err := net.SplitHostPort(target) + if err != nil { + return err + } + + port, err := strconv.Atoi(portStr) + if err != nil { + return errors.New("proxy: failed to parse port number: " + portStr) + } + if port < 1 || port > 0xffff { + return errors.New("proxy: port number out of range: " + portStr) + } + + // the size here is just an estimate + buf := make([]byte, 0, 6+len(host)) + + buf = append(buf, proxy_socks5Version) + if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { + buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword) + } else { + buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone) + } + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + if buf[0] != 5 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) + } + if buf[1] == 0xff { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") + } + + // See RFC 1929 + if buf[1] == proxy_socks5AuthPassword { + buf = buf[:0] + buf = append(buf, 1 /* password protocol version */) + buf = append(buf, uint8(len(s.user))) + buf = append(buf, s.user...) + buf = append(buf, uint8(len(s.password))) + buf = append(buf, s.password...) 
+ + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if buf[1] != 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") + } + } + + buf = buf[:0] + buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */) + + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + buf = append(buf, proxy_socks5IP4) + ip = ip4 + } else { + buf = append(buf, proxy_socks5IP6) + } + buf = append(buf, ip...) + } else { + if len(host) > 255 { + return errors.New("proxy: destination host name too long: " + host) + } + buf = append(buf, proxy_socks5Domain) + buf = append(buf, byte(len(host))) + buf = append(buf, host...) + } + buf = append(buf, byte(port>>8), byte(port)) + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:4]); err != nil { + return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + failure := "unknown error" + if int(buf[1]) < len(proxy_socks5Errors) { + failure = proxy_socks5Errors[buf[1]] + } + + if len(failure) > 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) + } + + bytesToDiscard := 0 + switch buf[3] { + case proxy_socks5IP4: + bytesToDiscard = net.IPv4len + case proxy_socks5IP6: + bytesToDiscard = net.IPv6len + case proxy_socks5Domain: + _, err := io.ReadFull(conn, buf[:1]) + if err != nil { + return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + bytesToDiscard = int(buf[0]) + 
default: + return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) + } + + if cap(buf) < bytesToDiscard { + buf = make([]byte, bytesToDiscard) + } else { + buf = buf[:bytesToDiscard] + } + if _, err := io.ReadFull(conn, buf); err != nil { + return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + // Also need to discard the port number + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + return nil +} diff --git a/vendor/github.com/hashicorp/golang-lru/2q.go b/vendor/github.com/hashicorp/golang-lru/2q.go new file mode 100644 index 000000000..e474cd075 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/2q.go @@ -0,0 +1,223 @@ +package lru + +import ( + "fmt" + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +const ( + // Default2QRecentRatio is the ratio of the 2Q cache dedicated + // to recently added entries that have only been accessed once. + Default2QRecentRatio = 0.25 + + // Default2QGhostEntries is the default ratio of ghost + // entries kept to track entries recently evicted + Default2QGhostEntries = 0.50 +) + +// TwoQueueCache is a thread-safe fixed size 2Q cache. +// 2Q is an enhancement over the standard LRU cache +// in that it tracks both frequently and recently used +// entries separately. This avoids a burst in access to new +// entries from evicting frequently used entries. It adds some +// additional tracking overhead to the standard LRU cache, and is +// computationally about 2x the cost, and adds some metadata over +// head. The ARCCache is similar, but does not require setting any +// parameters. 
+type TwoQueueCache struct { + size int + recentSize int + + recent simplelru.LRUCache + frequent simplelru.LRUCache + recentEvict simplelru.LRUCache + lock sync.RWMutex +} + +// New2Q creates a new TwoQueueCache using the default +// values for the parameters. +func New2Q(size int) (*TwoQueueCache, error) { + return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries) +} + +// New2QParams creates a new TwoQueueCache using the provided +// parameter values. +func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) { + if size <= 0 { + return nil, fmt.Errorf("invalid size") + } + if recentRatio < 0.0 || recentRatio > 1.0 { + return nil, fmt.Errorf("invalid recent ratio") + } + if ghostRatio < 0.0 || ghostRatio > 1.0 { + return nil, fmt.Errorf("invalid ghost ratio") + } + + // Determine the sub-sizes + recentSize := int(float64(size) * recentRatio) + evictSize := int(float64(size) * ghostRatio) + + // Allocate the LRUs + recent, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + frequent, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + recentEvict, err := simplelru.NewLRU(evictSize, nil) + if err != nil { + return nil, err + } + + // Initialize the cache + c := &TwoQueueCache{ + size: size, + recentSize: recentSize, + recent: recent, + frequent: frequent, + recentEvict: recentEvict, + } + return c, nil +} + +// Get looks up a key's value from the cache. +func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if this is a frequent value + if val, ok := c.frequent.Get(key); ok { + return val, ok + } + + // If the value is contained in recent, then we + // promote it to frequent + if val, ok := c.recent.Peek(key); ok { + c.recent.Remove(key) + c.frequent.Add(key, val) + return val, ok + } + + // No hit + return nil, false +} + +// Add adds a value to the cache. 
+func (c *TwoQueueCache) Add(key, value interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if the value is frequently used already, + // and just update the value + if c.frequent.Contains(key) { + c.frequent.Add(key, value) + return + } + + // Check if the value is recently used, and promote + // the value into the frequent list + if c.recent.Contains(key) { + c.recent.Remove(key) + c.frequent.Add(key, value) + return + } + + // If the value was recently evicted, add it to the + // frequently used list + if c.recentEvict.Contains(key) { + c.ensureSpace(true) + c.recentEvict.Remove(key) + c.frequent.Add(key, value) + return + } + + // Add to the recently seen list + c.ensureSpace(false) + c.recent.Add(key, value) + return +} + +// ensureSpace is used to ensure we have space in the cache +func (c *TwoQueueCache) ensureSpace(recentEvict bool) { + // If we have space, nothing to do + recentLen := c.recent.Len() + freqLen := c.frequent.Len() + if recentLen+freqLen < c.size { + return + } + + // If the recent buffer is larger than + // the target, evict from there + if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { + k, _, _ := c.recent.RemoveOldest() + c.recentEvict.Add(k, nil) + return + } + + // Remove from the frequent list otherwise + c.frequent.RemoveOldest() +} + +// Len returns the number of items in the cache. +func (c *TwoQueueCache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.recent.Len() + c.frequent.Len() +} + +// Keys returns a slice of the keys in the cache. +// The frequently used keys are first in the returned slice. +func (c *TwoQueueCache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + k1 := c.frequent.Keys() + k2 := c.recent.Keys() + return append(k1, k2...) +} + +// Remove removes the provided key from the cache. 
+func (c *TwoQueueCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + if c.frequent.Remove(key) { + return + } + if c.recent.Remove(key) { + return + } + if c.recentEvict.Remove(key) { + return + } +} + +// Purge is used to completely clear the cache. +func (c *TwoQueueCache) Purge() { + c.lock.Lock() + defer c.lock.Unlock() + c.recent.Purge() + c.frequent.Purge() + c.recentEvict.Purge() +} + +// Contains is used to check if the cache contains a key +// without updating recency or frequency. +func (c *TwoQueueCache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.frequent.Contains(key) || c.recent.Contains(key) +} + +// Peek is used to inspect the cache value of a key +// without updating recency or frequency. +func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if val, ok := c.frequent.Peek(key); ok { + return val, ok + } + return c.recent.Peek(key) +} diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE new file mode 100644 index 000000000..be2cc4dfb --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. 
"Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. 
For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. 
+ + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. 
You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. 
You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. 
If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. 
Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. 
diff --git a/vendor/github.com/hashicorp/golang-lru/README.md b/vendor/github.com/hashicorp/golang-lru/README.md new file mode 100644 index 000000000..33e58cfaf --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/README.md @@ -0,0 +1,25 @@ +golang-lru +========== + +This provides the `lru` package which implements a fixed-size +thread safe LRU cache. It is based on the cache in Groupcache. + +Documentation +============= + +Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru) + +Example +======= + +Using the LRU is very simple: + +```go +l, _ := New(128) +for i := 0; i < 256; i++ { + l.Add(i, nil) +} +if l.Len() != 128 { + panic(fmt.Sprintf("bad len: %v", l.Len())) +} +``` diff --git a/vendor/github.com/hashicorp/golang-lru/arc.go b/vendor/github.com/hashicorp/golang-lru/arc.go new file mode 100644 index 000000000..555225a21 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/arc.go @@ -0,0 +1,257 @@ +package lru + +import ( + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC). +// ARC is an enhancement over the standard LRU cache in that tracks both +// frequency and recency of use. This avoids a burst in access to new +// entries from evicting the frequently used older entries. It adds some +// additional tracking overhead to a standard LRU cache, computationally +// it is roughly 2x the cost, and the extra memory overhead is linear +// with the size of the cache. ARC has been patented by IBM, but is +// similar to the TwoQueueCache (2Q) which requires setting parameters. 
+type ARCCache struct { + size int // Size is the total capacity of the cache + p int // P is the dynamic preference towards T1 or T2 + + t1 simplelru.LRUCache // T1 is the LRU for recently accessed items + b1 simplelru.LRUCache // B1 is the LRU for evictions from t1 + + t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items + b2 simplelru.LRUCache // B2 is the LRU for evictions from t2 + + lock sync.RWMutex +} + +// NewARC creates an ARC of the given size +func NewARC(size int) (*ARCCache, error) { + // Create the sub LRUs + b1, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + b2, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + t1, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + t2, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + + // Initialize the ARC + c := &ARCCache{ + size: size, + p: 0, + t1: t1, + b1: b1, + t2: t2, + b2: b2, + } + return c, nil +} + +// Get looks up a key's value from the cache. +func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) { + c.lock.Lock() + defer c.lock.Unlock() + + // If the value is contained in T1 (recent), then + // promote it to T2 (frequent) + if val, ok := c.t1.Peek(key); ok { + c.t1.Remove(key) + c.t2.Add(key, val) + return val, ok + } + + // Check if the value is contained in T2 (frequent) + if val, ok := c.t2.Get(key); ok { + return val, ok + } + + // No hit + return nil, false +} + +// Add adds a value to the cache. 
+func (c *ARCCache) Add(key, value interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if the value is contained in T1 (recent), and potentially + // promote it to frequent T2 + if c.t1.Contains(key) { + c.t1.Remove(key) + c.t2.Add(key, value) + return + } + + // Check if the value is already in T2 (frequent) and update it + if c.t2.Contains(key) { + c.t2.Add(key, value) + return + } + + // Check if this value was recently evicted as part of the + // recently used list + if c.b1.Contains(key) { + // T1 set is too small, increase P appropriately + delta := 1 + b1Len := c.b1.Len() + b2Len := c.b2.Len() + if b2Len > b1Len { + delta = b2Len / b1Len + } + if c.p+delta >= c.size { + c.p = c.size + } else { + c.p += delta + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(false) + } + + // Remove from B1 + c.b1.Remove(key) + + // Add the key to the frequently used list + c.t2.Add(key, value) + return + } + + // Check if this value was recently evicted as part of the + // frequently used list + if c.b2.Contains(key) { + // T2 set is too small, decrease P appropriately + delta := 1 + b1Len := c.b1.Len() + b2Len := c.b2.Len() + if b1Len > b2Len { + delta = b1Len / b2Len + } + if delta >= c.p { + c.p = 0 + } else { + c.p -= delta + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(true) + } + + // Remove from B2 + c.b2.Remove(key) + + // Add the key to the frequently used list + c.t2.Add(key, value) + return + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(false) + } + + // Keep the size of the ghost buffers trim + if c.b1.Len() > c.size-c.p { + c.b1.RemoveOldest() + } + if c.b2.Len() > c.p { + c.b2.RemoveOldest() + } + + // Add to the recently seen list + c.t1.Add(key, value) + return +} + +// replace is used to adaptively evict from either T1 or T2 +// based on the current learned value of P +func 
(c *ARCCache) replace(b2ContainsKey bool) { + t1Len := c.t1.Len() + if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) { + k, _, ok := c.t1.RemoveOldest() + if ok { + c.b1.Add(k, nil) + } + } else { + k, _, ok := c.t2.RemoveOldest() + if ok { + c.b2.Add(k, nil) + } + } +} + +// Len returns the number of cached entries +func (c *ARCCache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.t1.Len() + c.t2.Len() +} + +// Keys returns all the cached keys +func (c *ARCCache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + k1 := c.t1.Keys() + k2 := c.t2.Keys() + return append(k1, k2...) +} + +// Remove is used to purge a key from the cache +func (c *ARCCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + if c.t1.Remove(key) { + return + } + if c.t2.Remove(key) { + return + } + if c.b1.Remove(key) { + return + } + if c.b2.Remove(key) { + return + } +} + +// Purge is used to clear the cache +func (c *ARCCache) Purge() { + c.lock.Lock() + defer c.lock.Unlock() + c.t1.Purge() + c.t2.Purge() + c.b1.Purge() + c.b2.Purge() +} + +// Contains is used to check if the cache contains a key +// without updating recency or frequency. +func (c *ARCCache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.t1.Contains(key) || c.t2.Contains(key) +} + +// Peek is used to inspect the cache value of a key +// without updating recency or frequency. +func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if val, ok := c.t1.Peek(key); ok { + return val, ok + } + return c.t2.Peek(key) +} diff --git a/vendor/github.com/hashicorp/golang-lru/doc.go b/vendor/github.com/hashicorp/golang-lru/doc.go new file mode 100644 index 000000000..2547df979 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/doc.go @@ -0,0 +1,21 @@ +// Package lru provides three different LRU caches of varying sophistication. 
+// +// Cache is a simple LRU cache. It is based on the +// LRU implementation in groupcache: +// https://github.com/golang/groupcache/tree/master/lru +// +// TwoQueueCache tracks frequently used and recently used entries separately. +// This avoids a burst of accesses from taking out frequently used entries, +// at the cost of about 2x computational overhead and some extra bookkeeping. +// +// ARCCache is an adaptive replacement cache. It tracks recent evictions as +// well as recent usage in both the frequent and recent caches. Its +// computational overhead is comparable to TwoQueueCache, but the memory +// overhead is linear with the size of the cache. +// +// ARC has been patented by IBM, so do not use it if that is problematic for +// your program. +// +// All caches in this package take locks while operating, and are therefore +// thread-safe for consumers. +package lru diff --git a/vendor/github.com/hashicorp/golang-lru/go.mod b/vendor/github.com/hashicorp/golang-lru/go.mod new file mode 100644 index 000000000..824cb97e8 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/golang-lru diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go new file mode 100644 index 000000000..c8d9b0a23 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/lru.go @@ -0,0 +1,110 @@ +package lru + +import ( + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +// Cache is a thread-safe fixed size LRU cache. +type Cache struct { + lru simplelru.LRUCache + lock sync.RWMutex +} + +// New creates an LRU of the given size. +func New(size int) (*Cache, error) { + return NewWithEvict(size, nil) +} + +// NewWithEvict constructs a fixed size cache with the given eviction +// callback. 
+func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) { + lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted)) + if err != nil { + return nil, err + } + c := &Cache{ + lru: lru, + } + return c, nil +} + +// Purge is used to completely clear the cache. +func (c *Cache) Purge() { + c.lock.Lock() + c.lru.Purge() + c.lock.Unlock() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *Cache) Add(key, value interface{}) (evicted bool) { + c.lock.Lock() + defer c.lock.Unlock() + return c.lru.Add(key, value) +} + +// Get looks up a key's value from the cache. +func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { + c.lock.Lock() + defer c.lock.Unlock() + return c.lru.Get(key) +} + +// Contains checks if a key is in the cache, without updating the +// recent-ness or deleting it for being stale. +func (c *Cache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Contains(key) +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Peek(key) +} + +// ContainsOrAdd checks if a key is in the cache without updating the +// recent-ness or deleting it for being stale, and if not, adds the value. +// Returns whether found and whether an eviction occurred. +func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { + c.lock.Lock() + defer c.lock.Unlock() + + if c.lru.Contains(key) { + return true, false + } + evicted = c.lru.Add(key, value) + return false, evicted +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key interface{}) { + c.lock.Lock() + c.lru.Remove(key) + c.lock.Unlock() +} + +// RemoveOldest removes the oldest item from the cache. 
+func (c *Cache) RemoveOldest() { + c.lock.Lock() + c.lru.RemoveOldest() + c.lock.Unlock() +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *Cache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Keys() +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Len() +} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go new file mode 100644 index 000000000..5673773b2 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go @@ -0,0 +1,161 @@ +package simplelru + +import ( + "container/list" + "errors" +) + +// EvictCallback is used to get a callback when a cache entry is evicted +type EvictCallback func(key interface{}, value interface{}) + +// LRU implements a non-thread safe fixed size LRU cache +type LRU struct { + size int + evictList *list.List + items map[interface{}]*list.Element + onEvict EvictCallback +} + +// entry is used to hold a value in the evictList +type entry struct { + key interface{} + value interface{} +} + +// NewLRU constructs an LRU of the given size +func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { + if size <= 0 { + return nil, errors.New("Must provide a positive size") + } + c := &LRU{ + size: size, + evictList: list.New(), + items: make(map[interface{}]*list.Element), + onEvict: onEvict, + } + return c, nil +} + +// Purge is used to completely clear the cache. +func (c *LRU) Purge() { + for k, v := range c.items { + if c.onEvict != nil { + c.onEvict(k, v.Value.(*entry).value) + } + delete(c.items, k) + } + c.evictList.Init() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. 
+func (c *LRU) Add(key, value interface{}) (evicted bool) { + // Check for existing item + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + ent.Value.(*entry).value = value + return false + } + + // Add new item + ent := &entry{key, value} + entry := c.evictList.PushFront(ent) + c.items[key] = entry + + evict := c.evictList.Len() > c.size + // Verify size not exceeded + if evict { + c.removeOldest() + } + return evict +} + +// Get looks up a key's value from the cache. +func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + return ent.Value.(*entry).value, true + } + return +} + +// Contains checks if a key is in the cache, without updating the recent-ness +// or deleting it for being stale. +func (c *LRU) Contains(key interface{}) (ok bool) { + _, ok = c.items[key] + return ok +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { + var ent *list.Element + if ent, ok = c.items[key]; ok { + return ent.Value.(*entry).value, true + } + return nil, ok +} + +// Remove removes the provided key from the cache, returning if the +// key was contained. +func (c *LRU) Remove(key interface{}) (present bool) { + if ent, ok := c.items[key]; ok { + c.removeElement(ent) + return true + } + return false +} + +// RemoveOldest removes the oldest item from the cache. 
+func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// GetOldest returns the oldest entry +func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { + ent := c.evictList.Back() + if ent != nil { + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *LRU) Keys() []interface{} { + keys := make([]interface{}, len(c.items)) + i := 0 + for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { + keys[i] = ent.Value.(*entry).key + i++ + } + return keys +} + +// Len returns the number of items in the cache. +func (c *LRU) Len() int { + return c.evictList.Len() +} + +// removeOldest removes the oldest item from the cache. +func (c *LRU) removeOldest() { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + } +} + +// removeElement is used to remove a given list element from the cache +func (c *LRU) removeElement(e *list.Element) { + c.evictList.Remove(e) + kv := e.Value.(*entry) + delete(c.items, kv.key) + if c.onEvict != nil { + c.onEvict(kv.key, kv.value) + } +} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go new file mode 100644 index 000000000..74c707744 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go @@ -0,0 +1,36 @@ +package simplelru + +// LRUCache is the interface for simple LRU cache. +type LRUCache interface { + // Adds a value to the cache, returns true if an eviction occurred and + // updates the "recently used"-ness of the key. + Add(key, value interface{}) bool + + // Returns key's value from the cache and + // updates the "recently used"-ness of the key. 
#value, isFound
+	Get(key interface{}) (value interface{}, ok bool)
+
+	// Check if a key exists in cache without updating the recent-ness.
+	Contains(key interface{}) (ok bool)
+
+	// Returns key's value without updating the "recently used"-ness of the key.
+	Peek(key interface{}) (value interface{}, ok bool)
+
+	// Removes a key from the cache.
+	Remove(key interface{}) bool
+
+	// Removes the oldest entry from cache.
+	RemoveOldest() (interface{}, interface{}, bool)
+
+	// Returns the oldest entry from the cache. #key, value, isFound
+	GetOldest() (interface{}, interface{}, bool)
+
+	// Returns a slice of the keys in the cache, from oldest to newest.
+	Keys() []interface{}
+
+	// Returns the number of items in the cache.
+	Len() int
+
+	// Clear all cache entries
+	Purge()
+}
diff --git a/vendor/github.com/sourcegraph/jsonrpc2/LICENSE b/vendor/github.com/sourcegraph/jsonrpc2/LICENSE
new file mode 100644
index 000000000..89d864bd8
--- /dev/null
+++ b/vendor/github.com/sourcegraph/jsonrpc2/LICENSE
@@ -0,0 +1,9 @@
+Copyright (c) 2016 Sourcegraph Inc
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/sourcegraph/jsonrpc2/README.md b/vendor/github.com/sourcegraph/jsonrpc2/README.md new file mode 100644 index 000000000..d2406ab07 --- /dev/null +++ b/vendor/github.com/sourcegraph/jsonrpc2/README.md @@ -0,0 +1,12 @@ +# jsonrpc2: JSON-RPC 2.0 implementation for Go [![Build Status](https://travis-ci.org/sourcegraph/jsonrpc2.svg)](https://travis-ci.org/sourcegraph/jsonrpc2) [![Sourcegraph](https://sourcegraph.com/github.com/sourcegraph/jsonrpc2/-/badge.svg)](https://sourcegraph.com/github.com/sourcegraph/jsonrpc2?badge) [![GoDoc](https://godoc.org/github.com/sourcegraph/jsonrpc2?status.svg)](https://godoc.org/github.com/sourcegraph/jsonrpc2) + + +Package jsonrpc2 provides a [Go](https://golang.org) implementation of [JSON-RPC 2.0](http://www.jsonrpc.org/specification). + +This package is **experimental** until further notice. + +[**Open the code in Sourcegraph**](https://sourcegraph.com/github.com/sourcegraph/jsonrpc2) + +## Known issues + +* Batch requests and responses are not yet supported. diff --git a/vendor/github.com/sourcegraph/jsonrpc2/async.go b/vendor/github.com/sourcegraph/jsonrpc2/async.go new file mode 100644 index 000000000..bc8a3708a --- /dev/null +++ b/vendor/github.com/sourcegraph/jsonrpc2/async.go @@ -0,0 +1,17 @@ +package jsonrpc2 + +import "context" + +// AsyncHandler wraps a Handler such that each request is handled in its own +// goroutine. It is a convenience wrapper. 
+func AsyncHandler(h Handler) Handler { + return asyncHandler{h} +} + +type asyncHandler struct { + Handler +} + +func (h asyncHandler) Handle(ctx context.Context, conn *Conn, req *Request) { + go h.Handler.Handle(ctx, conn, req) +} diff --git a/vendor/github.com/sourcegraph/jsonrpc2/call_opt.go b/vendor/github.com/sourcegraph/jsonrpc2/call_opt.go new file mode 100644 index 000000000..b554baca8 --- /dev/null +++ b/vendor/github.com/sourcegraph/jsonrpc2/call_opt.go @@ -0,0 +1,30 @@ +package jsonrpc2 + +// CallOption is an option that can be provided to (*Conn).Call to +// configure custom behavior. See Meta. +type CallOption interface { + apply(r *Request) error +} + +type callOptionFunc func(r *Request) error + +func (c callOptionFunc) apply(r *Request) error { return c(r) } + +// Meta returns a call option which attaches the given meta object to +// the JSON-RPC 2.0 request (this is a Sourcegraph extension to JSON +// RPC 2.0 for carrying metadata). +func Meta(meta interface{}) CallOption { + return callOptionFunc(func(r *Request) error { + return r.SetMeta(meta) + }) +} + +// PickID returns a call option which sets the ID on a request. Care must be +// taken to ensure there are no conflicts with any previously picked ID, nor +// with the default sequence ID. +func PickID(id ID) CallOption { + return callOptionFunc(func(r *Request) error { + r.ID = id + return nil + }) +} diff --git a/vendor/github.com/sourcegraph/jsonrpc2/conn_opt.go b/vendor/github.com/sourcegraph/jsonrpc2/conn_opt.go new file mode 100644 index 000000000..e6346bae8 --- /dev/null +++ b/vendor/github.com/sourcegraph/jsonrpc2/conn_opt.go @@ -0,0 +1,101 @@ +package jsonrpc2 + +import ( + "encoding/json" + "sync" +) + +// Logger interface implements one method - Printf. +// You can use the stdlib logger *log.Logger +type Logger interface { + Printf(format string, v ...interface{}) +} + +// ConnOpt is the type of function that can be passed to NewConn to +// customize the Conn before it is created. 
+type ConnOpt func(*Conn) + +// OnRecv causes all requests received on conn to invoke f(req, nil) +// and all responses to invoke f(req, resp), +func OnRecv(f func(*Request, *Response)) ConnOpt { + return func(c *Conn) { c.onRecv = append(c.onRecv, f) } +} + +// OnSend causes all requests sent on conn to invoke f(req, nil) and +// all responses to invoke f(nil, resp), +func OnSend(f func(*Request, *Response)) ConnOpt { + return func(c *Conn) { c.onSend = append(c.onSend, f) } +} + +// LogMessages causes all messages sent and received on conn to be +// logged using the provided logger. +func LogMessages(log Logger) ConnOpt { + return func(c *Conn) { + // Remember reqs we have received so we can helpfully show the + // request method in OnSend for responses. + var ( + mu sync.Mutex + reqMethods = map[ID]string{} + ) + + OnRecv(func(req *Request, resp *Response) { + switch { + case req != nil && resp == nil: + mu.Lock() + reqMethods[req.ID] = req.Method + mu.Unlock() + + params, _ := json.Marshal(req.Params) + if req.Notif { + log.Printf("--> notif: %s: %s", req.Method, params) + } else { + log.Printf("--> request #%s: %s: %s", req.ID, req.Method, params) + } + + case resp != nil: + var method string + if req != nil { + method = req.Method + } else { + method = "(no matching request)" + } + switch { + case resp.Result != nil: + result, _ := json.Marshal(resp.Result) + log.Printf("--> result #%s: %s: %s", resp.ID, method, result) + case resp.Error != nil: + err, _ := json.Marshal(resp.Error) + log.Printf("--> error #%s: %s: %s", resp.ID, method, err) + } + } + })(c) + OnSend(func(req *Request, resp *Response) { + switch { + case req != nil: + params, _ := json.Marshal(req.Params) + if req.Notif { + log.Printf("<-- notif: %s: %s", req.Method, params) + } else { + log.Printf("<-- request #%s: %s: %s", req.ID, req.Method, params) + } + + case resp != nil: + mu.Lock() + method := reqMethods[resp.ID] + delete(reqMethods, resp.ID) + mu.Unlock() + if method == "" { + method = 
"(no previous request)" + } + + if resp.Result != nil { + result, _ := json.Marshal(resp.Result) + log.Printf("<-- result #%s: %s: %s", resp.ID, method, result) + } else { + err, _ := json.Marshal(resp.Error) + log.Printf("<-- error #%s: %s: %s", resp.ID, method, err) + } + } + })(c) + } +} diff --git a/vendor/github.com/sourcegraph/jsonrpc2/handler_with_error.go b/vendor/github.com/sourcegraph/jsonrpc2/handler_with_error.go new file mode 100644 index 000000000..6f056ccad --- /dev/null +++ b/vendor/github.com/sourcegraph/jsonrpc2/handler_with_error.go @@ -0,0 +1,64 @@ +package jsonrpc2 + +import ( + "context" + "log" +) + +// HandlerWithError implements Handler by calling the func for each +// request and handling returned errors and results. +func HandlerWithError(handleFunc func(context.Context, *Conn, *Request) (result interface{}, err error)) *HandlerWithErrorConfigurer { + return &HandlerWithErrorConfigurer{handleFunc: handleFunc} +} + +type HandlerWithErrorConfigurer struct { + handleFunc func(context.Context, *Conn, *Request) (result interface{}, err error) + suppressErrClosed bool +} + +// Handle implements Handler. +func (h *HandlerWithErrorConfigurer) Handle(ctx context.Context, conn *Conn, req *Request) { + result, err := h.handleFunc(ctx, conn, req) + if req.Notif { + if err != nil { + log.Printf("jsonrpc2 handler: notification %q handling error: %s", req.Method, err) + } + return + } + + resp := &Response{ID: req.ID} + if err == nil { + err = resp.SetResult(result) + } + if err != nil { + if e, ok := err.(*Error); ok { + resp.Error = e + } else { + resp.Error = &Error{Message: err.Error()} + } + } + + if !req.Notif { + if err := conn.SendResponse(ctx, resp); err != nil { + if err != ErrClosed || !h.suppressErrClosed { + log.Printf("jsonrpc2 handler: sending response %s: %s", resp.ID, err) + } + } + } +} + +// SuppressErrClosed makes the handler suppress jsonrpc2.ErrClosed errors from +// being logged. The original handler `h` is returned. 
+// +// This is optional because only in some cases is this behavior desired. For +// example, a handler that serves end-user connections may not want to log +// ErrClosed because it just indicates the end-user connection has gone away +// for any reason (they could have lost wifi connection, are no longer +// interested in the request and closed the connection, etc) and as such it +// would be log spam, whereas a handler that serves internal connections would +// never expect connections to go away unexpectedly (which could indicate +// service degradation, etc) and as such ErrClosed should always be logged. +func (h *HandlerWithErrorConfigurer) SuppressErrClosed() Handler { + h.suppressErrClosed = true + return h +} diff --git a/vendor/github.com/sourcegraph/jsonrpc2/jsonrpc2.go b/vendor/github.com/sourcegraph/jsonrpc2/jsonrpc2.go new file mode 100644 index 000000000..3e0763d73 --- /dev/null +++ b/vendor/github.com/sourcegraph/jsonrpc2/jsonrpc2.go @@ -0,0 +1,691 @@ +// Package jsonrpc2 provides a client and server implementation of +// [JSON-RPC 2.0](http://www.jsonrpc.org/specification). +package jsonrpc2 + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "strconv" + "sync" +) + +// JSONRPC2 describes an interface for issuing requests that speak the +// JSON-RPC 2 protocol. It isn't really necessary for this package +// itself, but is useful for external users that use the interface as +// an API boundary. +type JSONRPC2 interface { + // Call issues a standard request (http://www.jsonrpc.org/specification#request_object). + Call(ctx context.Context, method string, params, result interface{}, opt ...CallOption) error + + // Notify issues a notification request (http://www.jsonrpc.org/specification#notification). + Notify(ctx context.Context, method string, params interface{}, opt ...CallOption) error + + // Close closes the underlying connection, if it exists. 
+ Close() error +} + +// Request represents a JSON-RPC request or +// notification. See +// http://www.jsonrpc.org/specification#request_object and +// http://www.jsonrpc.org/specification#notification. +type Request struct { + Method string `json:"method"` + Params *json.RawMessage `json:"params,omitempty"` + ID ID `json:"id"` + Notif bool `json:"-"` + + // Meta optionally provides metadata to include in the request. + // + // NOTE: It is not part of spec. However, it is useful for propogating + // tracing context, etc. + Meta *json.RawMessage `json:"meta,omitempty"` +} + +// MarshalJSON implements json.Marshaler and adds the "jsonrpc":"2.0" +// property. +func (r Request) MarshalJSON() ([]byte, error) { + r2 := struct { + Method string `json:"method"` + Params *json.RawMessage `json:"params,omitempty"` + ID *ID `json:"id,omitempty"` + Meta *json.RawMessage `json:"meta,omitempty"` + JSONRPC string `json:"jsonrpc"` + }{ + Method: r.Method, + Params: r.Params, + Meta: r.Meta, + JSONRPC: "2.0", + } + if !r.Notif { + r2.ID = &r.ID + } + return json.Marshal(r2) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (r *Request) UnmarshalJSON(data []byte) error { + var r2 struct { + Method string `json:"method"` + Params *json.RawMessage `json:"params,omitempty"` + Meta *json.RawMessage `json:"meta,omitempty"` + ID *ID `json:"id"` + } + + // Detect if the "params" field is JSON "null" or just not present + // by seeing if the field gets overwritten to nil. + r2.Params = &json.RawMessage{} + + if err := json.Unmarshal(data, &r2); err != nil { + return err + } + r.Method = r2.Method + if r2.Params == nil { + r.Params = &jsonNull + } else if len(*r2.Params) == 0 { + r.Params = nil + } else { + r.Params = r2.Params + } + r.Meta = r2.Meta + if r2.ID == nil { + r.ID = ID{} + r.Notif = true + } else { + r.ID = *r2.ID + r.Notif = false + } + return nil +} + +// SetParams sets r.Params to the JSON representation of v. If JSON +// marshaling fails, it returns an error. 
+func (r *Request) SetParams(v interface{}) error { + b, err := json.Marshal(v) + if err != nil { + return err + } + r.Params = (*json.RawMessage)(&b) + return nil +} + +// SetMeta sets r.Meta to the JSON representation of v. If JSON +// marshaling fails, it returns an error. +func (r *Request) SetMeta(v interface{}) error { + b, err := json.Marshal(v) + if err != nil { + return err + } + r.Meta = (*json.RawMessage)(&b) + return nil +} + +// Response represents a JSON-RPC response. See +// http://www.jsonrpc.org/specification#response_object. +type Response struct { + ID ID `json:"id"` + Result *json.RawMessage `json:"result,omitempty"` + Error *Error `json:"error,omitempty"` + + // Meta optionally provides metadata to include in the response. + // + // NOTE: It is not part of spec. However, it is useful for propogating + // tracing context, etc. + Meta *json.RawMessage `json:"meta,omitempty"` + + // SPEC NOTE: The spec says "If there was an error in detecting + // the id in the Request object (e.g. Parse error/Invalid + // Request), it MUST be Null." If we made the ID field nullable, + // then we'd have to make it a pointer type. For simplicity, we're + // ignoring the case where there was an error in detecting the ID + // in the Request object. +} + +// MarshalJSON implements json.Marshaler and adds the "jsonrpc":"2.0" +// property. +func (r Response) MarshalJSON() ([]byte, error) { + if (r.Result == nil || len(*r.Result) == 0) && r.Error == nil { + return nil, errors.New("can't marshal *jsonrpc2.Response (must have result or error)") + } + type tmpType Response // avoid infinite MarshalJSON recursion + b, err := json.Marshal(tmpType(r)) + if err != nil { + return nil, err + } + b = append(b[:len(b)-1], []byte(`,"jsonrpc":"2.0"}`)...) + return b, nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (r *Response) UnmarshalJSON(data []byte) error { + type tmpType Response + + // Detect if the "result" field is JSON "null" or just not present + // by seeing if the field gets overwritten to nil. + *r = Response{Result: &json.RawMessage{}} + + if err := json.Unmarshal(data, (*tmpType)(r)); err != nil { + return err + } + if r.Result == nil { // JSON "null" + r.Result = &jsonNull + } else if len(*r.Result) == 0 { + r.Result = nil + } + return nil +} + +// SetResult sets r.Result to the JSON representation of v. If JSON +// marshaling fails, it returns an error. +func (r *Response) SetResult(v interface{}) error { + b, err := json.Marshal(v) + if err != nil { + return err + } + r.Result = (*json.RawMessage)(&b) + return nil +} + +// Error represents a JSON-RPC response error. +type Error struct { + Code int64 `json:"code"` + Message string `json:"message"` + Data *json.RawMessage `json:"data"` +} + +// SetError sets e.Error to the JSON representation of v. If JSON +// marshaling fails, it panics. +func (e *Error) SetError(v interface{}) { + b, err := json.Marshal(v) + if err != nil { + panic("Error.SetData: " + err.Error()) + } + e.Data = (*json.RawMessage)(&b) +} + +// Error implements the Go error interface. +func (e *Error) Error() string { + return fmt.Sprintf("jsonrpc2: code %v message: %s", e.Code, e.Message) +} + +const ( + // Errors defined in the JSON-RPC spec. See + // http://www.jsonrpc.org/specification#error_object. + CodeParseError = -32700 + CodeInvalidRequest = -32600 + CodeMethodNotFound = -32601 + CodeInvalidParams = -32602 + CodeInternalError = -32603 + codeServerErrorStart = -32099 + codeServerErrorEnd = -32000 +) + +// Handler handles JSON-RPC requests and notifications. +type Handler interface { + // Handle is called to handle a request. No other requests are handled + // until it returns. If you do not require strict ordering behaviour + // of received RPCs, it is suggested to wrap your handler in + // AsyncHandler. 
+ Handle(context.Context, *Conn, *Request) +} + +// ID represents a JSON-RPC 2.0 request ID, which may be either a +// string or number (or null, which is unsupported). +type ID struct { + // At most one of Num or Str may be nonzero. If both are zero + // valued, then IsNum specifies which field's value is to be used + // as the ID. + Num uint64 + Str string + + // IsString controls whether the Num or Str field's value should be + // used as the ID, when both are zero valued. It must always be + // set to true if the request ID is a string. + IsString bool +} + +func (id ID) String() string { + if id.IsString { + return strconv.Quote(id.Str) + } + return strconv.FormatUint(id.Num, 10) +} + +// MarshalJSON implements json.Marshaler. +func (id ID) MarshalJSON() ([]byte, error) { + if id.IsString { + return json.Marshal(id.Str) + } + return json.Marshal(id.Num) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (id *ID) UnmarshalJSON(data []byte) error { + // Support both uint64 and string IDs. + var v uint64 + if err := json.Unmarshal(data, &v); err == nil { + *id = ID{Num: v} + return nil + } + var v2 string + if err := json.Unmarshal(data, &v2); err != nil { + return err + } + *id = ID{Str: v2, IsString: true} + return nil +} + +// Conn is a JSON-RPC client/server connection. The JSON-RPC protocol +// is symmetric, so a Conn runs on both ends of a client-server +// connection. +type Conn struct { + stream ObjectStream + + h Handler + + mu sync.Mutex + shutdown bool + closing bool + seq uint64 + pending map[ID]*call + + sending sync.Mutex + + disconnect chan struct{} + + // Set by ConnOpt funcs. + onRecv []func(*Request, *Response) + onSend []func(*Request, *Response) +} + +var _ JSONRPC2 = (*Conn)(nil) + +// ErrClosed indicates that the JSON-RPC connection is closed (or in +// the process of closing). 
+var ErrClosed = errors.New("jsonrpc2: connection is closed") + +// NewConn creates a new JSON-RPC client/server connection using the +// given ReadWriteCloser (typically a TCP connection or stdio). The +// JSON-RPC protocol is symmetric, so a Conn runs on both ends of a +// client-server connection. +// +// NewClient consumes conn, so you should call Close on the returned +// client not on the given conn. +func NewConn(ctx context.Context, stream ObjectStream, h Handler, opt ...ConnOpt) *Conn { + c := &Conn{ + stream: stream, + h: h, + pending: map[ID]*call{}, + disconnect: make(chan struct{}), + } + for _, opt := range opt { + opt(c) + } + go c.readMessages(ctx) + return c +} + +// Close closes the JSON-RPC connection. The connection may not be +// used after it has been closed. +func (c *Conn) Close() error { + c.mu.Lock() + if c.shutdown || c.closing { + c.mu.Unlock() + return ErrClosed + } + c.closing = true + c.mu.Unlock() + return c.stream.Close() +} + +func (c *Conn) send(ctx context.Context, m *anyMessage, wait bool) (cc *call, err error) { + c.sending.Lock() + defer c.sending.Unlock() + + // m.request.ID could be changed, so we store a copy to correctly + // clean up pending + var id ID + + c.mu.Lock() + if c.shutdown || c.closing { + c.mu.Unlock() + return nil, ErrClosed + } + + // Store requests so we can later associate them with incoming + // responses. 
+ if m.request != nil && wait { + cc = &call{request: m.request, seq: c.seq, done: make(chan error, 1)} + if !m.request.ID.IsString && m.request.ID.Num == 0 { + // unset, use next seq as call ID + m.request.ID.Num = c.seq + } + id = m.request.ID + c.pending[id] = cc + c.seq++ + } + c.mu.Unlock() + + if len(c.onSend) > 0 { + var ( + req *Request + resp *Response + ) + switch { + case m.request != nil: + req = m.request + case m.response != nil: + resp = m.response + } + for _, onSend := range c.onSend { + onSend(req, resp) + } + } + + // From here on, if we fail to send this, then we need to remove + // this from the pending map so we don't block on it or pile up + // pending entries for unsent messages. + defer func() { + if err != nil { + if cc != nil { + c.mu.Lock() + delete(c.pending, id) + c.mu.Unlock() + } + } + }() + + if err := c.stream.WriteObject(m); err != nil { + return nil, err + } + return cc, nil +} + +// Call initiates a JSON-RPC call using the specified method and +// params, and waits for the response. If the response is successful, +// its result is stored in result (a pointer to a value that can be +// JSON-unmarshaled into); otherwise, a non-nil error is returned. 
+func (c *Conn) Call(ctx context.Context, method string, params, result interface{}, opts ...CallOption) error { + req := &Request{Method: method} + if err := req.SetParams(params); err != nil { + return err + } + for _, opt := range opts { + if err := opt.apply(req); err != nil { + return err + } + } + call, err := c.send(ctx, &anyMessage{request: req}, true) + if err != nil { + return err + } + select { + case err, ok := <-call.done: + if !ok { + err = ErrClosed + } + if err != nil { + return err + } + if result != nil { + if call.response.Result == nil { + call.response.Result = &jsonNull + } + // TODO(sqs): error handling + if err := json.Unmarshal(*call.response.Result, result); err != nil { + return err + } + } + return nil + + case <-ctx.Done(): + return ctx.Err() + } +} + +var jsonNull = json.RawMessage("null") + +// Notify is like Call, but it returns when the notification request +// is sent (without waiting for a response, because JSON-RPC +// notifications do not have responses). +func (c *Conn) Notify(ctx context.Context, method string, params interface{}, opts ...CallOption) error { + req := &Request{Method: method, Notif: true} + if err := req.SetParams(params); err != nil { + return err + } + for _, opt := range opts { + if err := opt.apply(req); err != nil { + return err + } + } + _, err := c.send(ctx, &anyMessage{request: req}, false) + return err +} + +// Reply sends a successful response with a result. +func (c *Conn) Reply(ctx context.Context, id ID, result interface{}) error { + resp := &Response{ID: id} + if err := resp.SetResult(result); err != nil { + return err + } + _, err := c.send(ctx, &anyMessage{response: resp}, false) + return err +} + +// ReplyWithError sends a response with an error. +func (c *Conn) ReplyWithError(ctx context.Context, id ID, respErr *Error) error { + _, err := c.send(ctx, &anyMessage{response: &Response{ID: id, Error: respErr}}, false) + return err +} + +// SendResponse sends resp to the peer. 
It is lower level than (*Conn).Reply. +func (c *Conn) SendResponse(ctx context.Context, resp *Response) error { + _, err := c.send(ctx, &anyMessage{response: resp}, false) + return err +} + +// DisconnectNotify returns a channel that is closed when the +// underlying connection is disconnected. +func (c *Conn) DisconnectNotify() <-chan struct{} { + return c.disconnect +} + +func (c *Conn) readMessages(ctx context.Context) { + var err error + for err == nil { + var m anyMessage + err = c.stream.ReadObject(&m) + if err != nil { + break + } + + switch { + case m.request != nil: + for _, onRecv := range c.onRecv { + onRecv(m.request, nil) + } + c.h.Handle(ctx, c, m.request) + + case m.response != nil: + resp := m.response + if resp != nil { + id := resp.ID + c.mu.Lock() + call := c.pending[id] + delete(c.pending, id) + c.mu.Unlock() + + if call != nil { + call.response = resp + } + + if len(c.onRecv) > 0 { + var req *Request + if call != nil { + req = call.request + } + for _, onRecv := range c.onRecv { + onRecv(req, resp) + } + } + + switch { + case call == nil: + log.Printf("jsonrpc2: ignoring response #%s with no corresponding request", id) + + case resp.Error != nil: + call.done <- resp.Error + close(call.done) + + default: + call.done <- nil + close(call.done) + } + } + } + } + + c.sending.Lock() + c.mu.Lock() + c.shutdown = true + closing := c.closing + if err == io.EOF { + if closing { + err = ErrClosed + } else { + err = io.ErrUnexpectedEOF + } + } + for _, call := range c.pending { + call.done <- err + close(call.done) + } + c.mu.Unlock() + c.sending.Unlock() + if err != io.ErrUnexpectedEOF && !closing { + log.Println("jsonrpc2: protocol error:", err) + } + close(c.disconnect) +} + +// call represents a JSON-RPC call over its entire lifecycle. +type call struct { + request *Request + response *Response + seq uint64 // the seq of the request + done chan error +} + +// anyMessage represents either a JSON Request or Response. 
+type anyMessage struct { + request *Request + response *Response +} + +func (m anyMessage) MarshalJSON() ([]byte, error) { + var v interface{} + switch { + case m.request != nil && m.response == nil: + v = m.request + case m.request == nil && m.response != nil: + v = m.response + } + if v != nil { + return json.Marshal(v) + } + return nil, errors.New("jsonrpc2: message must have exactly one of the request or response fields set") +} + +func (m *anyMessage) UnmarshalJSON(data []byte) error { + // The presence of these fields distinguishes between the 2 + // message types. + type msg struct { + ID interface{} `json:"id"` + Method *string `json:"method"` + Result anyValueWithExplicitNull `json:"result"` + Error interface{} `json:"error"` + } + + var isRequest, isResponse bool + checkType := func(m *msg) error { + mIsRequest := m.Method != nil + mIsResponse := m.Result.null || m.Result.value != nil || m.Error != nil + if (!mIsRequest && !mIsResponse) || (mIsRequest && mIsResponse) { + return errors.New("jsonrpc2: unable to determine message type (request or response)") + } + if (mIsRequest && isResponse) || (mIsResponse && isRequest) { + return errors.New("jsonrpc2: batch message type mismatch (must be all requests or all responses)") + } + isRequest = mIsRequest + isResponse = mIsResponse + return nil + } + + if isArray := len(data) > 0 && data[0] == '['; isArray { + var msgs []msg + if err := json.Unmarshal(data, &msgs); err != nil { + return err + } + if len(msgs) == 0 { + return errors.New("jsonrpc2: invalid empty batch") + } + for _, msg := range msgs { + if err := checkType(&msg); err != nil { + return err + } + } + } else { + var msg msg + if err := json.Unmarshal(data, &msg); err != nil { + return err + } + if err := checkType(&msg); err != nil { + return err + } + } + + var v interface{} + switch { + case isRequest && !isResponse: + v = &m.request + case !isRequest && isResponse: + v = &m.response + } + if err := json.Unmarshal(data, v); err != nil { + return 
err + } + if !isRequest && isResponse && m.response.Error == nil && m.response.Result == nil { + m.response.Result = &jsonNull + } + return nil +} + +// anyValueWithExplicitNull is used to distinguish {} from +// {"result":null} by anyMessage's JSON unmarshaler. +type anyValueWithExplicitNull struct { + null bool // JSON "null" + value interface{} +} + +func (v anyValueWithExplicitNull) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *anyValueWithExplicitNull) UnmarshalJSON(data []byte) error { + data = bytes.TrimSpace(data) + if string(data) == "null" { + *v = anyValueWithExplicitNull{null: true} + return nil + } + *v = anyValueWithExplicitNull{} + return json.Unmarshal(data, &v.value) +} + +var ( + errInvalidRequestJSON = errors.New("jsonrpc2: request must be either a JSON object or JSON array") + errInvalidResponseJSON = errors.New("jsonrpc2: response must be either a JSON object or JSON array") +) diff --git a/vendor/github.com/sourcegraph/jsonrpc2/stream.go b/vendor/github.com/sourcegraph/jsonrpc2/stream.go new file mode 100644 index 000000000..f38c026ac --- /dev/null +++ b/vendor/github.com/sourcegraph/jsonrpc2/stream.go @@ -0,0 +1,164 @@ +package jsonrpc2 + +import ( + "bufio" + "encoding/binary" + "encoding/json" + "fmt" + "io" + "strconv" + "strings" + "sync" +) + +// An ObjectStream is a bidirectional stream of JSON-RPC 2.0 objects. +type ObjectStream interface { + // WriteObject writes a JSON-RPC 2.0 object to the stream. + WriteObject(obj interface{}) error + + // ReadObject reads the next JSON-RPC 2.0 object from the stream + // and stores it in the value pointed to by v. + ReadObject(v interface{}) error + + io.Closer +} + +// A bufferedObjectStream is an ObjectStream that uses a buffered +// io.ReadWriteCloser to send and receive objects. 
+type bufferedObjectStream struct {
+	conn io.Closer // all writes should go through w, all reads through r
+	w    *bufio.Writer
+	r    *bufio.Reader
+
+	codec ObjectCodec
+
+	mu sync.Mutex
+}
+
+// NewBufferedStream creates a buffered stream from a network
+// connection (or other similar interface). The underlying
+// objectStream is used to produce the bytes to write to the stream
+// for the JSON-RPC 2.0 objects.
+func NewBufferedStream(conn io.ReadWriteCloser, codec ObjectCodec) ObjectStream {
+	return &bufferedObjectStream{
+		conn:  conn,
+		w:     bufio.NewWriter(conn),
+		r:     bufio.NewReader(conn),
+		codec: codec,
+	}
+}
+
+// WriteObject implements ObjectStream.
+func (t *bufferedObjectStream) WriteObject(obj interface{}) error {
+	t.mu.Lock()
+	defer t.mu.Unlock()
+	if err := t.codec.WriteObject(t.w, obj); err != nil {
+		return err
+	}
+	return t.w.Flush()
+}
+
+// ReadObject implements ObjectStream.
+func (t *bufferedObjectStream) ReadObject(v interface{}) error {
+	return t.codec.ReadObject(t.r, v)
+}
+
+// Close implements ObjectStream.
+func (t *bufferedObjectStream) Close() error {
+	return t.conn.Close()
+}
+
+// An ObjectCodec specifies how to encode and decode a JSON-RPC 2.0
+// object in a stream.
+type ObjectCodec interface {
+	// WriteObject writes a JSON-RPC 2.0 object to the stream.
+	WriteObject(stream io.Writer, obj interface{}) error
+
+	// ReadObject reads the next JSON-RPC 2.0 object from the stream
+	// and stores it in the value pointed to by v.
+	ReadObject(stream *bufio.Reader, v interface{}) error
+}
+
+// VarintObjectCodec reads/writes JSON-RPC 2.0 objects with a varint
+// header that encodes the byte length.
+type VarintObjectCodec struct{}
+
+// WriteObject implements ObjectCodec.
+func (VarintObjectCodec) WriteObject(stream io.Writer, obj interface{}) error { + data, err := json.Marshal(obj) + if err != nil { + return err + } + var buf [binary.MaxVarintLen64]byte + b := binary.PutUvarint(buf[:], uint64(len(data))) + if _, err := stream.Write(buf[:b]); err != nil { + return err + } + if _, err := stream.Write(data); err != nil { + return err + } + return nil +} + +// ReadObject implements ObjectCodec. +func (VarintObjectCodec) ReadObject(stream *bufio.Reader, v interface{}) error { + b, err := binary.ReadUvarint(stream) + if err != nil { + return err + } + return json.NewDecoder(io.LimitReader(stream, int64(b))).Decode(v) +} + +// VSCodeObjectCodec reads/writes JSON-RPC 2.0 objects with +// Content-Length and Content-Type headers, as specified by +// https://github.com/Microsoft/language-server-protocol/blob/master/protocol.md#base-protocol. +type VSCodeObjectCodec struct{} + +// WriteObject implements ObjectCodec. +func (VSCodeObjectCodec) WriteObject(stream io.Writer, obj interface{}) error { + data, err := json.Marshal(obj) + if err != nil { + return err + } + if _, err := fmt.Fprintf(stream, "Content-Length: %d\r\n\r\n", len(data)); err != nil { + return err + } + if _, err := stream.Write(data); err != nil { + return err + } + return nil +} + +// ReadObject implements ObjectCodec. 
+func (VSCodeObjectCodec) ReadObject(stream *bufio.Reader, v interface{}) error { + var contentLength uint64 + for { + line, err := stream.ReadString('\r') + if err != nil { + return err + } + b, err := stream.ReadByte() + if err != nil { + return err + } + if b != '\n' { + return fmt.Errorf(`jsonrpc2: line endings must be \r\n`) + } + if line == "\r" { + break + } + if strings.HasPrefix(line, "Content-Length: ") { + line = strings.TrimPrefix(line, "Content-Length: ") + line = strings.TrimSpace(line) + var err error + contentLength, err = strconv.ParseUint(line, 10, 32) + if err != nil { + return err + } + } + } + if contentLength == 0 { + return fmt.Errorf("jsonrpc2: no Content-Length header found") + } + return json.NewDecoder(io.LimitReader(stream, int64(contentLength))).Decode(v) +} diff --git a/vendor/github.com/sourcegraph/jsonrpc2/websocket/stream.go b/vendor/github.com/sourcegraph/jsonrpc2/websocket/stream.go new file mode 100644 index 000000000..26313a07b --- /dev/null +++ b/vendor/github.com/sourcegraph/jsonrpc2/websocket/stream.go @@ -0,0 +1,44 @@ +// Package websocket provides WebSocket transport support for JSON-RPC +// 2.0. +package websocket + +import ( + "io" + + "github.com/gorilla/websocket" +) + +// A ObjectStream is a jsonrpc2.ObjectStream that uses a WebSocket to +// send and receive JSON-RPC 2.0 objects. +type ObjectStream struct { + conn *websocket.Conn +} + +// NewObjectStream creates a new jsonrpc2.ObjectStream for sending and +// receiving JSON-RPC 2.0 objects over a WebSocket. +func NewObjectStream(conn *websocket.Conn) ObjectStream { + return ObjectStream{conn: conn} +} + +// WriteObject implements jsonrpc2.ObjectStream. +func (t ObjectStream) WriteObject(obj interface{}) error { + return t.conn.WriteJSON(obj) +} + +// ReadObject implements jsonrpc2.ObjectStream. 
+func (t ObjectStream) ReadObject(v interface{}) error { + err := t.conn.ReadJSON(v) + if e, ok := err.(*websocket.CloseError); ok { + if e.Code == websocket.CloseAbnormalClosure && e.Text == io.ErrUnexpectedEOF.Error() { + // Suppress a noisy (but harmless) log message by + // unwrapping this error. + err = io.ErrUnexpectedEOF + } + } + return err +} + +// Close implements jsonrpc2.ObjectStream. +func (t ObjectStream) Close() error { + return t.conn.Close() +} diff --git a/vendor/github.com/zserge/metric/LICENSE b/vendor/github.com/zserge/metric/LICENSE new file mode 100644 index 000000000..79a518d45 --- /dev/null +++ b/vendor/github.com/zserge/metric/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Serge Zaitsev + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/zserge/metric/README.md b/vendor/github.com/zserge/metric/README.md new file mode 100644 index 000000000..bbfac3e92 --- /dev/null +++ b/vendor/github.com/zserge/metric/README.md @@ -0,0 +1,57 @@ +# metric + +[![Build Status](https://travis-ci.org/zserge/metric.svg?branch=master)](https://travis-ci.org/zserge/metric) +[![GoDoc](https://godoc.org/github.com/zserge/metric?status.svg)](https://godoc.org/github.com/zserge/metric) +[![Go Report Card](https://goreportcard.com/badge/github.com/zserge/metric)](https://goreportcard.com/report/github.com/zserge/metric) + +Package provides simple uniform interface for metrics such as counters, +gauges and histograms. It keeps track of metrics in runtime and can be used for +some basic web service instrumentation in Go, where complex tools such as +Prometheus or InfluxDB are not required. + +It is compatible with [expvar](https://golang.org/pkg/expvar/) package, that is +also commonly used for monitoring. + +## Usage + +```go +// Create new metric. All metrics may take time frames if you want them to keep +// history. If no time frames are given the metric only keeps track of a single +// current value. +c := metric.NewCounter("15m10s") // 15 minutes of history with 10 second precision +// Increment counter +c.Add(1) +// Return JSON with all recorded counter values +c.String() // Or json.Marshal(c) + +// With expvar + +// Register a metric +expvar.Publish("latency", metric.NewHistogram("5m1s", "15m30s", "1h1m")) +// Register HTTP handler to visualize metrics +http.Handle("/debug/metrics", metric.Handler(metric.Exposed)) + +// Measure time and update the metric +start := time.Now() +... +expvar.Get("latency").(metric.Metric).Add(time.Since(start).Seconds()) +``` + +Metrics are thread-safe and can be updated from background goroutines. + +## Web UI + +Nothing fancy, really, but still better than reading plain JSON. No javascript, +only good old HTML, CSS and SVG. 
+ +![web ui](example/screenshot.png) + +Of course you may customize a list of metrics to show in the web UI. + +If you need precise values - you may use `/debug/vars` HTTP endpoint provided +by `expvar`. + +## License + +Code is distributed under MIT license, feel free to use it in your proprietary +projects as well. diff --git a/vendor/github.com/zserge/metric/go.mod b/vendor/github.com/zserge/metric/go.mod new file mode 100644 index 000000000..0e4037d36 --- /dev/null +++ b/vendor/github.com/zserge/metric/go.mod @@ -0,0 +1 @@ +module github.com/zserge/metric diff --git a/vendor/github.com/zserge/metric/handler.go b/vendor/github.com/zserge/metric/handler.go new file mode 100644 index 000000000..b83632cbb --- /dev/null +++ b/vendor/github.com/zserge/metric/handler.go @@ -0,0 +1,197 @@ +package metric + +import ( + "encoding/json" + "expvar" + "fmt" + "net/http" + "sort" + "strings" + + "html/template" +) + +var ( + page = template.Must(template.New(""). + Funcs(template.FuncMap{"path": path, "duration": duration}). + Parse(` + + +Metrics report + + + +
+

    __          __
+.--------..-----.|  |_ .----.|__|.----..-----.
+|        ||  -__||   _||   _||  ||  __||__ --|
+|__|__|__||_____||____||__|  |__||____||_____|
+
+
+

+{{ range . }} +
+

{{ .name }}

+
+ {{ if .type }} +
+ {{ template "table" . }} +
+
+ {{ else if .interval }} +
{{ template "timeseries" . }}
+ {{ else if .metrics}} + {{ range .metrics }} +
+ {{ template "timeseries" . }} +
+ {{ end }} + {{ end }} +
+
+{{ end }} +
+ + +{{ define "table" }} + + {{ if eq .type "c" }} + + {{ else if eq .type "g" }} + + + {{ else if eq .type "h" }} + + + {{ end }} +
count
{{ printf "%.2g" .count }}
meanminmax
{{printf "%.2g" .mean}}{{printf "%.2g" .min}}{{printf "%.2g" .max}}
P.50P.90P.99
{{printf "%.2g" .p50}}{{printf "%.2g" .p90}}{{printf "%.2g" .p99}}
+{{ end }} +{{ define "timeseries" }} + {{ template "table" .total }} +
+
+
{{ duration .samples .interval }}
+ + {{ if eq (index (index .samples 0) "type") "c" }} + {{ range (path .samples "count") }}{{end}} + {{ else if eq (index (index .samples 0) "type") "g" }} + {{ range (path .samples "min" "max" "mean" ) }}{{end}} + {{ else if eq (index (index .samples 0) "type") "h" }} + {{ range (path .samples "p50" "p90" "p99") }}{{end}} + {{ end }} + +
+
+{{ end }} +`)) +) + +func path(samples []interface{}, keys ...string) []string { + var min, max float64 + paths := make([]string, len(keys), len(keys)) + for i := 0; i < len(samples); i++ { + s := samples[i].(map[string]interface{}) + for _, k := range keys { + x := s[k].(float64) + if i == 0 || x < min { + min = x + } + if i == 0 || x > max { + max = x + } + } + } + for i := 0; i < len(samples); i++ { + s := samples[i].(map[string]interface{}) + for j, k := range keys { + v := s[k].(float64) + x := float64(i+1) / float64(len(samples)) + y := (v - min) / (max - min) + if max == min { + y = 0 + } + if i == 0 { + paths[j] = fmt.Sprintf("M%f %f", 0.0, (1-y)*18+1) + } + paths[j] += fmt.Sprintf(" L%f %f", x*100, (1-y)*18+1) + } + } + return paths +} + +func duration(samples []interface{}, n float64) string { + n = n * float64(len(samples)) + if n < 60 { + return fmt.Sprintf("%d sec", int(n)) + } else if n < 60*60 { + return fmt.Sprintf("%d min", int(n/60)) + } else if n < 24*60*60 { + return fmt.Sprintf("%d hrs", int(n/60/60)) + } + return fmt.Sprintf("%d days", int(n/24/60/60)) +} + +// Handler returns an http.Handler that renders web UI for all provided metrics. +func Handler(snapshot func() map[string]Metric) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + type h map[string]interface{} + metrics := []h{} + for name, metric := range snapshot() { + m := h{} + b, _ := json.Marshal(metric) + json.Unmarshal(b, &m) + m["name"] = name + metrics = append(metrics, m) + } + sort.Slice(metrics, func(i, j int) bool { + n1 := metrics[i]["name"].(string) + n2 := metrics[j]["name"].(string) + return strings.Compare(n1, n2) < 0 + }) + page.Execute(w, metrics) + }) +} + +// Exposed returns a map of exposed metrics (see expvar package). 
+func Exposed() map[string]Metric { + m := map[string]Metric{} + expvar.Do(func(kv expvar.KeyValue) { + if metric, ok := kv.Value.(Metric); ok { + m[kv.Key] = metric + } + }) + return m +} diff --git a/vendor/github.com/zserge/metric/metric.go b/vendor/github.com/zserge/metric/metric.go new file mode 100644 index 000000000..658fef09c --- /dev/null +++ b/vendor/github.com/zserge/metric/metric.go @@ -0,0 +1,385 @@ +package metric + +import ( + "encoding/json" + "fmt" + "math" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" +) + +// To mock time in tests +var now = time.Now + +// Metric is a single meter (counter, gauge or histogram, optionally - with history) +type Metric interface { + Add(n float64) + String() string +} + +// metric is an extended private interface with some additional internal +// methods used by timeseries. Counters, gauges and histograms implement it. +type metric interface { + Metric + Reset() + Aggregate(roll int, samples []metric) +} + +var _, _, _ metric = &counter{}, &gauge{}, &histogram{} + +// NewCounter returns a counter metric that increments the value with each +// incoming number. +func NewCounter(frames ...string) Metric { + return newMetric(func() metric { return &counter{} }, frames...) +} + +// NewGauge returns a gauge metric that sums up the incoming values and returns +// mean/min/max of the resulting distribution. +func NewGauge(frames ...string) Metric { + return newMetric(func() metric { return &gauge{} }, frames...) +} + +// NewHistogram returns a histogram metric that calculates 50%, 90% and 99% +// percentiles of the incoming numbers. +func NewHistogram(frames ...string) Metric { + return newMetric(func() metric { return &histogram{} }, frames...) 
+} + +type timeseries struct { + sync.Mutex + now time.Time + size int + interval time.Duration + total metric + samples []metric +} + +func (ts *timeseries) Reset() { + ts.total.Reset() + for _, s := range ts.samples { + s.Reset() + } +} + +func (ts *timeseries) roll() { + t := now() + roll := int((t.Round(ts.interval).Sub(ts.now.Round(ts.interval))) / ts.interval) + ts.now = t + n := len(ts.samples) + if roll <= 0 { + return + } + if roll >= len(ts.samples) { + ts.Reset() + } else { + for i := 0; i < roll; i++ { + tmp := ts.samples[n-1] + for j := n - 1; j > 0; j-- { + ts.samples[j] = ts.samples[j-1] + } + ts.samples[0] = tmp + ts.samples[0].Reset() + } + ts.total.Aggregate(roll, ts.samples) + } +} + +func (ts *timeseries) Add(n float64) { + ts.Lock() + defer ts.Unlock() + ts.roll() + ts.total.Add(n) + ts.samples[0].Add(n) +} + +func (ts *timeseries) MarshalJSON() ([]byte, error) { + ts.Lock() + defer ts.Unlock() + ts.roll() + return json.Marshal(struct { + Interval float64 `json:"interval"` + Total Metric `json:"total"` + Samples []metric `json:"samples"` + }{float64(ts.interval) / float64(time.Second), ts.total, ts.samples}) +} + +func (ts *timeseries) String() string { + ts.Lock() + defer ts.Unlock() + ts.roll() + return ts.total.String() +} + +type multimetric []*timeseries + +func (mm multimetric) Add(n float64) { + for _, m := range mm { + m.Add(n) + } +} + +func (mm multimetric) MarshalJSON() ([]byte, error) { + b := []byte(`{"metrics":[`) + for i, m := range mm { + if i != 0 { + b = append(b, ',') + } + x, _ := json.Marshal(m) + b = append(b, x...) 
+ } + b = append(b, ']', '}') + return b, nil +} + +func (mm multimetric) String() string { + return mm[len(mm)-1].String() +} + +type counter struct { + count uint64 +} + +func (c *counter) String() string { return strconv.FormatFloat(c.value(), 'g', -1, 64) } +func (c *counter) Reset() { atomic.StoreUint64(&c.count, math.Float64bits(0)) } +func (c *counter) value() float64 { return math.Float64frombits(atomic.LoadUint64(&c.count)) } +func (c *counter) Add(n float64) { + for { + old := math.Float64frombits(atomic.LoadUint64(&c.count)) + new := old + n + if atomic.CompareAndSwapUint64(&c.count, math.Float64bits(old), math.Float64bits(new)) { + return + } + } +} +func (c *counter) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + Type string `json:"type"` + Count float64 `json:"count"` + }{"c", c.value()}) +} + +func (c *counter) Aggregate(roll int, samples []metric) { + c.Reset() + for _, s := range samples { + c.Add(s.(*counter).value()) + } +} + +type gauge struct { + sync.Mutex + value float64 + sum float64 + min float64 + max float64 + count int +} + +func (g *gauge) String() string { return strconv.FormatFloat(g.value, 'g', -1, 64) } +func (g *gauge) Reset() { + g.Lock() + defer g.Unlock() + g.value, g.count, g.sum, g.min, g.max = 0, 0, 0, 0, 0 +} +func (g *gauge) Add(n float64) { + g.Lock() + defer g.Unlock() + if n < g.min || g.count == 0 { + g.min = n + } + if n > g.max || g.count == 0 { + g.max = n + } + g.value = n + g.sum += n + g.count++ +} +func (g *gauge) MarshalJSON() ([]byte, error) { + g.Lock() + defer g.Unlock() + return json.Marshal(struct { + Type string `json:"type"` + Value float64 `json:"value"` + Mean float64 `json:"mean"` + Min float64 `json:"min"` + Max float64 `json:"max"` + }{"g", g.value, g.mean(), g.min, g.max}) +} +func (g *gauge) mean() float64 { + if g.count == 0 { + return 0 + } + return g.sum / float64(g.count) +} +func (g *gauge) Aggregate(roll int, samples []metric) { + g.Reset() + g.Lock() + defer g.Unlock() + 
for i := len(samples) - 1; i >= 0; i-- { + s := samples[i].(*gauge) + s.Lock() + if s.count == 0 { + s.Unlock() + continue + } + if g.min > s.min || g.count == 0 { + g.min = s.min + } + if g.max < s.max || g.count == 0 { + g.max = s.max + } + g.count += s.count + g.sum += s.sum + g.value = s.value + s.Unlock() + } +} + +const maxBins = 100 + +type bin struct { + value float64 + count float64 +} + +type histogram struct { + sync.Mutex + bins []bin + total float64 +} + +func (h *histogram) String() string { + return fmt.Sprintf(`{"p50":%g,"p90":%g,"p99":%g}`, h.quantile(0.5), h.quantile(0.9), h.quantile(0.99)) +} + +func (h *histogram) Reset() { + h.Lock() + defer h.Unlock() + h.bins = nil + h.total = 0 +} + +func (h *histogram) Add(n float64) { + h.Lock() + defer h.Unlock() + defer h.trim() + h.total = h.total + 1 + newbin := bin{value: n, count: 1} + for i := range h.bins { + if h.bins[i].value > n { + h.bins = append(h.bins[:i], append([]bin{newbin}, h.bins[i:]...)...) + return + } + } + + h.bins = append(h.bins, newbin) +} + +func (h *histogram) MarshalJSON() ([]byte, error) { + h.Lock() + defer h.Unlock() + return json.Marshal(struct { + Type string `json:"type"` + P50 float64 `json:"p50"` + P90 float64 `json:"p90"` + P99 float64 `json:"p99"` + }{"h", h.quantile(0.5), h.quantile(0.9), h.quantile(0.99)}) +} + +func (h *histogram) trim() { + for len(h.bins) > maxBins { + d := float64(0) + i := 0 + for j := 1; j < len(h.bins); j++ { + if dv := h.bins[j].value - h.bins[j-1].value; dv < d || j == 1 { + d = dv + i = j + } + } + count := h.bins[i-1].count + h.bins[i].count + merged := bin{ + value: (h.bins[i-1].value*h.bins[i-1].count + h.bins[i].value*h.bins[i].count) / count, + count: count, + } + h.bins = append(h.bins[:i-1], h.bins[i:]...) 
+ h.bins[i-1] = merged + } +} + +func (h *histogram) bin(q float64) bin { + count := q * h.total + for i := range h.bins { + count -= float64(h.bins[i].count) + if count <= 0 { + return h.bins[i] + } + } + return bin{} +} + +func (h *histogram) quantile(q float64) float64 { + return h.bin(q).value +} + +func (h *histogram) Aggregate(roll int, samples []metric) { + h.Lock() + defer h.Unlock() + alpha := 2 / float64(len(samples)+1) + h.total = 0 + for i := range h.bins { + h.bins[i].count = h.bins[i].count * math.Pow(1-alpha, float64(roll)) + h.total = h.total + h.bins[i].count + } +} + +func newTimeseries(builder func() metric, frame string) *timeseries { + var ( + totalNum, intervalNum int + totalUnit, intervalUnit rune + ) + units := map[rune]time.Duration{ + 's': time.Second, + 'm': time.Minute, + 'h': time.Hour, + 'd': time.Hour * 24, + 'w': time.Hour * 24 * 7, + 'M': time.Hour * 24 * 7 * 30, + 'y': time.Hour * 24 * 7 * 365, + } + fmt.Sscanf(frame, "%d%c%d%c", &totalNum, &totalUnit, &intervalNum, &intervalUnit) + interval := units[intervalUnit] * time.Duration(intervalNum) + if interval == 0 { + interval = time.Minute + } + totalDuration := units[totalUnit] * time.Duration(totalNum) + if totalDuration == 0 { + totalDuration = interval * 15 + } + n := int(totalDuration / interval) + samples := make([]metric, n, n) + for i := 0; i < n; i++ { + samples[i] = builder() + } + totalMetric := builder() + return ×eries{interval: interval, total: totalMetric, samples: samples} +} + +func newMetric(builder func() metric, frames ...string) Metric { + if len(frames) == 0 { + return builder() + } + if len(frames) == 1 { + return newTimeseries(builder, frames[0]) + } + mm := multimetric{} + for _, frame := range frames { + mm = append(mm, newTimeseries(builder, frame)) + } + sort.Slice(mm, func(i, j int) bool { + a, b := mm[i], mm[j] + return a.interval.Seconds()*float64(len(a.samples)) < b.interval.Seconds()*float64(len(b.samples)) + }) + return mm +} diff --git 
a/worker/chainbusservice.go b/worker/chainbusservice.go index 5ac6aa11a..2da540916 100644 --- a/worker/chainbusservice.go +++ b/worker/chainbusservice.go @@ -48,7 +48,7 @@ type BusService struct { lock sync.RWMutex // a lock for the map blockCount uint32 sqlChainProfiles map[proto.DatabaseID]*types.SQLChainProfile - sqlChainState map[proto.DatabaseID](map[proto.AccountAddress]*types.PermStat) + sqlChainState map[proto.DatabaseID]map[proto.AccountAddress]*types.PermStat } // NewBusService creates a new chain bus instance. @@ -87,7 +87,7 @@ func (bs *BusService) updateState(count uint32, profiles []*types.SQLChainProfil defer bs.lock.Unlock() var ( rebuilt = make(map[proto.DatabaseID]*types.SQLChainProfile) - sqlchainState = make(map[proto.DatabaseID](map[proto.AccountAddress]*types.PermStat)) + sqlchainState = make(map[proto.DatabaseID]map[proto.AccountAddress]*types.PermStat) ) for _, v := range profiles { rebuilt[v.ID] = v diff --git a/worker/chainbusservice_test.go b/worker/chainbusservice_test.go index 2429061e6..8fd721e2d 100644 --- a/worker/chainbusservice_test.go +++ b/worker/chainbusservice_test.go @@ -94,7 +94,7 @@ func TestNewBusService(t *testing.T) { permStat, ok := bs.RequestPermStat(profile.ID, testAddr) So(ok, ShouldBeTrue) So(permStat.Status, ShouldEqual, profile.Users[0].Status) - So(permStat.Permission, ShouldEqual, profile.Users[0].Permission) + So(permStat.Permission, ShouldResemble, profile.Users[0].Permission) permStat, ok = bs.RequestPermStat(profile.ID, testNotExistAddr) } p, ok := bs.RequestSQLProfile(testNotExistID) @@ -116,7 +116,7 @@ func TestNewBusService(t *testing.T) { permStat, ok := bs.RequestPermStat(profile.ID, testAddr) So(ok, ShouldBeTrue) So(permStat.Status, ShouldEqual, profile.Users[0].Status) - So(permStat.Permission, ShouldEqual, profile.Users[0].Permission) + So(permStat.Permission, ShouldResemble, profile.Users[0].Permission) permStat, ok = bs.RequestPermStat(profile.ID, testNotExistAddr) } p, ok := 
bs.RequestSQLProfile(testNotExistID) diff --git a/worker/db.go b/worker/db.go index 5a2043eb1..fddf3775c 100644 --- a/worker/db.go +++ b/worker/db.go @@ -25,6 +25,8 @@ import ( "time" "github.com/CovenantSQL/CovenantSQL/conf" + "github.com/CovenantSQL/CovenantSQL/crypto" + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/kayak" kt "github.com/CovenantSQL/CovenantSQL/kayak/types" @@ -34,6 +36,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/storage" "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils/log" + x "github.com/CovenantSQL/CovenantSQL/xenomint" "github.com/pkg/errors" ) @@ -54,7 +57,16 @@ const ( PrepareThreshold = 1.0 // CommitThreshold defines the commit complete threshold. - CommitThreshold = 1.0 + CommitThreshold = 0.0 + + // PrepareTimeout defines the prepare timeout config. + PrepareTimeout = 10 * time.Second + + // CommitTimeout defines the commit timeout config. + CommitTimeout = time.Minute + + // LogWaitTimeout defines the missing log wait timeout config. + LogWaitTimeout = 1 * time.Second // SlowQuerySampleSize defines the maximum slow query log size (default: 1KB). SlowQuerySampleSize = 1 << 10 @@ -72,6 +84,8 @@ type Database struct { chain *sqlchain.Chain nodeID proto.NodeID mux *DBKayakMuxService + privateKey *asymmetric.PrivateKey + accountAddr proto.AccountAddress } // NewDatabase create a single database instance using config. 
@@ -87,12 +101,25 @@ func NewDatabase(cfg *DBConfig, peers *proto.Peers, return } + // get private key + var privateKey *asymmetric.PrivateKey + if privateKey, err = kms.GetLocalPrivateKey(); err != nil { + return + } + + var accountAddr proto.AccountAddress + if accountAddr, err = crypto.PubKeyHash(privateKey.PubKey()); err != nil { + return + } + // init database db = &Database{ cfg: cfg, dbID: cfg.DatabaseID, mux: cfg.KayakMux, connSeqEvictCh: make(chan uint64, 1), + privateKey: privateKey, + accountAddr: accountAddr, } defer func() { @@ -143,6 +170,8 @@ func NewDatabase(cfg *DBConfig, peers *proto.Peers, QueryTTL: conf.GConf.SQLChainTTL, UpdatePeriod: cfg.UpdateBlockCount, + + IsolationLevel: cfg.IsolationLevel, } if db.chain, err = sqlchain.NewChain(chainCfg); err != nil { return @@ -161,14 +190,16 @@ func NewDatabase(cfg *DBConfig, peers *proto.Peers, Handler: db, PrepareThreshold: PrepareThreshold, CommitThreshold: CommitThreshold, - PrepareTimeout: time.Second, - CommitTimeout: time.Second * 60, + PrepareTimeout: PrepareTimeout, + CommitTimeout: CommitTimeout, + LogWaitTimeout: LogWaitTimeout, Peers: peers, Wal: db.kayakWal, NodeID: db.nodeID, InstanceID: string(db.dbID), ServiceName: DBKayakRPCName, - MethodName: DBKayakMethodName, + ApplyMethodName: DBKayakApplyMethodName, + FetchMethodName: DBKayakFetchMethodName, } // create kayak runtime @@ -206,6 +237,7 @@ func (db *Database) Query(request *types.Request) (response *types.Response, err var ( isSlowQuery uint32 + tracker *x.QueryTracker tmStart = time.Now() ) @@ -225,18 +257,44 @@ func (db *Database) Query(request *types.Request) (response *types.Response, err switch request.Header.QueryType { case types.ReadQuery: - return db.chain.Query(request) + if tracker, response, err = db.chain.Query(request, false); err != nil { + err = errors.Wrap(err, "failed to query read query") + return + } case types.WriteQuery: if db.cfg.UseEventualConsistency { // reset context request.SetContext(context.Background()) 
- return db.chain.Query(request) + if tracker, response, err = db.chain.Query(request, true); err != nil { + err = errors.Wrap(err, "failed to execute with eventual consistency") + return + } + } else { + if tracker, response, err = db.writeQuery(request); err != nil { + err = errors.Wrap(err, "failed to execute") + return + } } - return db.writeQuery(request) default: // TODO(xq262144): verbose errors with custom error structure return nil, errors.Wrap(ErrInvalidRequest, "invalid query type") } + + response.Header.ResponseAccount = db.accountAddr + + // build hash + if err = response.BuildHash(); err != nil { + err = errors.Wrap(err, "failed to build response hash") + return + } + + if err = db.chain.AddResponse(&response.Header); err != nil { + log.WithError(err).Debug("failed to add response to index") + return + } + tracker.UpdateResp(response) + + return } func (db *Database) logSlow(request *types.Request, isFinished bool, tmStart time.Time) { @@ -335,12 +393,7 @@ func (db *Database) Destroy() (err error) { return } -func (db *Database) writeQuery(request *types.Request) (response *types.Response, err error) { - //ctx := context.Background() - //ctx, task := trace.NewTask(ctx, "writeQuery") - //defer task.End() - //defer trace.StartRegion(ctx, "writeQueryRegion").End() - +func (db *Database) writeQuery(request *types.Request) (tracker *x.QueryTracker, response *types.Response, err error) { // check database size first, wal/kayak/chain database size is not included if db.cfg.SpaceLimit > 0 { path := filepath.Join(db.cfg.DataDir, StorageFileName) @@ -365,12 +418,16 @@ func (db *Database) writeQuery(request *types.Request) (response *types.Response return } - var ok bool - if response, ok = (result).(*types.Response); !ok { + var ( + tr *TrackerAndResponse + ok bool + ) + if tr, ok = (result).(*TrackerAndResponse); !ok { err = errors.Wrap(err, "invalid response type") return } - + tracker = tr.Tracker + response = tr.Response return } diff --git 
a/worker/db_config.go b/worker/db_config.go index 410503232..97270a627 100644 --- a/worker/db_config.go +++ b/worker/db_config.go @@ -35,5 +35,6 @@ type DBConfig struct { UpdateBlockCount uint64 UseEventualConsistency bool ConsistencyLevel float64 + IsolationLevel int SlowQueryTime time.Duration } diff --git a/worker/db_storage.go b/worker/db_storage.go index 1b49fce83..6d83715db 100644 --- a/worker/db_storage.go +++ b/worker/db_storage.go @@ -23,6 +23,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" + x "github.com/CovenantSQL/CovenantSQL/xenomint" "github.com/pkg/errors" ) @@ -30,6 +31,13 @@ import ( // EncodePayload implements kayak.types.Handler.EncodePayload. func (db *Database) EncodePayload(request interface{}) (data []byte, err error) { + if req, ok := request.(*types.Request); ok { + data = req.GetMarshalCache() + if data != nil { + return + } + } + var buf *bytes.Buffer if buf, err = utils.EncodeMsgPack(request); err != nil { @@ -50,6 +58,7 @@ func (db *Database) DecodePayload(data []byte) (request interface{}, err error) return } + req.SetMarshalCache(data) request = req return @@ -90,11 +99,21 @@ func (db *Database) Check(rawReq interface{}) (err error) { return } +// TrackerAndResponse defines a query tracker used by xenomint and an unsigned response. +type TrackerAndResponse struct { + Tracker *x.QueryTracker + Response *types.Response +} + // Commit implements kayak.types.Handler.Commit. 
-func (db *Database) Commit(rawReq interface{}) (result interface{}, err error) { +func (db *Database) Commit(rawReq interface{}, isLeader bool) (result interface{}, err error) { // convert query and check syntax - var req *types.Request - var ok bool + var ( + req *types.Request + response *types.Response + tracker *x.QueryTracker + ok bool + ) if req, ok = rawReq.(*types.Request); !ok || req == nil { err = errors.Wrap(ErrInvalidRequest, "invalid request payload") return @@ -104,7 +123,14 @@ func (db *Database) Commit(rawReq interface{}) (result interface{}, err error) { req.SetContext(context.Background()) // execute - return db.chain.Query(req) + if tracker, response, err = db.chain.Query(req, isLeader); err != nil { + return + } + result = &TrackerAndResponse{ + Tracker: tracker, + Response: response, + } + return } func (db *Database) recordSequence(connID uint64, seqNo uint64) { diff --git a/worker/db_test.go b/worker/db_test.go index aac4ea475..e8e83d58c 100644 --- a/worker/db_test.go +++ b/worker/db_test.go @@ -24,6 +24,7 @@ import ( "math/rand" "os" "path/filepath" + "reflect" "runtime" "strings" "sync" @@ -109,8 +110,6 @@ func TestSingleDatabase(t *testing.T) { res, err = db.Query(writeQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) // test show tables query var readQuery *types.Request @@ -121,8 +120,6 @@ func TestSingleDatabase(t *testing.T) { res, err = db.Query(readQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) So(res.Header.RowCount, ShouldEqual, uint64(1)) So(res.Payload.Rows, ShouldNotBeEmpty) @@ -137,8 +134,6 @@ func TestSingleDatabase(t *testing.T) { res, err = db.Query(readQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) So(res.Header.RowCount, ShouldEqual, uint64(1)) So(res.Payload.Rows, ShouldNotBeEmpty) @@ -153,8 +148,6 @@ func TestSingleDatabase(t *testing.T) { res, err = db.Query(readQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) 
So(res.Header.RowCount, ShouldEqual, uint64(1)) So(res.Payload.Rows, ShouldNotBeEmpty) @@ -171,8 +164,6 @@ func TestSingleDatabase(t *testing.T) { res, err = db.Query(readQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) So(res.Header.RowCount, ShouldEqual, uint64(2)) So(res.Payload.Rows, ShouldNotBeEmpty) @@ -189,8 +180,6 @@ func TestSingleDatabase(t *testing.T) { res, err = db.Query(readQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) So(res.Header.RowCount, ShouldEqual, uint64(2)) So(res.Payload.Rows, ShouldNotBeEmpty) @@ -207,8 +196,6 @@ func TestSingleDatabase(t *testing.T) { res, err = db.Query(readQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) So(res.Header.RowCount, ShouldEqual, uint64(1)) So(res.Payload.Rows, ShouldNotBeEmpty) @@ -228,8 +215,6 @@ func TestSingleDatabase(t *testing.T) { res, err = db.Query(writeQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) So(res.Header.RowCount, ShouldEqual, 0) // test select query @@ -241,8 +226,6 @@ func TestSingleDatabase(t *testing.T) { res, err = db.Query(readQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) So(res.Header.RowCount, ShouldEqual, uint64(1)) So(res.Payload.Columns, ShouldResemble, []string{"test"}) @@ -267,8 +250,6 @@ func TestSingleDatabase(t *testing.T) { // request once res, err = db.Query(writeQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) So(res.Header.RowCount, ShouldEqual, 0) // request again with same sequence @@ -314,8 +295,6 @@ func TestSingleDatabase(t *testing.T) { So(err, ShouldBeNil) res, err = db.Query(readQuery) - err = res.Verify() - So(err, ShouldBeNil) So(res.Header.RowCount, ShouldEqual, uint64(2)) So(res.Payload.Columns, ShouldResemble, []string{"test"}) @@ -357,8 +336,6 @@ func TestSingleDatabase(t *testing.T) { So(err, ShouldBeNil) res, err = db.Query(readQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) 
So(res.Header.RowCount, ShouldEqual, uint64(0)) So(res.Payload.Columns, ShouldResemble, []string{"test"}) @@ -380,8 +357,9 @@ func TestSingleDatabase(t *testing.T) { So(err, ShouldBeNil) res, err = db.Query(readQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) + + // wait for callback to sign signature + time.Sleep(time.Millisecond * 10) So(res.Header.RowCount, ShouldEqual, uint64(1)) So(res.Payload.Columns, ShouldResemble, []string{"test"}) @@ -524,8 +502,6 @@ func TestDatabaseRecycle(t *testing.T) { res, err = db.Query(writeQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) So(res.Header.RowCount, ShouldEqual, 0) // test select query @@ -537,8 +513,6 @@ func TestDatabaseRecycle(t *testing.T) { res, err = db.Query(readQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) So(res.Header.RowCount, ShouldEqual, uint64(1)) So(res.Payload.Columns, ShouldResemble, []string{"test"}) @@ -555,6 +529,52 @@ func TestDatabaseRecycle(t *testing.T) { }) } +func TestDatabase_EncodePayload(t *testing.T) { + Convey("encode payload cache", t, func() { + db := &Database{} + req := &types.Request{ + Envelope: proto.Envelope{ + Version: "", + TTL: 0, + Expire: 0, + NodeID: &proto.RawNodeID{ + Hash: hash.Hash{}, + }, + }, + Header: types.SignedRequestHeader{ + RequestHeader: types.RequestHeader{ + QueryType: 1, + NodeID: "0000000000000000000000000000000000000000000000000000000000000001", + DatabaseID: "1", + ConnectionID: 1, + SeqNo: 1, + Timestamp: time.Now().UTC(), + BatchCount: 1, + QueriesHash: hash.Hash{}, + }, + }, + Payload: types.RequestPayload{ + Queries: []types.Query{ + { + Pattern: "xxx", + Args: nil, + }, + }, + }, + } + encoded, err := db.EncodePayload(req) + So(err, ShouldBeNil) + req2, err := db.DecodePayload(encoded) + So(err, ShouldBeNil) + So(req.Header, ShouldResemble, req2.(*types.Request).Header) + So(reflect.DeepEqual(req.Header, req2.(*types.Request).Header), ShouldBeTrue) + 
So(reflect.DeepEqual(req.Payload, req2.(*types.Request).Payload), ShouldBeTrue) + encoded2, err := db.EncodePayload(req) + So(err, ShouldBeNil) + So(encoded2, ShouldResemble, encoded) + }) +} + func buildAck(res *types.Response) (ack *types.Ack, err error) { // get node id var nodeID proto.NodeID @@ -572,14 +592,15 @@ func buildAck(res *types.Response) (ack *types.Ack, err error) { ack = &types.Ack{ Header: types.SignedAckHeader{ AckHeader: types.AckHeader{ - Response: res.Header, - NodeID: nodeID, - Timestamp: getLocalTime(), + Response: res.Header.ResponseHeader, + ResponseHash: res.Header.Hash(), + NodeID: nodeID, + Timestamp: getLocalTime(), }, }, } - err = ack.Sign(privateKey, true) + err = ack.Sign(privateKey) return } diff --git a/worker/dbms.go b/worker/dbms.go index 9329ea6d1..6aa7834f9 100644 --- a/worker/dbms.go +++ b/worker/dbms.go @@ -231,6 +231,10 @@ func (dbms *DBMS) createDatabase(tx interfaces.Transaction, count uint32) { if err != nil { log.WithError(err).Error("create database error") } + + if dbms.cfg.OnCreateDatabase != nil { + go dbms.cfg.OnCreateDatabase() + } } func (dbms *DBMS) buildSQLChainServiceInstance( @@ -390,6 +394,7 @@ func (dbms *DBMS) Create(instance *types.ServiceInstance, cleanup bool) (err err UpdateBlockCount: conf.GConf.BillingBlockCount, UseEventualConsistency: instance.ResourceMeta.UseEventualConsistency, ConsistencyLevel: instance.ResourceMeta.ConsistencyLevel, + IsolationLevel: instance.ResourceMeta.IsolationLevel, SlowQueryTime: DefaultSlowQueryTime, } @@ -444,7 +449,7 @@ func (dbms *DBMS) Query(req *types.Request) (res *types.Response, err error) { if err != nil { return } - err = dbms.checkPermission(addr, req.Header.DatabaseID, req.Header.QueryType) + err = dbms.checkPermission(addr, req.Header.DatabaseID, req.Header.QueryType, req.Payload.Queries) if err != nil { return } @@ -463,6 +468,15 @@ func (dbms *DBMS) Ack(ack *types.Ack) (err error) { var db *Database var exists bool + // check permission + addr, err := 
crypto.PubKeyHash(ack.Header.Signee) + if err != nil { + return + } + err = dbms.checkPermission(addr, ack.Header.Response.Request.DatabaseID, types.ReadQuery, nil) + if err != nil { + return + } // find database if db, exists = dbms.getMeta(ack.Header.Response.Request.DatabaseID); !exists { err = ErrNotExists @@ -499,32 +513,59 @@ func (dbms *DBMS) removeMeta(dbID proto.DatabaseID) (err error) { } func (dbms *DBMS) checkPermission(addr proto.AccountAddress, - dbID proto.DatabaseID, queryType types.QueryType) (err error) { + dbID proto.DatabaseID, queryType types.QueryType, queries []types.Query) (err error) { log.Debugf("in checkPermission, database id: %s, user addr: %s", dbID, addr.String()) - if permStat, ok := dbms.busService.RequestPermStat(dbID, addr); ok { - if !permStat.Status.EnableQuery() { - err = errors.Wrapf(ErrPermissionDeny, "cannot query, status: %d", permStat.Status) + var ( + permStat *types.PermStat + ok bool + ) + + // get database perm stat + permStat, ok = dbms.busService.RequestPermStat(dbID, addr) + + // perm stat not exists + if !ok { + err = errors.Wrap(ErrPermissionDeny, "database not exists") + return + } + + // check if query is enabled + if !permStat.Status.EnableQuery() { + err = errors.Wrapf(ErrPermissionDeny, "cannot query, status: %d", permStat.Status) + return + } + + // check query type permission + switch queryType { + case types.ReadQuery: + if !permStat.Permission.HasReadPermission() { + err = errors.Wrapf(ErrPermissionDeny, "cannot read, permission: %d", permStat.Permission) return } - if queryType == types.ReadQuery { - if !permStat.Permission.CheckRead() { - err = errors.Wrapf(ErrPermissionDeny, "cannot read, permission: %d", permStat.Permission) - return - } - } else if queryType == types.WriteQuery { - if !permStat.Permission.CheckWrite() { - err = errors.Wrapf(ErrPermissionDeny, "cannot write, permission: %d", permStat.Permission) - return - } - } else { - err = errors.Wrapf(ErrInvalidPermission, - "invalid permission, 
permission: %d", permStat.Permission) + case types.WriteQuery: + if !permStat.Permission.HasWritePermission() { + err = errors.Wrapf(ErrPermissionDeny, "cannot write, permission: %d", permStat.Permission) return - } - } else { - err = errors.Wrap(ErrPermissionDeny, "database not exists") + default: + err = errors.Wrapf(ErrInvalidPermission, + "invalid permission, permission: %d", permStat.Permission) + return + } + + // check for query pattern + var ( + disallowedQuery string + hasDisallowedQuery bool + ) + + if disallowedQuery, hasDisallowedQuery = permStat.Permission.HasDisallowedQueryPatterns(queries); hasDisallowedQuery { + err = errors.Wrapf(ErrPermissionDeny, "disallowed query %s", disallowedQuery) + log.WithError(err).WithFields(log.Fields{ + "permission": permStat.Permission, + "query": disallowedQuery, + }).Debug("can not query") return } @@ -538,7 +579,7 @@ func (dbms *DBMS) addTxSubscription(dbID proto.DatabaseID, nodeID proto.NodeID, log.WithFields(log.Fields{ "databaseID": dbID, "nodeID": nodeID, - }).WithError(err).Warning("get pubkey failed in addTxSubscription") + }).WithError(err).Warning("get public key failed in addTxSubscription") return } addr, err := crypto.PubKeyHash(pubkey) @@ -557,7 +598,7 @@ func (dbms *DBMS) addTxSubscription(dbID proto.DatabaseID, nodeID proto.NodeID, "startHeight": startHeight, }).Debugf("addTxSubscription") - err = dbms.checkPermission(addr, dbID, types.ReadQuery) + err = dbms.checkPermission(addr, dbID, types.ReadQuery, nil) if err != nil { log.WithFields(log.Fields{"databaseID": dbID, "addr": addr}).WithError(err).Warning("permission deny") return diff --git a/worker/dbms_config.go b/worker/dbms_config.go index bc8fcded7..e701b1a19 100644 --- a/worker/dbms_config.go +++ b/worker/dbms_config.go @@ -29,7 +29,8 @@ var ( // DBMSConfig defines the local multi-database management system config. 
type DBMSConfig struct { - RootDir string - Server *rpc.Server - MaxReqTimeGap time.Duration + RootDir string + Server *rpc.Server + MaxReqTimeGap time.Duration + OnCreateDatabase func() } diff --git a/worker/dbms_mux.go b/worker/dbms_mux.go index bb9a897fa..4e393642d 100644 --- a/worker/dbms_mux.go +++ b/worker/dbms_mux.go @@ -27,8 +27,10 @@ import ( ) const ( - // DBKayakMethodName defines the database kayak rpc method name. - DBKayakMethodName = "Call" + // DBKayakApplyMethodName defines the database kayak apply rpc method name. + DBKayakApplyMethodName = "Apply" + // DBKayakFetchMethodName defines the database kayak fetch rpc method name. + DBKayakFetchMethodName = "Fetch" ) // DBKayakMuxService defines a mux service for sqlchain kayak. @@ -55,8 +57,8 @@ func (s *DBKayakMuxService) unregister(id proto.DatabaseID) { s.serviceMap.Delete(id) } -// Call handles kayak call. -func (s *DBKayakMuxService) Call(req *kt.RPCRequest, _ *interface{}) (err error) { +// Apply handles kayak apply call. +func (s *DBKayakMuxService) Apply(req *kt.ApplyRequest, _ *interface{}) (err error) { // call apply to specified kayak // treat req.Instance as DatabaseID id := proto.DatabaseID(req.Instance) @@ -67,3 +69,19 @@ func (s *DBKayakMuxService) Call(req *kt.RPCRequest, _ *interface{}) (err error) return errors.Wrapf(ErrUnknownMuxRequest, "instance %v", req.Instance) } + +// Fetch handles kayak fetch call. 
+func (s *DBKayakMuxService) Fetch(req *kt.FetchRequest, resp *kt.FetchResponse) (err error) { + id := proto.DatabaseID(req.Instance) + + if v, ok := s.serviceMap.Load(id); ok { + var l *kt.Log + if l, err = v.(*kayak.Runtime).Fetch(req.GetContext(), req.Index); err != nil { + resp.Log = l + resp.Instance = req.Instance + return err + } + } + + return errors.Wrapf(ErrUnknownMuxRequest, "instance %v", req.Instance) +} diff --git a/worker/dbms_rpc.go b/worker/dbms_rpc.go index 1fd40fd2a..e3d5dda3b 100644 --- a/worker/dbms_rpc.go +++ b/worker/dbms_rpc.go @@ -18,8 +18,6 @@ package worker import ( "github.com/CovenantSQL/CovenantSQL/proto" - //"context" - //"runtime/trace" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/types" @@ -82,10 +80,6 @@ func (rpc *DBMSRPCService) Query(req *types.Request, res *types.Response) (err e // dbQueryFailCounter.Mark(1) // return //} - //ctx := context.Background() - //ctx, task := trace.NewTask(ctx, "Query") - //defer task.End() - //defer trace.StartRegion(ctx, "QueryRegion").End() // verify query is sent from the request node if req.Envelope.NodeID.String() != string(req.Header.NodeID) { // node id mismatch @@ -112,11 +106,6 @@ func (rpc *DBMSRPCService) Ack(ack *types.Ack, _ *types.AckResponse) (err error) //if err = ack.Verify(); err != nil { // return //} - //ctx := context.Background() - //ctx, task := trace.NewTask(ctx, "Ack") - //defer task.End() - //defer trace.StartRegion(ctx, "AckRegion").End() - // verify if ack node is the original ack node if ack.Envelope.NodeID.String() != string(ack.Header.Response.Request.NodeID) { err = errors.Wrap(ErrInvalidRequest, "request node id mismatch in ack") diff --git a/worker/dbms_test.go b/worker/dbms_test.go index 4895b024f..5984bb6c9 100644 --- a/worker/dbms_test.go +++ b/worker/dbms_test.go @@ -19,6 +19,7 @@ package worker import ( "io/ioutil" "os" + "sync/atomic" "testing" "time" @@ -103,6 +104,8 @@ func 
TestDBMS(t *testing.T) { err = req.Sign(privateKey) So(err, ShouldBeNil) + var seqNo uint64 + Convey("with bp privilege", func() { // send update again err = testRequest(route.DBSDeploy, req, &res) @@ -112,10 +115,12 @@ func TestDBMS(t *testing.T) { // sending write query var writeQuery *types.Request var queryRes *types.Response - writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 1, dbID, []string{ - "create table test (test int)", - "insert into test values(1)", - }) + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "create table test (test int)", + "insert into test values(1)", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, writeQuery, &queryRes) @@ -123,9 +128,11 @@ func TestDBMS(t *testing.T) { // sending read query var readQuery *types.Request - readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, 1, 2, dbID, []string{ - "select * from test", - }) + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "select * from test", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, readQuery, &queryRes) @@ -134,40 +141,41 @@ func TestDBMS(t *testing.T) { // grant write and read permission err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, - &types.PermStat{Permission: types.Write, Status: types.Normal}) + &types.PermStat{Permission: types.UserPermissionFromRole(types.ReadWrite), Status: types.Normal}) So(err, ShouldBeNil) userState, ok := dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) So(ok, ShouldBeTrue) - So(userState.Permission, ShouldEqual, types.Write) + So(userState.Permission, ShouldNotBeNil) + So(userState.Permission.Role, ShouldEqual, types.ReadWrite) So(userState.Status, ShouldEqual, types.Normal) Convey("success write and read", func() { // sending write query var writeQuery *types.Request var queryRes *types.Response - writeQuery, err = 
buildQueryWithDatabaseID(types.WriteQuery, 1, 1, dbID, []string{ - "create table test (test int)", - "insert into test values(1)", - }) + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "create table test (test int)", + "insert into test values(1)", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, writeQuery, &queryRes) So(err, ShouldBeNil) - err = queryRes.Verify() - So(err, ShouldBeNil) So(queryRes.Header.RowCount, ShouldEqual, 0) // sending read query var readQuery *types.Request - readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, 1, 2, dbID, []string{ - "select * from test", - }) + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "select * from test", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, readQuery, &queryRes) So(err, ShouldBeNil) - err = queryRes.Verify() - So(err, ShouldBeNil) So(queryRes.Header.RowCount, ShouldEqual, uint64(1)) So(queryRes.Payload.Columns, ShouldResemble, []string{"test"}) So(queryRes.Payload.DeclTypes, ShouldResemble, []string{"int"}) @@ -193,30 +201,35 @@ func TestDBMS(t *testing.T) { // revoke write permission err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, - &types.PermStat{Permission: types.Read, Status: types.Normal}) + &types.PermStat{Permission: types.UserPermissionFromRole(types.Read), Status: types.Normal}) userState, ok := dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) So(ok, ShouldBeTrue) - So(userState.Permission, ShouldEqual, types.Read) + So(userState.Permission, ShouldNotBeNil) + So(userState.Permission.Role, ShouldEqual, types.Read) So(userState.Status, ShouldEqual, types.Normal) Convey("success reading and fail to write", func() { // sending write query var writeQuery *types.Request var queryRes *types.Response - writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 3, dbID, []string{ - "create table test (test int)", 
- "insert into test values(1)", - }) + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "insert into test values(1)", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, writeQuery, &queryRes) + So(err, ShouldNotBeNil) So(err.Error(), ShouldContainSubstring, ErrPermissionDeny.Error()) // sending read query var readQuery *types.Request - readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, 1, 4, dbID, []string{ - "select * from test", - }) + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "select * from test", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, readQuery, &queryRes) @@ -225,24 +238,66 @@ func TestDBMS(t *testing.T) { err = dbms.addTxSubscription(dbID, nodeID, 1) So(err, ShouldBeNil) }) + + // grant write only permission + err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, + &types.PermStat{Permission: types.UserPermissionFromRole(types.Write), Status: types.Normal}) + userState, ok = dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) + So(ok, ShouldBeTrue) + So(userState.Permission, ShouldNotBeNil) + So(userState.Permission.Role, ShouldEqual, types.Write) + So(userState.Status, ShouldEqual, types.Normal) + + Convey("success writing and failed to read", func() { + // sending read query + var readQuery *types.Request + var queryRes *types.Response + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "select * from test", + }) + So(err, ShouldBeNil) + + err = testRequest(route.DBSQuery, readQuery, &queryRes) + So(err, ShouldNotBeNil) + So(err.Error(), ShouldContainSubstring, ErrPermissionDeny.Error()) + + // sending write query + var writeQuery *types.Request + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "insert into test values(1)", + }) + So(err, 
ShouldBeNil) + + err = testRequest(route.DBSQuery, writeQuery, &queryRes) + So(err, ShouldBeNil) + So(queryRes.Header.RowCount, ShouldEqual, 0) + }) }) // grant invalid permission err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, - &types.PermStat{Permission: types.Void, Status: types.Normal}) + &types.PermStat{Permission: types.UserPermissionFromRole(types.Void), Status: types.Normal}) + So(err, ShouldBeNil) userState, ok = dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) So(ok, ShouldBeTrue) - So(userState.Permission, ShouldEqual, types.Void) + So(userState.Permission, ShouldNotBeNil) + So(userState.Permission.Role, ShouldEqual, types.Void) So(userState.Status, ShouldEqual, types.Normal) Convey("invalid permission query should fail", func() { // sending write query var writeQuery *types.Request var queryRes *types.Response - writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 5, dbID, []string{ - "create table test (test int)", - "insert into test values(1)", - }) + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "create table test (test int)", + "insert into test values(1)", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, writeQuery, &queryRes) @@ -250,9 +305,11 @@ func TestDBMS(t *testing.T) { // sending read query var readQuery *types.Request - readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, 1, 6, dbID, []string{ - "select * from test", - }) + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "select * from test", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, readQuery, &queryRes) @@ -264,20 +321,24 @@ func TestDBMS(t *testing.T) { // grant admin permission but in arrears err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, - &types.PermStat{Permission: types.Admin, Status: types.Arrears}) + &types.PermStat{Permission: 
types.UserPermissionFromRole(types.Admin), Status: types.Arrears}) + So(err, ShouldBeNil) userState, ok = dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) So(ok, ShouldBeTrue) - So(userState.Permission, ShouldEqual, types.Admin) + So(userState.Permission, ShouldNotBeNil) + So(userState.Permission.Role, ShouldEqual, types.Admin) So(userState.Status, ShouldEqual, types.Arrears) Convey("arrears query should fail", func() { // sending write query var writeQuery *types.Request var queryRes *types.Response - writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 7, dbID, []string{ - "create table test (test int)", - "insert into test values(1)", - }) + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "create table test (test int)", + "insert into test values(1)", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, writeQuery, &queryRes) @@ -285,9 +346,11 @@ func TestDBMS(t *testing.T) { // sending read query var readQuery *types.Request - readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, 1, 8, dbID, []string{ - "select * from test", - }) + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "select * from test", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, readQuery, &queryRes) @@ -296,39 +359,41 @@ func TestDBMS(t *testing.T) { // switch user to normal err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, - &types.PermStat{Permission: types.Admin, Status: types.Normal}) + &types.PermStat{Permission: types.UserPermissionFromRole(types.Admin), Status: types.Normal}) + So(err, ShouldBeNil) userState, ok = dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) So(ok, ShouldBeTrue) - So(userState.Permission, ShouldEqual, types.Admin) + So(userState.Permission, ShouldNotBeNil) + So(userState.Permission.Role, ShouldEqual, types.Admin) So(userState.Status, ShouldEqual, types.Normal) 
Convey("can send read and write queries", func() { // sending write query var writeQuery *types.Request var queryRes *types.Response - writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 9, dbID, []string{ - "create table test (test int)", - "insert into test values(1)", - }) + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "create table test (test int)", + "insert into test values(1)", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, writeQuery, &queryRes) So(err, ShouldBeNil) - err = queryRes.Verify() - So(err, ShouldBeNil) So(queryRes.Header.RowCount, ShouldEqual, 0) // sending read query var readQuery *types.Request - readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, 1, 10, dbID, []string{ - "select * from test", - }) + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "select * from test", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, readQuery, &queryRes) So(err, ShouldBeNil) - err = queryRes.Verify() - So(err, ShouldBeNil) So(queryRes.Header.RowCount, ShouldEqual, uint64(1)) So(queryRes.Payload.Columns, ShouldResemble, []string{"test"}) So(queryRes.Payload.DeclTypes, ShouldResemble, []string{"int"}) @@ -346,11 +411,115 @@ func TestDBMS(t *testing.T) { So(err, ShouldBeNil) }) + // enforce query pattern regulations + err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, + &types.PermStat{Permission: &types.UserPermission{ + Role: types.Admin, + Patterns: []string{ + "create table test (test int)", + "SELECT 1", + "INSERT INTO TEST VALUES(1)", + }, + }, Status: types.Normal}) + So(err, ShouldBeNil) + userState, ok = dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) + So(ok, ShouldBeTrue) + So(userState.Permission, ShouldNotBeNil) + So(userState.Permission.Role, ShouldEqual, types.Admin) + So(userState.Permission.Patterns, ShouldHaveLength, 3) + + Convey("query patterns 
restrictions", func() { + var writeQuery *types.Request + var queryRes *types.Response + + // sending allowed write query + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "create table test (test int)", + "INSERT INTO TEST VALUES(1)", + }) + So(err, ShouldBeNil) + + err = testRequest(route.DBSQuery, writeQuery, &queryRes) + So(err, ShouldBeNil) + So(queryRes.Header.RowCount, ShouldEqual, 0) + + // sending allowed read query + var readQuery *types.Request + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "SELECT 1", + }) + So(err, ShouldBeNil) + + err = testRequest(route.DBSQuery, readQuery, &queryRes) + So(err, ShouldBeNil) + So(queryRes.Header.RowCount, ShouldEqual, uint64(1)) + So(queryRes.Payload.Rows, ShouldHaveLength, 1) + So(queryRes.Payload.Rows[0].Values, ShouldHaveLength, 1) + So(queryRes.Payload.Rows[0].Values[0], ShouldEqual, 1) + + // sending disallowed write query + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "insert into test values(1)", + }) + So(err, ShouldBeNil) + err = testRequest(route.DBSQuery, writeQuery, &queryRes) + So(err, ShouldNotBeNil) + + // sending disallowed write query mixed with valid write query + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "INSERT INTO TEST VALUES(1)", + "insert into test values(1)", + }) + So(err, ShouldBeNil) + err = testRequest(route.DBSQuery, writeQuery, &queryRes) + So(err, ShouldNotBeNil) + + // sending disallowed read query + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "select * from test", + }) + So(err, ShouldBeNil) + err = testRequest(route.DBSQuery, readQuery, &queryRes) + So(err, ShouldNotBeNil) + + // sending disallowed read query + readQuery, err = 
buildQueryWithDatabaseID(types.ReadQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "SELECT 1", + "select * from test", + }) + So(err, ShouldBeNil) + err = testRequest(route.DBSQuery, readQuery, &queryRes) + So(err, ShouldNotBeNil) + }) + + // set back permission object + err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, + &types.PermStat{Permission: types.UserPermissionFromRole(types.Admin), Status: types.Normal}) + So(err, ShouldBeNil) + userState, ok = dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) + So(ok, ShouldBeTrue) + So(userState.Permission, ShouldNotBeNil) + So(userState.Permission.Role, ShouldEqual, types.Admin) + So(userState.Status, ShouldEqual, types.Normal) + Convey("query non-existent database", func() { // sending write query var writeQuery *types.Request var queryRes *types.Response - writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 1, + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, + 1, atomic.AddUint64(&seqNo, 1), proto.DatabaseID("db_not_exists"), []string{ "create table test (test int)", "insert into test values(1)", diff --git a/worker/helper_test.go b/worker/helper_test.go index e7b48b14f..d561f2ed6 100644 --- a/worker/helper_test.go +++ b/worker/helper_test.go @@ -100,22 +100,22 @@ var ( testNotExistAddr = proto.AccountAddress(hash.THashH([]byte{'a', 'a'})) testUser1 = &types.SQLChainUser{ Address: testAddr, - Permission: types.Write, + Permission: types.UserPermissionFromRole(types.Write), Status: types.Normal, } testUser2 = &types.SQLChainUser{ Address: testAddr, - Permission: types.Read, + Permission: types.UserPermissionFromRole(types.Read), Status: types.Arrears, } testUser3 = &types.SQLChainUser{ Address: testAddr, - Permission: types.Write, + Permission: types.UserPermissionFromRole(types.Write), Status: types.Reminder, } testUser4 = &types.SQLChainUser{ Address: testAddr, - Permission: types.Read, + Permission: types.UserPermissionFromRole(types.Read), Status: 
types.Arbitration, } ) diff --git a/xenomint/chain.go b/xenomint/chain.go index 85193ce96..7f4e9fac3 100644 --- a/xenomint/chain.go +++ b/xenomint/chain.go @@ -17,6 +17,7 @@ package xenomint import ( + "database/sql" "time" ca "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" @@ -38,9 +39,8 @@ type Chain struct { // NewChain returns new chain instance. func NewChain(filename string) (c *Chain, err error) { var ( - strg xi.Storage - state *State - priv *ca.PrivateKey + strg xi.Storage + priv *ca.PrivateKey ) // generate empty nodeId nodeID := proto.NodeID("0000000000000000000000000000000000000000000000000000000000000000") @@ -49,14 +49,11 @@ func NewChain(filename string) (c *Chain, err error) { if strg, err = xs.NewSqlite(filename); err != nil { return } - if state, err = NewState(nodeID, strg); err != nil { - return - } if priv, err = kms.GetLocalPrivateKey(); err != nil { return } c = &Chain{ - state: state, + state: NewState(sql.LevelReadUncommitted, nodeID, strg), priv: priv, } return @@ -83,11 +80,11 @@ func (c *Chain) Query(req *types.Request) (resp *types.Response, err error) { } log.WithFields(fields).Debug("Chain.Query duration stat (us)") }() - if ref, resp, err = c.state.Query(req); err != nil { + if ref, resp, err = c.state.Query(req, true); err != nil { return } queried = time.Since(start) - if err = resp.Sign(c.priv); err != nil { + if err = resp.BuildHash(); err != nil { return } signed = time.Since(start) diff --git a/xenomint/mux.go b/xenomint/mux.go index 48806ade9..520a81fe9 100644 --- a/xenomint/mux.go +++ b/xenomint/mux.go @@ -17,8 +17,6 @@ package xenomint import ( - //"context" - //"runtime/trace" "sync" "time" @@ -87,9 +85,6 @@ type MuxQueryResponse struct { // Query is the RPC method to process database query on mux service. 
func (s *MuxService) Query(req *MuxQueryRequest, resp *MuxQueryResponse) (err error) { - //var ctx, task = trace.NewTask(context.Background(), "MuxService.Query") - //defer task.End() - //defer trace.StartRegion(ctx, "Total").End() var ( c *Chain r *types.Response diff --git a/xenomint/query_sanitizer.go b/xenomint/query_sanitizer.go index 760f5092c..10117d863 100644 --- a/xenomint/query_sanitizer.go +++ b/xenomint/query_sanitizer.go @@ -74,6 +74,10 @@ var ( ) func convertQueryAndBuildArgs(pattern string, args []types.NamedArg) (containsDDL bool, p string, ifs []interface{}, err error) { + if lower := strings.ToLower(pattern); strings.Contains(lower, "begin") || + strings.Contains(lower, "rollback") { + return false, pattern, nil, nil + } var ( tokenizer = sqlparser.NewStringTokenizer(pattern) queryParts []string diff --git a/xenomint/state.go b/xenomint/state.go index fe41483ac..bdf84735e 100644 --- a/xenomint/state.go +++ b/xenomint/state.go @@ -30,16 +30,42 @@ import ( "github.com/pkg/errors" ) +type sqlQuerier interface { + Query(query string, args ...interface{}) (*sql.Rows, error) + QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) +} + +type sqlExecuter interface { + sqlQuerier + Exec(query string, args ...interface{}) (sql.Result, error) + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) + Commit() error + Rollback() error +} + +type sqlDB struct { + *sql.DB +} + +func (db *sqlDB) Commit() error { + return nil +} + +func (db *sqlDB) Rollback() error { + return nil +} + // State defines a xenomint state which is bound to a underlying storage. type State struct { + level sql.IsolationLevel + sync.RWMutex strg xi.Storage pool *pool closed bool nodeID proto.NodeID - // unc is the uncommitted transaction. 
- unc *sql.Tx + executer sqlExecuter maxTx uint64 lastCommitPoint uint64 current uint64 // current is the current lastSeq of the current transaction @@ -47,31 +73,43 @@ type State struct { } // NewState returns a new State bound to strg. -func NewState(nodeID proto.NodeID, strg xi.Storage) (s *State, err error) { - var t = &State{ +func NewState(level sql.IsolationLevel, nodeID proto.NodeID, strg xi.Storage) (s *State) { + s = &State{ + level: level, nodeID: nodeID, strg: strg, pool: newPool(), maxTx: 100, } - if t.unc, err = t.strg.Writer().Begin(); err != nil { - return - } - s = t + s.openSQLExecuter() return } -func (s *State) incSeq() { - atomic.AddUint64(&s.current, 1) +func (s *State) openSQLExecuter() { + if s.level == sql.LevelReadUncommitted { + var err error + if s.executer, err = s.strg.Writer().Begin(); err != nil { + log.WithError(err).Fatal("failed to open transaction") + } + } else { + s.executer = &sqlDB{DB: s.strg.Writer()} + } } -func (s *State) setSeq(id uint64) { - atomic.StoreUint64(&s.current, id) +func (s *State) reader() *sql.DB { + if s.level == sql.LevelReadUncommitted { + return s.strg.DirtyReader() + } + return s.strg.Reader() +} + +func (s *State) incSeq() { + atomic.AddUint64(&s.current, 1) } // SetSeq sets the initial id of the current transaction. 
func (s *State) SetSeq(id uint64) { - s.setSeq(id) + atomic.StoreUint64(&s.current, id) } func (s *State) getSeq() uint64 { @@ -89,15 +127,11 @@ func (s *State) Close(commit bool) (err error) { if s.closed { return } - if s.unc != nil { + if s.executer != nil { if commit { - if err = s.uncCommit(); err != nil { - log.WithError(err).Fatal("failed to commit") - } + s.commitSQLExecuter() } else { - if err = s.uncRollback(); err != nil { - log.WithError(err).Fatal("failed to rollback") - } + s.rollbackSQLExecuter() } } if err = s.strg.Close(); err != nil { @@ -115,11 +149,6 @@ func buildTypeNamesFromSQLColumnTypes(types []*sql.ColumnType) (names []string) return } -type sqlQuerier interface { - Query(query string, args ...interface{}) (*sql.Rows, error) - QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) -} - func readSingle( ctx context.Context, qer sqlQuerier, q *types.Query, ) ( @@ -187,7 +216,7 @@ func (s *State) readWithContext( ) // TODO(leventeliu): no need to run every read query here. 
for i, v := range req.Payload.Queries { - if cnames, ctypes, data, ierr = readSingle(ctx, s.strg.DirtyReader(), &v); ierr != nil { + if cnames, ctypes, data, ierr = readSingle(ctx, s.reader(), &v); ierr != nil { err = errors.Wrapf(ierr, "query at #%d failed", i) // Add to failed pool list s.pool.setFailed(req) @@ -199,11 +228,12 @@ func (s *State) readWithContext( resp = &types.Response{ Header: types.SignedResponseHeader{ ResponseHeader: types.ResponseHeader{ - Request: req.Header, - NodeID: s.nodeID, - Timestamp: s.getLocalTime(), - RowCount: uint64(len(data)), - LogOffset: s.getSeq(), + Request: req.Header.RequestHeader, + RequestHash: req.Header.Hash(), + NodeID: s.nodeID, + Timestamp: s.getLocalTime(), + RowCount: uint64(len(data)), + LogOffset: s.getSeq(), }, }, Payload: types.ResponsePayload{ @@ -225,14 +255,14 @@ func (s *State) readTx( data [][]interface{} querier sqlQuerier ) - if atomic.LoadUint32(&s.hasSchemaChange) == 1 { + if s.level == sql.LevelReadUncommitted && atomic.LoadUint32(&s.hasSchemaChange) == 1 { // lock transaction s.Lock() defer s.Unlock() - querier = s.unc + querier = s.executer } else { var tx *sql.Tx - if tx, ierr = s.strg.DirtyReader().Begin(); ierr != nil { + if tx, ierr = s.reader().Begin(); ierr != nil { err = errors.Wrap(ierr, "open tx failed") return } @@ -263,11 +293,12 @@ func (s *State) readTx( resp = &types.Response{ Header: types.SignedResponseHeader{ ResponseHeader: types.ResponseHeader{ - Request: req.Header, - NodeID: s.nodeID, - Timestamp: s.getLocalTime(), - RowCount: uint64(len(data)), - LogOffset: id, + Request: req.Header.RequestHeader, + RequestHash: req.Header.Hash(), + NodeID: s.nodeID, + Timestamp: s.getLocalTime(), + RowCount: uint64(len(data)), + LogOffset: id, }, }, Payload: types.ResponsePayload{ @@ -306,7 +337,7 @@ func (s *State) writeSingle( return } //parsed = time.Since(start) - if res, err = s.unc.Exec(pattern, args...); err == nil { + if res, err = s.executer.Exec(pattern, args...); err == nil { if 
containsDDL { atomic.StoreUint32(&s.hasSchemaChange, 1) } @@ -317,7 +348,7 @@ func (s *State) writeSingle( } func (s *State) write( - ctx context.Context, req *types.Request) (ref *QueryTracker, resp *types.Response, err error, + ctx context.Context, req *types.Request, isLeader bool) (ref *QueryTracker, resp *types.Response, err error, ) { var ( lastSeq uint64 @@ -364,13 +395,13 @@ func (s *State) write( lockReleased = time.Since(start) }() lastSeq = s.getSeq() - if qcnt > 1 { + if qcnt > 1 && s.level == sql.LevelReadUncommitted { // Set savepoint - if _, ierr = s.unc.Exec(`SAVEPOINT "?"`, lastSeq); ierr != nil { + if _, ierr = s.executer.Exec(`SAVEPOINT "?"`, lastSeq); ierr != nil { err = errors.Wrapf(ierr, "failed to create savepoint %d", lastSeq) return } - defer s.unc.Exec(`ROLLBACK TO "?"`, lastSeq) + defer s.executer.Exec(`ROLLBACK TO "?"`, lastSeq) } for i, v := range req.Payload.Queries { var res sql.Result @@ -386,20 +417,28 @@ func (s *State) write( lastInsertID, _ = res.LastInsertId() totalAffectedRows += curAffectedRows } - if qcnt > 1 { - // Release savepoint - if _, ierr = s.unc.Exec(`RELEASE SAVEPOINT "?"`, lastSeq); ierr != nil { - err = errors.Wrapf(ierr, "failed to release savepoint %d", lastSeq) - return + if s.level == sql.LevelReadUncommitted { + if qcnt > 1 { + // Release savepoint + if _, ierr = s.executer.Exec(`RELEASE SAVEPOINT "?"`, lastSeq); ierr != nil { + err = errors.Wrapf(ierr, "failed to release savepoint %d", lastSeq) + return + } } + } else { + // NOTE(leventeliu): this will cancel any uncommitted transaction, and do not harm to + // committed ones. 
+ s.executer.Exec(`ROLLBACK`) } // Try to commit if the ongoing tx is too large or schema is changed if s.getSeq()-s.getLastCommitPoint() > s.maxTx || atomic.LoadUint32(&s.hasSchemaChange) != 0 { - s.tryCommit() + s.flushSQLExecuter() } writeDone = time.Since(start) - s.pool.enqueue(lastSeq, query) + if isLeader { + s.pool.enqueue(lastSeq, query) + } enqueued = time.Since(start) return }(); err != nil { @@ -410,7 +449,8 @@ func (s *State) write( resp = &types.Response{ Header: types.SignedResponseHeader{ ResponseHeader: types.ResponseHeader{ - Request: req.Header, + Request: req.Header.RequestHeader, + RequestHash: req.Header.Hash(), NodeID: s.nodeID, Timestamp: s.getLocalTime(), RowCount: 0, @@ -449,7 +489,7 @@ func (s *State) replay(ctx context.Context, req *types.Request, resp *types.Resp // Try to commit if the ongoing tx is too large or schema is changed if s.getSeq()-s.getLastCommitPoint() > s.maxTx || atomic.LoadUint32(&s.hasSchemaChange) != 0 { - s.tryCommit() + s.flushSQLExecuter() } s.pool.enqueue(lastSeq, query) return @@ -471,6 +511,9 @@ func (s *State) ReplayBlockWithContext(ctx context.Context, block *types.Block) s.Lock() defer s.Unlock() for i, q := range block.QueryTxs { + if q.Request.Header.QueryType == types.ReadQuery { + continue + } var query = &QueryTracker{Req: q.Request, Resp: &types.Response{Header: *q.Response}} lastsp = s.getSeq() if q.Response.ResponseHeader.LogOffset > lastsp { @@ -487,9 +530,6 @@ func (s *State) ReplayBlockWithContext(ctx context.Context, block *types.Block) } // Replay query for j, v := range q.Request.Payload.Queries { - if q.Request.Header.QueryType == types.ReadQuery { - continue - } if q.Request.Header.QueryType != types.WriteQuery { err = errors.Wrapf(ErrInvalidRequest, "replay block at %d:%d", i, j) return @@ -502,7 +542,7 @@ func (s *State) ReplayBlockWithContext(ctx context.Context, block *types.Block) s.pool.enqueue(lastsp, query) } // Always try to commit after a block is successfully replayed - 
s.tryCommit() + s.flushSQLExecuter() // Remove duplicate failed queries from local pool for _, r := range block.FailedReqs { s.pool.removeFailed(r) @@ -540,12 +580,7 @@ func (s *State) commit() (err error) { lockReleased = time.Since(start) }() lockAcquired = time.Since(start) - if err = s.uncCommit(); err != nil { - log.WithError(err).Fatal("failed to commit") - } - if s.unc, err = s.strg.Writer().Begin(); err != nil { - log.WithError(err).Fatal("failed to begin") - } + s.flushSQLExecuter() committed = time.Since(start) _ = s.pool.queries s.pool = newPool() @@ -591,7 +626,7 @@ func (s *State) CommitExWithContext( lockReleased = time.Since(start) }() // Always try to commit before the block is produced - s.tryCommit() + s.flushSQLExecuter() committed = time.Since(start) // Return pooled items and reset failed = s.pool.failedList() @@ -601,33 +636,26 @@ func (s *State) CommitExWithContext( return } -func (s *State) tryCommit() { - var err error - if err = s.uncCommit(); err != nil { - log.WithError(err).Fatal("failed to commit") - } - if s.unc, err = s.strg.Writer().Begin(); err != nil { - log.WithError(err).Fatal("failed to begin") - } +func (s *State) flushSQLExecuter() { + s.commitSQLExecuter() + s.openSQLExecuter() } -func (s *State) uncCommit() (err error) { - if err = s.unc.Commit(); err != nil { - return +func (s *State) commitSQLExecuter() { + if err := s.executer.Commit(); err != nil { + log.WithError(err).Fatal("failed to commit") } // reset schema change flag atomic.StoreUint32(&s.hasSchemaChange, 0) atomic.StoreUint64(&s.lastCommitPoint, s.getSeq()) - return } -func (s *State) uncRollback() (err error) { - if err = s.unc.Rollback(); err != nil { - return +func (s *State) rollbackSQLExecuter() { + if err := s.executer.Rollback(); err != nil { + log.WithError(err).Fatal("failed to rollback") } // reset schema change flag atomic.StoreUint32(&s.hasSchemaChange, 0) - return } func (s *State) getLocalTime() time.Time { @@ -636,20 +664,20 @@ func (s *State) 
getLocalTime() time.Time { // Query does the query(ies) in req, pools the request and persists any change to // the underlying storage. -func (s *State) Query(req *types.Request) (ref *QueryTracker, resp *types.Response, err error) { - return s.QueryWithContext(context.Background(), req) +func (s *State) Query(req *types.Request, isLeader bool) (ref *QueryTracker, resp *types.Response, err error) { + return s.QueryWithContext(context.Background(), req, isLeader) } // QueryWithContext does the query(ies) in req, pools the request and persists any change to // the underlying storage. func (s *State) QueryWithContext( - ctx context.Context, req *types.Request) (ref *QueryTracker, resp *types.Response, err error, + ctx context.Context, req *types.Request, isLeader bool) (ref *QueryTracker, resp *types.Response, err error, ) { switch req.Header.QueryType { case types.ReadQuery: return s.readTx(ctx, req) case types.WriteQuery: - return s.write(ctx, req) + return s.write(ctx, req, isLeader) default: err = ErrInvalidRequest } diff --git a/xenomint/state_test.go b/xenomint/state_test.go index c6dec59bd..48965d2e4 100644 --- a/xenomint/state_test.go +++ b/xenomint/state_test.go @@ -17,10 +17,12 @@ package xenomint import ( + "context" "database/sql" "fmt" "os" "path" + "sync" "testing" "github.com/CovenantSQL/CovenantSQL/crypto/hash" @@ -33,6 +35,10 @@ import ( . 
"github.com/smartystreets/goconvey/convey" ) +var ( + nodeID = proto.NodeID("0000000000000000000000000000000000000000000000000000000000000000") +) + func TestState(t *testing.T) { Convey("Given a chain state object", t, func() { var ( @@ -43,12 +49,10 @@ func TestState(t *testing.T) { strg1, strg2 xi.Storage err error ) - nodeID := proto.NodeID("0000000000000000000000000000000000000000000000000000000000000000") strg1, err = xs.NewSqlite(fmt.Sprint("file:", fl1)) So(err, ShouldBeNil) So(strg1, ShouldNotBeNil) - st1, err = NewState(nodeID, strg1) - So(err, ShouldBeNil) + st1 = NewState(sql.LevelReadUncommitted, nodeID, strg1) So(st1, ShouldNotBeNil) Reset(func() { // Clean database file after each pass @@ -64,8 +68,7 @@ func TestState(t *testing.T) { strg2, err = xs.NewSqlite(fmt.Sprint("file:", fl2)) So(err, ShouldBeNil) So(strg1, ShouldNotBeNil) - st2, err = NewState(nodeID, strg2) - So(err, ShouldBeNil) + st2 = NewState(sql.LevelReadUncommitted, nodeID, strg2) So(st1, ShouldNotBeNil) Reset(func() { // Clean database file after each pass @@ -85,7 +88,7 @@ func TestState(t *testing.T) { var req = buildRequest(types.WriteQuery, []types.Query{ buildQuery(`CREATE TABLE t1 (k INT, v TEXT, PRIMARY KEY(k))`), }) - _, _, err = st1.Query(req) + _, _, err = st1.Query(req, true) So(err, ShouldNotBeNil) err = errors.Cause(err) So(err, ShouldNotBeNil) @@ -99,12 +102,12 @@ func TestState(t *testing.T) { }) resp *types.Response ) - _, resp, err = st1.Query(req) + _, resp, err = st1.Query(req, true) So(err, ShouldBeNil) So(resp, ShouldNotBeNil) _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ buildQuery(`SELECT * FROM t1`), - })) + }), true) // any schema change query will trigger performance degradation mode in current block So(err, ShouldBeNil) }) @@ -121,12 +124,12 @@ func TestState(t *testing.T) { }) resp *types.Response ) - _, resp, err = st1.Query(req) + _, resp, err = st1.Query(req, true) So(err, ShouldBeNil) So(resp, ShouldNotBeNil) err = st1.commit() 
So(err, ShouldBeNil) - _, resp, err = st2.Query(req) + _, resp, err = st2.Query(req, true) So(err, ShouldBeNil) So(resp, ShouldNotBeNil) err = st2.commit() @@ -134,7 +137,7 @@ func TestState(t *testing.T) { _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?)`, 1, "v1"), buildQuery(`SELECT v FROM t1 WHERE k=?`, 1), - })) + }), true) // The use of Query instead of Exec won't produce an "attempt to write" error // like Exec, but it should still keep it readonly -- which means writes will // be ignored in this case. @@ -145,7 +148,7 @@ func TestState(t *testing.T) { req = buildRequest(types.QueryType(0xff), []types.Query{ buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?)`, values[0]...), }) - _, resp, err = st1.Query(req) + _, resp, err = st1.Query(req, true) So(err, ShouldEqual, ErrInvalidRequest) So(resp, ShouldBeNil) err = st1.Replay(req, nil) @@ -154,7 +157,7 @@ func TestState(t *testing.T) { Convey("The state should report error on malformed queries", func() { _, resp, err = st1.Query(buildRequest(types.WriteQuery, []types.Query{ buildQuery(`XXXXXX INTO t1 (k, v) VALUES (?, ?)`, values[0]...), - })) + }), true) So(err, ShouldNotBeNil) So(resp, ShouldBeNil) st1.Stat(id1) @@ -170,7 +173,7 @@ func TestState(t *testing.T) { So(err, ShouldNotBeNil) _, resp, err = st1.Query(buildRequest(types.WriteQuery, []types.Query{ buildQuery(`INSERT INTO t2 (k, v) VALUES (?, ?)`, values[0]...), - })) + }), true) So(err, ShouldNotBeNil) So(resp, ShouldBeNil) st1.Stat(id1) @@ -187,13 +190,13 @@ func TestState(t *testing.T) { st1.Stat(id1) _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ buildQuery(`XXXXXX v FROM t1`), - })) + }), true) So(err, ShouldNotBeNil) So(resp, ShouldBeNil) st1.Stat(id1) _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ buildQuery(`SELECT v FROM t2`), - })) + }), true) So(err, ShouldNotBeNil) So(resp, ShouldBeNil) st1.Stat(id1) @@ -207,12 +210,12 @@ func 
TestState(t *testing.T) { Convey("The state should work properly with reading/writing queries", func() { _, resp, err = st1.Query(buildRequest(types.WriteQuery, []types.Query{ buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?)`, values[0]...), - })) + }), true) So(err, ShouldBeNil) So(resp.Header.RowCount, ShouldEqual, 0) _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ buildQuery(`SELECT v FROM t1 WHERE k=?`, values[0][0]), - })) + }), true) So(err, ShouldBeNil) So(resp.Header.RowCount, ShouldEqual, 1) So(resp.Payload, ShouldResemble, types.ResponsePayload{ @@ -226,12 +229,12 @@ func TestState(t *testing.T) { buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?)`, values[1]...), buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?); INSERT INTO t1 (k, v) VALUES (?, ?)`, concat(values[2:4])...), - })) + }), true) So(err, ShouldBeNil) So(resp.Header.RowCount, ShouldEqual, 0) _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ buildQuery(`SELECT v FROM t1`), - })) + }), true) So(err, ShouldBeNil) So(resp.Header.RowCount, ShouldEqual, 4) So(resp.Payload, ShouldResemble, types.ResponsePayload{ @@ -248,7 +251,7 @@ INSERT INTO t1 (k, v) VALUES (?, ?)`, concat(values[2:4])...), _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ buildQuery(`SELECT * FROM t1`), - })) + }), true) So(err, ShouldBeNil) So(resp.Payload, ShouldResemble, types.ResponsePayload{ Columns: []string{"k", "v"}, @@ -265,22 +268,22 @@ INSERT INTO t1 (k, v) VALUES (?, ?)`, concat(values[2:4])...), // Test show statements _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ buildQuery(`SHOW TABLE t1`), - })) + }), true) So(err, ShouldBeNil) So(resp, ShouldNotBeNil) _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ buildQuery(`SHOW CREATE TABLE t1`), - })) + }), true) So(err, ShouldBeNil) So(resp, ShouldNotBeNil) _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ buildQuery(`SHOW INDEX FROM TABLE t1`), - })) + 
}), true) So(err, ShouldBeNil) So(resp, ShouldNotBeNil) _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ buildQuery(`SHOW TABLES`), - })) + }), true) So(err, ShouldBeNil) So(resp, ShouldNotBeNil) st1.Stat(id1) @@ -339,7 +342,7 @@ INSERT INTO t1 (k, v) VALUES (?, ?)`, concat(values[2:4])...), } ) for i := range reqs { - qt, resp, err = st1.Query(reqs[i]) + qt, resp, err = st1.Query(reqs[i], true) So(err, ShouldBeNil) So(qt, ShouldNotBeNil) So(resp, ShouldNotBeNil) @@ -354,10 +357,10 @@ INSERT INTO t1 (k, v) VALUES (?, ?)`, concat(values[2:4])...), req = buildRequest(types.ReadQuery, []types.Query{ buildQuery(`SELECT v FROM t1 WHERE k=?`, values[i][0]), }) - _, resp1, err = st1.Query(req) + _, resp1, err = st1.Query(req, true) So(err, ShouldBeNil) So(resp1, ShouldNotBeNil) - _, resp2, err = st2.Query(req) + _, resp2, err = st2.Query(req, true) So(err, ShouldBeNil) So(resp2, ShouldNotBeNil) So(resp1.Payload, ShouldResemble, resp2.Payload) @@ -386,7 +389,7 @@ INSERT INTO t1 (k, v) VALUES (?, ?)`, concat(values[2:4])...), ) for i := range reqs { var resp *types.Response - qt, resp, err = st1.Query(reqs[i]) + qt, resp, err = st1.Query(reqs[i], true) So(err, ShouldBeNil) So(qt, ShouldNotBeNil) So(resp, ShouldNotBeNil) @@ -431,9 +434,12 @@ INSERT INTO t1 (k, v) VALUES (?, ?)`, concat(values[2:4])...), // Try to replay modified block #0 var blockx = &types.Block{ QueryTxs: []*types.QueryAsTx{ - &types.QueryAsTx{ + { Request: &types.Request{ Header: types.SignedRequestHeader{ + RequestHeader: types.RequestHeader{ + QueryType: types.WriteQuery, + }, DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ DataHash: [32]byte{ 0, 0, 0, 0, 0, 0, 0, 1, @@ -471,10 +477,10 @@ INSERT INTO t1 (k, v) VALUES (?, ?)`, concat(values[2:4])...), req = buildRequest(types.ReadQuery, []types.Query{ buildQuery(`SELECT v FROM t1 WHERE k=?`, values[i][0]), }) - _, resp1, err = st1.Query(req) + _, resp1, err = st1.Query(req, true) So(err, ShouldBeNil) So(resp1, 
ShouldNotBeNil) - _, resp2, err = st2.Query(req) + _, resp2, err = st2.Query(req, true) So(err, ShouldBeNil) So(resp2, ShouldNotBeNil) So(resp1.Payload, ShouldResemble, resp2.Payload) @@ -505,10 +511,10 @@ INSERT INTO t1 (k, v) VALUES (?, ?)`, concat(values[2:4])...), req = buildRequest(types.ReadQuery, []types.Query{ buildQuery(`SELECT v FROM t1 WHERE k=?`, values[i][0]), }) - _, resp1, err = st1.Query(req) + _, resp1, err = st1.Query(req, true) So(err, ShouldBeNil) So(resp1, ShouldNotBeNil) - _, resp2, err = st2.Query(req) + _, resp2, err = st2.Query(req, true) So(err, ShouldBeNil) So(resp2, ShouldNotBeNil) So(resp1.Payload, ShouldResemble, resp2.Payload) @@ -660,3 +666,90 @@ func TestConvertQueryAndBuildArgs(t *testing.T) { So(sanitizedQuery, ShouldEqual, ddlQuery) }) } + +func TestSerializableState(t *testing.T) { + Convey("Given a serialzable state", t, func() { + var ( + filePath = path.Join(testingDataDir, t.Name()) + state *State + storage xi.Storage + err error + ) + storage, err = xs.NewSqlite(fmt.Sprint("file:", filePath)) + So(err, ShouldBeNil) + So(storage, ShouldNotBeNil) + state = NewState(sql.LevelSerializable, nodeID, storage) + So(state, ShouldNotBeNil) + Reset(func() { + // Clean database file after each pass + err = state.Close(true) + So(err, ShouldBeNil) + err = os.Remove(filePath) + So(err, ShouldBeNil) + err = os.Remove(fmt.Sprint(filePath, "-shm")) + So(err == nil || os.IsNotExist(err), ShouldBeTrue) + err = os.Remove(fmt.Sprint(filePath, "-wal")) + So(err == nil || os.IsNotExist(err), ShouldBeTrue) + }) + Convey("When a basic KV table is created", func() { + var ( + req = buildRequest(types.WriteQuery, []types.Query{ + buildQuery(`CREATE TABLE t1 (k INT, v TEXT, PRIMARY KEY(k))`), + }) + resp *types.Response + ) + _, resp, err = state.Query(req, true) + So(err, ShouldBeNil) + So(resp, ShouldNotBeNil) + Convey("The state should not see uncommitted changes", func(c C) { + // Build transaction query + var ( + count = 1000 + queries = 
make([]types.Query, count+1) + req *types.Request + ) + queries[0] = buildQuery(`BEGIN`) + for i := 0; i < count; i++ { + queries[i+1] = buildQuery( + `INSERT INTO t1(k, v) VALUES (?, ?)`, i, fmt.Sprintf("v%d", i), + ) + } + req = buildRequest(types.WriteQuery, queries) + // Send uncommitted transaction on background + var ( + wg = &sync.WaitGroup{} + ctx, cancel = context.WithCancel(context.Background()) + ) + defer func() { + cancel() + wg.Wait() + }() + wg.Add(1) + go func() { + defer wg.Done() + for { + var _, resp, err = state.Query(req, true) + c.So(err, ShouldBeNil) + c.So(resp.Header.RowCount, ShouldEqual, 0) + select { + case <-ctx.Done(): + return + default: + } + } + }() + // Test isolation level + for i := 0; i < count; i++ { + _, resp, err = state.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT COUNT(1) AS cnt FROM t1`), + }), true) + So(resp.Payload, ShouldResemble, types.ResponsePayload{ + Columns: []string{"cnt"}, + DeclTypes: []string{""}, + Rows: []types.ResponseRow{{Values: []interface{}{int64(0)}}}, + }) + } + }) + }) + }) +} diff --git a/xenomint/types/block_gen.go b/xenomint/types/block_gen.go index 4c19a8bf2..ba4ad7732 100644 --- a/xenomint/types/block_gen.go +++ b/xenomint/types/block_gen.go @@ -11,19 +11,6 @@ func (z *Block) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - // map header, size 2 - o = append(o, 0x83, 0x83, 0x82, 0x82) - if oTemp, err := z.SignedBlockHeader.BlockHeader.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x82) - if oTemp, err := z.SignedBlockHeader.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } o = append(o, 0x83) o = hsp.AppendArrayHeader(o, uint32(len(z.ReadQueries))) for za0001 := range z.ReadQueries { @@ -37,7 +24,18 @@ func (z *Block) MarshalHash() (o []byte, err error) { } } } - o = append(o, 
0x83) + // map header, size 2 + o = append(o, 0x82) + if oTemp, err := z.SignedBlockHeader.BlockHeader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + if oTemp, err := z.SignedBlockHeader.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } o = hsp.AppendArrayHeader(o, uint32(len(z.WriteQueries))) for za0002 := range z.WriteQueries { if z.WriteQueries[za0002] == nil { @@ -55,7 +53,7 @@ func (z *Block) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Block) Msgsize() (s int) { - s = 1 + 18 + 1 + 12 + z.SignedBlockHeader.BlockHeader.Msgsize() + 28 + z.SignedBlockHeader.DefaultHashSignVerifierImpl.Msgsize() + 12 + hsp.ArrayHeaderSize + s = 1 + 12 + hsp.ArrayHeaderSize for za0001 := range z.ReadQueries { if z.ReadQueries[za0001] == nil { s += hsp.NilSize @@ -63,7 +61,7 @@ func (z *Block) Msgsize() (s int) { s += z.ReadQueries[za0001].Msgsize() } } - s += 13 + hsp.ArrayHeaderSize + s += 18 + 1 + 12 + z.SignedBlockHeader.BlockHeader.Msgsize() + 28 + z.SignedBlockHeader.DefaultHashSignVerifierImpl.Msgsize() + 13 + hsp.ArrayHeaderSize for za0002 := range z.WriteQueries { if z.WriteQueries[za0002] == nil { s += hsp.NilSize @@ -79,40 +77,35 @@ func (z *BlockHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 6 - o = append(o, 0x86, 0x86) + o = append(o, 0x86) if oTemp, err := z.GenesisHash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) - if oTemp, err := z.ParentHash.MarshalHash(); err != nil { + if oTemp, err := z.MerkleRoot.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) - if oTemp, err := z.MerkleRoot.MarshalHash(); err != nil { + if oTemp, err := z.ParentHash.MarshalHash(); err != nil { 
return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) - o = hsp.AppendInt32(o, z.Version) - o = append(o, 0x86) if oTemp, err := z.Producer.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) o = hsp.AppendTime(o, z.Timestamp) + o = hsp.AppendInt32(o, z.Version) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BlockHeader) Msgsize() (s int) { - s = 1 + 12 + z.GenesisHash.Msgsize() + 11 + z.ParentHash.Msgsize() + 11 + z.MerkleRoot.Msgsize() + 8 + hsp.Int32Size + 9 + z.Producer.Msgsize() + 10 + hsp.TimeSize + s = 1 + 12 + z.GenesisHash.Msgsize() + 11 + z.MerkleRoot.Msgsize() + 11 + z.ParentHash.Msgsize() + 9 + z.Producer.Msgsize() + 10 + hsp.TimeSize + 8 + hsp.Int32Size return } @@ -121,13 +114,12 @@ func (z *SignedBlockHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) if oTemp, err := z.BlockHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { diff --git a/xenomint/types/common_gen.go b/xenomint/types/common_gen.go index 8fdb3ee08..d0fb74245 100644 --- a/xenomint/types/common_gen.go +++ b/xenomint/types/common_gen.go @@ -11,49 +11,46 @@ func (z *DefaultHashSignVerifierImpl) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) - if z.Signee == nil { + o = append(o, 0x83) + if oTemp, err := z.DataHash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + if z.Signature == nil { o = hsp.AppendNil(o) } else { - if oTemp, err := z.Signee.MarshalHash(); err != nil { + if oTemp, err := z.Signature.MarshalHash(); err != nil { return nil, 
err } else { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x83) - if z.Signature == nil { + if z.Signee == nil { o = hsp.AppendNil(o) } else { - if oTemp, err := z.Signature.MarshalHash(); err != nil { + if oTemp, err := z.Signee.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x83) - if oTemp, err := z.DataHash.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *DefaultHashSignVerifierImpl) Msgsize() (s int) { - s = 1 + 7 - if z.Signee == nil { + s = 1 + 9 + z.DataHash.Msgsize() + 10 + if z.Signature == nil { s += hsp.NilSize } else { - s += z.Signee.Msgsize() + s += z.Signature.Msgsize() } - s += 10 - if z.Signature == nil { + s += 7 + if z.Signee == nil { s += hsp.NilSize } else { - s += z.Signature.Msgsize() + s += z.Signee.Msgsize() } - s += 9 + z.DataHash.Msgsize() return } diff --git a/xenomint/xxx_test.go b/xenomint/xxx_test.go index eb2eaf603..a670fca3e 100644 --- a/xenomint/xxx_test.go +++ b/xenomint/xxx_test.go @@ -21,10 +21,8 @@ import ( "math/rand" "os" "path" - "sync/atomic" - - //"runtime/trace" "sync" + "sync/atomic" "syscall" "testing" "time" @@ -227,20 +225,7 @@ func setup() { panic(err) } - // Setup runtime trace for testing - //if testingTraceFile, err = ioutil.TempFile("", "CovenantSQL.trace."); err != nil { - // panic(err) - //} - //if err = trace.Start(testingTraceFile); err != nil { - // panic(err) - //} - log.SetLevel(log.DebugLevel) - //fl, err := os.OpenFile("./xenomint_test.log", os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666) - //if err != nil { - // panic(err) - //} - //log.SetOutput(fl) log.SetOutput(os.Stdout) }