From 177dfb2c96d542433e38aa237a1a6d2731658b28 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Thu, 27 Dec 2018 01:22:47 +0800 Subject: [PATCH 001/302] Add JSON-RPC API serving through websocket --- api/echo.go | 17 +++++++ api/jsonrpc.go | 69 +++++++++++++++++++++++++++ api/service.go | 113 ++++++++++++++++++++++++++++++++++++++++++++ api/service_test.go | 15 ++++++ 4 files changed, 214 insertions(+) create mode 100644 api/echo.go create mode 100644 api/jsonrpc.go create mode 100644 api/service.go create mode 100644 api/service_test.go diff --git a/api/echo.go b/api/echo.go new file mode 100644 index 000000000..868d014ff --- /dev/null +++ b/api/echo.go @@ -0,0 +1,17 @@ +package api + +import ( + "context" + + "github.com/sourcegraph/jsonrpc2" +) + +func init() { + registerMethod("echo", echo) +} + +func echo(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) ( + result interface{}, err error, +) { + return req.Params, nil +} diff --git a/api/jsonrpc.go b/api/jsonrpc.go new file mode 100644 index 000000000..0f19f97fd --- /dev/null +++ b/api/jsonrpc.go @@ -0,0 +1,69 @@ +package api + +import ( + "context" + "fmt" + + "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/pkg/errors" + "github.com/sourcegraph/jsonrpc2" +) + +var ( + jsonrpcHandler = NewJSONRPCHandler() +) + +type jsonrpcHandlerFunc func(context.Context, *jsonrpc2.Conn, *jsonrpc2.Request) (interface{}, error) + +func registerMethod(method string, handlerFunc jsonrpcHandlerFunc) { + log.WithField("method", method).Info("api: register rpc method") + jsonrpcHandler.RegisterMethod(method, handlerFunc) +} + +// JSONRPCHandler is a handler handling JSON-RPC protocol. +type JSONRPCHandler struct { + methods map[string]jsonrpcHandlerFunc +} + +// NewJSONRPCHandler creates a new JSONRPCHandler. +func NewJSONRPCHandler() *JSONRPCHandler { + return &JSONRPCHandler{ + methods: make(map[string]jsonrpcHandlerFunc), + } +} + +// RegisterMethod register a method. 
+func (h *JSONRPCHandler) RegisterMethod(method string, handlerFunc jsonrpcHandlerFunc) { + h.methods[method] = handlerFunc +} + +// Handler returns a jsonrpc2.Handler. +func (h *JSONRPCHandler) Handler() jsonrpc2.Handler { + return jsonrpc2.HandlerWithError(h.handle) +} + +var methodNotFound = func(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) (result interface{}, err error) { + return nil, errors.Errorf("method not found: %q", req.Method) +} + +// Handle implements jsonrpc2.Handler. +func (h *JSONRPCHandler) handle(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) ( + result interface{}, err error, +) { + defer func() { + if p := recover(); p != nil { + switch p := p.(type) { + case error: + err = p + default: + err = fmt.Errorf("%v", p) + } + } + }() + + fn := h.methods[req.Method] + if fn == nil { + fn = methodNotFound + } + return fn(ctx, conn, req) +} diff --git a/api/service.go b/api/service.go new file mode 100644 index 000000000..c6186c42a --- /dev/null +++ b/api/service.go @@ -0,0 +1,113 @@ +package api + +import ( + "context" + "net" + "net/http" + "os" + "os/signal" + "sync" + "syscall" + "time" + + "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/gorilla/websocket" + "github.com/pkg/errors" + "github.com/sourcegraph/jsonrpc2" + wsstream "github.com/sourcegraph/jsonrpc2/websocket" +) + +// Service configs the API service. +type Service struct { + WebsocketAddr string // start a websocket server + ReadTimeout time.Duration + WriteTimeout time.Duration + + stopChan chan struct{} +} + +// StartServers start API servers in a non-blocking way, fatal on errors. +func (s *Service) StartServers() { + go s.RunServers() +} + +// StopServers top API servers. +func (s *Service) StopServers() { + close(s.stopChan) +} + +// RunServers start API servers in a blocking way, fatal on errors. 
+func (s *Service) RunServers() { + s.stopChan = make(chan struct{}) + wg := sync.WaitGroup{} + + if s.WebsocketAddr != "" { + log.WithField("addr", s.WebsocketAddr).Info("api: start websocket server") + wg.Add(1) + go s.runWebsocketServer(&wg) + } + + sigchan := make(chan os.Signal) + signal.Notify(sigchan, os.Interrupt, syscall.SIGTERM) + <-sigchan + close(s.stopChan) + wg.Wait() +} + +func (s *Service) runWebsocketServer(wg *sync.WaitGroup) { + defer wg.Done() + + var connOpts []jsonrpc2.ConnOpt + + mux := http.NewServeMux() + upgrader := websocket.Upgrader{CheckOrigin: func(r *http.Request) bool { return true }} + + mux.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) { + conn, err := upgrader.Upgrade(rw, r, nil) + if err != nil { + log.WithError(err).Error("api: upgrade http connection to websocket failed") + http.Error(rw, errors.WithMessage(err, "could not upgrade to websocket").Error(), http.StatusBadRequest) + return + } + defer conn.Close() + + // TODO: add metric for the connections + log.Debug("received incoming connection") + <-jsonrpc2.NewConn( + context.Background(), + wsstream.NewObjectStream(conn), + jsonrpcHandler.Handler(), + connOpts..., + ).DisconnectNotify() + log.Debug("connection closed") + }) + + addr := s.WebsocketAddr + listener, err := net.Listen("tcp", addr) + if err != nil { + log.WithField("addr", addr).WithError(err).Fatal("api: couldn't bind to address") + return + } + + httpServer := &http.Server{ + Handler: mux, + ReadTimeout: s.ReadTimeout, + WriteTimeout: s.WriteTimeout, + } + + go func() { + if err := httpServer.Serve(listener); err != nil { + log.WithError(err).Error("api: websocket server serve error") + } + }() + + <-s.stopChan + + log.Warn("api: shutdown websocket server") + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + if err := httpServer.Shutdown(ctx); err != nil { + log.WithError(err).Error("shutdown server") + } + cancel() + log.Warn("api: websocket server stopped") +} diff 
--git a/api/service_test.go b/api/service_test.go new file mode 100644 index 000000000..813643c9a --- /dev/null +++ b/api/service_test.go @@ -0,0 +1,15 @@ +package api_test + +import ( + "testing" + + "github.com/CovenantSQL/CovenantSQL/api" +) + +func TestService(t *testing.T) { + service := &api.Service{ + WebsocketAddr: ":8546", + } + service.RunServers() + // TODO +} From 03ab5efb4c6901e24519470fd159e4ef55577515 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Thu, 27 Dec 2018 01:32:56 +0800 Subject: [PATCH 002/302] Add jsonrpc2.ObjectStream implementation over HTTP --- api/http_stream.go | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 api/http_stream.go diff --git a/api/http_stream.go b/api/http_stream.go new file mode 100644 index 000000000..51ae2be85 --- /dev/null +++ b/api/http_stream.go @@ -0,0 +1,37 @@ +package api + +import ( + "encoding/json" + "net/http" +) + +type httpconn struct { + rw http.ResponseWriter + r *http.Request +} + +// HTTPStream is data stream as jsonrpc2.ObjectStream over HTTP transport. +type HTTPStream struct { + conn httpconn +} + +// NewHTTPStream creates a new HTTPStream. +func NewHTTPStream(conn httpconn) HTTPStream { + return HTTPStream{conn: conn} +} + +// WriteObject implements jsonrpc2.ObjectStream.WriteObject. +func (t HTTPStream) WriteObject(obj interface{}) error { + t.conn.rw.Header().Add("Content-Type", "application/json") + return json.NewEncoder(t.conn.rw).Encode(obj) +} + +// ReadObject implements jsonrpc2.ObjectStream.ReadObject. +func (t HTTPStream) ReadObject(v interface{}) error { + return json.NewDecoder(t.conn.r.Body).Decode(v) +} + +// Close implements jsonrpc2.ObjectStream.Close. 
+func (t HTTPStream) Close() error { + return t.conn.r.Body.Close() +} From cd1d50d3ed5f6e48c3a6347e0ee11078efd24f39 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Thu, 27 Dec 2018 01:38:11 +0800 Subject: [PATCH 003/302] Update jsonrpc2 and websocket dependencies --- Gopkg.lock | 22 + vendor/github.com/gorilla/websocket/AUTHORS | 9 + vendor/github.com/gorilla/websocket/LICENSE | 22 + vendor/github.com/gorilla/websocket/README.md | 64 + vendor/github.com/gorilla/websocket/client.go | 395 ++++++ .../gorilla/websocket/client_clone.go | 16 + .../gorilla/websocket/client_clone_legacy.go | 38 + .../gorilla/websocket/compression.go | 148 +++ vendor/github.com/gorilla/websocket/conn.go | 1165 +++++++++++++++++ .../gorilla/websocket/conn_write.go | 15 + .../gorilla/websocket/conn_write_legacy.go | 18 + vendor/github.com/gorilla/websocket/doc.go | 180 +++ vendor/github.com/gorilla/websocket/json.go | 60 + vendor/github.com/gorilla/websocket/mask.go | 54 + .../github.com/gorilla/websocket/mask_safe.go | 15 + .../github.com/gorilla/websocket/prepared.go | 102 ++ vendor/github.com/gorilla/websocket/proxy.go | 77 ++ vendor/github.com/gorilla/websocket/server.go | 363 +++++ vendor/github.com/gorilla/websocket/trace.go | 19 + .../github.com/gorilla/websocket/trace_17.go | 12 + vendor/github.com/gorilla/websocket/util.go | 237 ++++ .../gorilla/websocket/x_net_proxy.go | 473 +++++++ .../github.com/sourcegraph/jsonrpc2/LICENSE | 9 + .../github.com/sourcegraph/jsonrpc2/README.md | 12 + .../github.com/sourcegraph/jsonrpc2/async.go | 17 + .../sourcegraph/jsonrpc2/call_opt.go | 30 + .../sourcegraph/jsonrpc2/conn_opt.go | 101 ++ .../jsonrpc2/handler_with_error.go | 64 + .../sourcegraph/jsonrpc2/jsonrpc2.go | 691 ++++++++++ .../github.com/sourcegraph/jsonrpc2/stream.go | 164 +++ .../sourcegraph/jsonrpc2/websocket/stream.go | 44 + 31 files changed, 4636 insertions(+) create mode 100644 vendor/github.com/gorilla/websocket/AUTHORS create mode 100644 vendor/github.com/gorilla/websocket/LICENSE 
create mode 100644 vendor/github.com/gorilla/websocket/README.md create mode 100644 vendor/github.com/gorilla/websocket/client.go create mode 100644 vendor/github.com/gorilla/websocket/client_clone.go create mode 100644 vendor/github.com/gorilla/websocket/client_clone_legacy.go create mode 100644 vendor/github.com/gorilla/websocket/compression.go create mode 100644 vendor/github.com/gorilla/websocket/conn.go create mode 100644 vendor/github.com/gorilla/websocket/conn_write.go create mode 100644 vendor/github.com/gorilla/websocket/conn_write_legacy.go create mode 100644 vendor/github.com/gorilla/websocket/doc.go create mode 100644 vendor/github.com/gorilla/websocket/json.go create mode 100644 vendor/github.com/gorilla/websocket/mask.go create mode 100644 vendor/github.com/gorilla/websocket/mask_safe.go create mode 100644 vendor/github.com/gorilla/websocket/prepared.go create mode 100644 vendor/github.com/gorilla/websocket/proxy.go create mode 100644 vendor/github.com/gorilla/websocket/server.go create mode 100644 vendor/github.com/gorilla/websocket/trace.go create mode 100644 vendor/github.com/gorilla/websocket/trace_17.go create mode 100644 vendor/github.com/gorilla/websocket/util.go create mode 100644 vendor/github.com/gorilla/websocket/x_net_proxy.go create mode 100644 vendor/github.com/sourcegraph/jsonrpc2/LICENSE create mode 100644 vendor/github.com/sourcegraph/jsonrpc2/README.md create mode 100644 vendor/github.com/sourcegraph/jsonrpc2/async.go create mode 100644 vendor/github.com/sourcegraph/jsonrpc2/call_opt.go create mode 100644 vendor/github.com/sourcegraph/jsonrpc2/conn_opt.go create mode 100644 vendor/github.com/sourcegraph/jsonrpc2/handler_with_error.go create mode 100644 vendor/github.com/sourcegraph/jsonrpc2/jsonrpc2.go create mode 100644 vendor/github.com/sourcegraph/jsonrpc2/stream.go create mode 100644 vendor/github.com/sourcegraph/jsonrpc2/websocket/stream.go diff --git a/Gopkg.lock b/Gopkg.lock index f938ce406..fcf0c7b8e 100644 --- a/Gopkg.lock 
+++ b/Gopkg.lock @@ -231,6 +231,14 @@ revision = "e3702bed27f0d39777b0b37b664b6280e8ef8fbf" version = "v1.6.2" +[[projects]] + digest = "1:7b5c6e2eeaa9ae5907c391a91c132abfd5c9e8a784a341b5625e750c67e6825d" + name = "github.com/gorilla/websocket" + packages = ["."] + pruneopts = "UT" + revision = "66b9c49e59c6c48f0ffce28c2d8b8a5678502c6d" + version = "v1.4.0" + [[projects]] branch = "master" digest = "1:438016f7d4af8e5a7010b6d0705b267a7607ddc0decad051e83a9458c6b9a523" @@ -471,6 +479,17 @@ revision = "9e8dc3f972df6c8fcc0375ef492c24d0bb204857" version = "1.6.3" +[[projects]] + branch = "master" + digest = "1:782488353b5fc6316c46793ef6d5cf6d0af4eb82492dba1920833b2ffb34881f" + name = "github.com/sourcegraph/jsonrpc2" + packages = [ + ".", + "websocket", + ] + pruneopts = "UT" + revision = "549eb959f029d014d623104d40ab966d159a92de" + [[projects]] branch = "master" digest = "1:59483b8e8183f10ab21a85ba1f4cbb4a2335d48891801f79ed7b9499f44d383c" @@ -642,6 +661,7 @@ "github.com/fortytw2/leaktest", "github.com/gorilla/handlers", "github.com/gorilla/mux", + "github.com/gorilla/websocket", "github.com/jmoiron/jsonq", "github.com/jordwest/mock-conn", "github.com/lufia/iostat", @@ -659,6 +679,8 @@ "github.com/siddontang/go-mysql/server", "github.com/sirupsen/logrus", "github.com/smartystreets/goconvey/convey", + "github.com/sourcegraph/jsonrpc2", + "github.com/sourcegraph/jsonrpc2/websocket", "github.com/syndtr/goleveldb/leveldb", "github.com/syndtr/goleveldb/leveldb/iterator", "github.com/syndtr/goleveldb/leveldb/opt", diff --git a/vendor/github.com/gorilla/websocket/AUTHORS b/vendor/github.com/gorilla/websocket/AUTHORS new file mode 100644 index 000000000..1931f4006 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/AUTHORS @@ -0,0 +1,9 @@ +# This is the official list of Gorilla WebSocket authors for copyright +# purposes. +# +# Please keep the list sorted. 
+ +Gary Burd +Google LLC (https://opensource.google.com/) +Joachim Bauch + diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE new file mode 100644 index 000000000..9171c9722 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md new file mode 100644 index 000000000..20e391f86 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/README.md @@ -0,0 +1,64 @@ +# Gorilla WebSocket + +Gorilla WebSocket is a [Go](http://golang.org/) implementation of the +[WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. + +[![Build Status](https://travis-ci.org/gorilla/websocket.svg?branch=master)](https://travis-ci.org/gorilla/websocket) +[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) + +### Documentation + +* [API Reference](http://godoc.org/github.com/gorilla/websocket) +* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) +* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) +* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) +* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) + +### Status + +The Gorilla WebSocket package provides a complete and tested implementation of +the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The +package API is stable. + +### Installation + + go get github.com/gorilla/websocket + +### Protocol Compliance + +The Gorilla WebSocket package passes the server tests in the [Autobahn Test +Suite](http://autobahn.ws/testsuite) using the application in the [examples/autobahn +subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). + +### Gorilla WebSocket compared with other packages + + + + + + + + + + + + + + + + + + +
github.com/gorillagolang.org/x/net
RFC 6455 Features
Passes Autobahn Test SuiteYesNo
Receive fragmented messageYesNo, see note 1
Send close messageYesNo
Send pings and receive pongsYesNo
Get the type of a received data messageYesYes, see note 2
Other Features
Compression ExtensionsExperimentalNo
Read message using io.ReaderYesNo, see note 3
Write message using io.WriteCloserYesNo, see note 3
+ +Notes: + +1. Large messages are fragmented in [Chrome's new WebSocket implementation](http://www.ietf.org/mail-archive/web/hybi/current/msg10503.html). +2. The application can get the type of a received data message by implementing + a [Codec marshal](http://godoc.org/golang.org/x/net/websocket#Codec.Marshal) + function. +3. The go.net io.Reader and io.Writer operate across WebSocket frame boundaries. + Read returns when the input buffer is full or a frame boundary is + encountered. Each call to Write sends a single frame message. The Gorilla + io.Reader and io.WriteCloser operate on a single WebSocket message. + diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go new file mode 100644 index 000000000..2e32fd506 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client.go @@ -0,0 +1,395 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "context" + "crypto/tls" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/http/httptrace" + "net/url" + "strings" + "time" +) + +// ErrBadHandshake is returned when the server response to opening handshake is +// invalid. +var ErrBadHandshake = errors.New("websocket: bad handshake") + +var errInvalidCompression = errors.New("websocket: invalid compression negotiation") + +// NewClient creates a new client connection using the given net connection. +// The URL u specifies the host and request URI. Use requestHeader to specify +// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies +// (Cookie). Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etc. 
+// +// Deprecated: Use Dialer instead. +func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { + d := Dialer{ + ReadBufferSize: readBufSize, + WriteBufferSize: writeBufSize, + NetDial: func(net, addr string) (net.Conn, error) { + return netConn, nil + }, + } + return d.Dial(u.String(), requestHeader) +} + +// A Dialer contains options for connecting to WebSocket server. +type Dialer struct { + // NetDial specifies the dial function for creating TCP connections. If + // NetDial is nil, net.Dial is used. + NetDial func(network, addr string) (net.Conn, error) + + // NetDialContext specifies the dial function for creating TCP connections. If + // NetDialContext is nil, net.DialContext is used. + NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error) + + // Proxy specifies a function to return a proxy for a given + // Request. If the function returns a non-nil error, the + // request is aborted with the provided error. + // If Proxy is nil or returns a nil *URL, no proxy is used. + Proxy func(*http.Request) (*url.URL, error) + + // TLSClientConfig specifies the TLS configuration to use with tls.Client. + // If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer + // size is zero, then a useful default size is used. The I/O buffer sizes + // do not limit the size of the messages that can be sent or received. + ReadBufferSize, WriteBufferSize int + + // WriteBufferPool is a pool of buffers for write operations. If the value + // is not set, then write buffers are allocated to the connection for the + // lifetime of the connection. 
+ // + // A pool is most useful when the application has a modest volume of writes + // across a large number of connections. + // + // Applications should use a single pool for each unique value of + // WriteBufferSize. + WriteBufferPool BufferPool + + // Subprotocols specifies the client's requested subprotocols. + Subprotocols []string + + // EnableCompression specifies if the client should attempt to negotiate + // per message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool + + // Jar specifies the cookie jar. + // If Jar is nil, cookies are not sent in requests and ignored + // in responses. + Jar http.CookieJar +} + +// Dial creates a new client connection by calling DialContext with a background context. +func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + return d.DialContext(context.Background(), urlStr, requestHeader) +} + +var errMalformedURL = errors.New("malformed ws or wss URL") + +func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { + hostPort = u.Host + hostNoPort = u.Host + if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { + hostNoPort = hostNoPort[:i] + } else { + switch u.Scheme { + case "wss": + hostPort += ":443" + case "https": + hostPort += ":443" + default: + hostPort += ":80" + } + } + return hostPort, hostNoPort +} + +// DefaultDialer is a dialer with all fields set to the default values. +var DefaultDialer = &Dialer{ + Proxy: http.ProxyFromEnvironment, + HandshakeTimeout: 45 * time.Second, +} + +// nilDialer is dialer to use when receiver is nil. +var nilDialer = *DefaultDialer + +// DialContext creates a new client connection. Use requestHeader to specify the +// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). 
+// Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// The context will be used in the request and in the Dialer +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etcetera. The response body may not contain the entire response and does not +// need to be closed by the application. +func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + if d == nil { + d = &nilDialer + } + + challengeKey, err := generateChallengeKey() + if err != nil { + return nil, nil, err + } + + u, err := url.Parse(urlStr) + if err != nil { + return nil, nil, err + } + + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + default: + return nil, nil, errMalformedURL + } + + if u.User != nil { + // User name and password are not allowed in websocket URIs. + return nil, nil, errMalformedURL + } + + req := &http.Request{ + Method: "GET", + URL: u, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: u.Host, + } + req = req.WithContext(ctx) + + // Set the cookies present in the cookie jar of the dialer + if d.Jar != nil { + for _, cookie := range d.Jar.Cookies(u) { + req.AddCookie(cookie) + } + } + + // Set the request headers using the capitalization for names and values in + // RFC examples. Although the capitalization shouldn't matter, there are + // servers that depend on it. The Header.Set method is not used because the + // method canonicalizes the header names. 
+ req.Header["Upgrade"] = []string{"websocket"} + req.Header["Connection"] = []string{"Upgrade"} + req.Header["Sec-WebSocket-Key"] = []string{challengeKey} + req.Header["Sec-WebSocket-Version"] = []string{"13"} + if len(d.Subprotocols) > 0 { + req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")} + } + for k, vs := range requestHeader { + switch { + case k == "Host": + if len(vs) > 0 { + req.Host = vs[0] + } + case k == "Upgrade" || + k == "Connection" || + k == "Sec-Websocket-Key" || + k == "Sec-Websocket-Version" || + k == "Sec-Websocket-Extensions" || + (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): + return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) + case k == "Sec-Websocket-Protocol": + req.Header["Sec-WebSocket-Protocol"] = vs + default: + req.Header[k] = vs + } + } + + if d.EnableCompression { + req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"} + } + + if d.HandshakeTimeout != 0 { + var cancel func() + ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout) + defer cancel() + } + + // Get network dial function. + var netDial func(network, add string) (net.Conn, error) + + if d.NetDialContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialContext(ctx, network, addr) + } + } else if d.NetDial != nil { + netDial = d.NetDial + } else { + netDialer := &net.Dialer{} + netDial = func(network, addr string) (net.Conn, error) { + return netDialer.DialContext(ctx, network, addr) + } + } + + // If needed, wrap the dial function to set the connection deadline. 
+ if deadline, ok := ctx.Deadline(); ok { + forwardDial := netDial + netDial = func(network, addr string) (net.Conn, error) { + c, err := forwardDial(network, addr) + if err != nil { + return nil, err + } + err = c.SetDeadline(deadline) + if err != nil { + c.Close() + return nil, err + } + return c, nil + } + } + + // If needed, wrap the dial function to connect through a proxy. + if d.Proxy != nil { + proxyURL, err := d.Proxy(req) + if err != nil { + return nil, nil, err + } + if proxyURL != nil { + dialer, err := proxy_FromURL(proxyURL, netDialerFunc(netDial)) + if err != nil { + return nil, nil, err + } + netDial = dialer.Dial + } + } + + hostPort, hostNoPort := hostPortNoPort(u) + trace := httptrace.ContextClientTrace(ctx) + if trace != nil && trace.GetConn != nil { + trace.GetConn(hostPort) + } + + netConn, err := netDial("tcp", hostPort) + if trace != nil && trace.GotConn != nil { + trace.GotConn(httptrace.GotConnInfo{ + Conn: netConn, + }) + } + if err != nil { + return nil, nil, err + } + + defer func() { + if netConn != nil { + netConn.Close() + } + }() + + if u.Scheme == "https" { + cfg := cloneTLSConfig(d.TLSClientConfig) + if cfg.ServerName == "" { + cfg.ServerName = hostNoPort + } + tlsConn := tls.Client(netConn, cfg) + netConn = tlsConn + + var err error + if trace != nil { + err = doHandshakeWithTrace(trace, tlsConn, cfg) + } else { + err = doHandshake(tlsConn, cfg) + } + + if err != nil { + return nil, nil, err + } + } + + conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil) + + if err := req.Write(netConn); err != nil { + return nil, nil, err + } + + if trace != nil && trace.GotFirstResponseByte != nil { + if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 { + trace.GotFirstResponseByte() + } + } + + resp, err := http.ReadResponse(conn.br, req) + if err != nil { + return nil, nil, err + } + + if d.Jar != nil { + if rc := resp.Cookies(); len(rc) > 0 { + d.Jar.SetCookies(u, rc) + } + } + + if 
resp.StatusCode != 101 || + !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || + !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || + resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { + // Before closing the network connection on return from this + // function, slurp up some of the response to aid application + // debugging. + buf := make([]byte, 1024) + n, _ := io.ReadFull(resp.Body, buf) + resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) + return nil, resp, ErrBadHandshake + } + + for _, ext := range parseExtensions(resp.Header) { + if ext[""] != "permessage-deflate" { + continue + } + _, snct := ext["server_no_context_takeover"] + _, cnct := ext["client_no_context_takeover"] + if !snct || !cnct { + return nil, resp, errInvalidCompression + } + conn.newCompressionWriter = compressNoContextTakeover + conn.newDecompressionReader = decompressNoContextTakeover + break + } + + resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) + conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") + + netConn.SetDeadline(time.Time{}) + netConn = nil // to avoid close in defer. + return conn, resp, nil +} + +func doHandshake(tlsConn *tls.Conn, cfg *tls.Config) error { + if err := tlsConn.Handshake(); err != nil { + return err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/client_clone.go b/vendor/github.com/gorilla/websocket/client_clone.go new file mode 100644 index 000000000..4f0d94372 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client_clone.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.8 + +package websocket + +import "crypto/tls" + +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return cfg.Clone() +} diff --git a/vendor/github.com/gorilla/websocket/client_clone_legacy.go b/vendor/github.com/gorilla/websocket/client_clone_legacy.go new file mode 100644 index 000000000..babb007fb --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client_clone_legacy.go @@ -0,0 +1,38 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.8 + +package websocket + +import "crypto/tls" + +// cloneTLSConfig clones all public fields except the fields +// SessionTicketsDisabled and SessionTicketKey. This avoids copying the +// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a +// config in active use. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + } +} diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go new file mode 100644 index 000000000..813ffb1e8 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/compression.go @@ -0,0 +1,148 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "compress/flate" + "errors" + "io" + "strings" + "sync" +) + +const ( + minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6 + maxCompressionLevel = flate.BestCompression + defaultCompressionLevel = 1 +) + +var ( + flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool + flateReaderPool = sync.Pool{New: func() interface{} { + return flate.NewReader(nil) + }} +) + +func decompressNoContextTakeover(r io.Reader) io.ReadCloser { + const tail = + // Add four bytes as specified in RFC + "\x00\x00\xff\xff" + + // Add final block to squelch unexpected EOF error from flate reader. + "\x01\x00\x00\xff\xff" + + fr, _ := flateReaderPool.Get().(io.ReadCloser) + fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) + return &flateReadWrapper{fr} +} + +func isValidCompressionLevel(level int) bool { + return minCompressionLevel <= level && level <= maxCompressionLevel +} + +func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser { + p := &flateWriterPools[level-minCompressionLevel] + tw := &truncWriter{w: w} + fw, _ := p.Get().(*flate.Writer) + if fw == nil { + fw, _ = flate.NewWriter(tw, level) + } else { + fw.Reset(tw) + } + return &flateWriteWrapper{fw: fw, tw: tw, p: p} +} + +// truncWriter is an io.Writer that writes all but the last four bytes of the +// stream to another io.Writer. +type truncWriter struct { + w io.WriteCloser + n int + p [4]byte +} + +func (w *truncWriter) Write(p []byte) (int, error) { + n := 0 + + // fill buffer first for simplicity. 
+ if w.n < len(w.p) { + n = copy(w.p[w.n:], p) + p = p[n:] + w.n += n + if len(p) == 0 { + return n, nil + } + } + + m := len(p) + if m > len(w.p) { + m = len(w.p) + } + + if nn, err := w.w.Write(w.p[:m]); err != nil { + return n + nn, err + } + + copy(w.p[:], w.p[m:]) + copy(w.p[len(w.p)-m:], p[len(p)-m:]) + nn, err := w.w.Write(p[:len(p)-m]) + return n + nn, err +} + +type flateWriteWrapper struct { + fw *flate.Writer + tw *truncWriter + p *sync.Pool +} + +func (w *flateWriteWrapper) Write(p []byte) (int, error) { + if w.fw == nil { + return 0, errWriteClosed + } + return w.fw.Write(p) +} + +func (w *flateWriteWrapper) Close() error { + if w.fw == nil { + return errWriteClosed + } + err1 := w.fw.Flush() + w.p.Put(w.fw) + w.fw = nil + if w.tw.p != [4]byte{0, 0, 0xff, 0xff} { + return errors.New("websocket: internal error, unexpected bytes at end of flate stream") + } + err2 := w.tw.w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +type flateReadWrapper struct { + fr io.ReadCloser +} + +func (r *flateReadWrapper) Read(p []byte) (int, error) { + if r.fr == nil { + return 0, io.ErrClosedPipe + } + n, err := r.fr.Read(p) + if err == io.EOF { + // Preemptively place the reader back in the pool. This helps with + // scenarios where the application does not call NextReader() soon after + // this final read. + r.Close() + } + return n, err +} + +func (r *flateReadWrapper) Close() error { + if r.fr == nil { + return io.ErrClosedPipe + } + err := r.fr.Close() + flateReaderPool.Put(r.fr) + r.fr = nil + return err +} diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go new file mode 100644 index 000000000..d2a21c148 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -0,0 +1,1165 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "bufio" + "encoding/binary" + "errors" + "io" + "io/ioutil" + "math/rand" + "net" + "strconv" + "sync" + "time" + "unicode/utf8" +) + +const ( + // Frame header byte 0 bits from Section 5.2 of RFC 6455 + finalBit = 1 << 7 + rsv1Bit = 1 << 6 + rsv2Bit = 1 << 5 + rsv3Bit = 1 << 4 + + // Frame header byte 1 bits from Section 5.2 of RFC 6455 + maskBit = 1 << 7 + + maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask + maxControlFramePayloadSize = 125 + + writeWait = time.Second + + defaultReadBufferSize = 4096 + defaultWriteBufferSize = 4096 + + continuationFrame = 0 + noFrame = -1 +) + +// Close codes defined in RFC 6455, section 11.7. +const ( + CloseNormalClosure = 1000 + CloseGoingAway = 1001 + CloseProtocolError = 1002 + CloseUnsupportedData = 1003 + CloseNoStatusReceived = 1005 + CloseAbnormalClosure = 1006 + CloseInvalidFramePayloadData = 1007 + ClosePolicyViolation = 1008 + CloseMessageTooBig = 1009 + CloseMandatoryExtension = 1010 + CloseInternalServerErr = 1011 + CloseServiceRestart = 1012 + CloseTryAgainLater = 1013 + CloseTLSHandshake = 1015 +) + +// The message types are defined in RFC 6455, section 11.8. +const ( + // TextMessage denotes a text data message. The text message payload is + // interpreted as UTF-8 encoded text data. + TextMessage = 1 + + // BinaryMessage denotes a binary data message. + BinaryMessage = 2 + + // CloseMessage denotes a close control message. The optional message + // payload contains a numeric code and text. Use the FormatCloseMessage + // function to format a close message payload. + CloseMessage = 8 + + // PingMessage denotes a ping control message. The optional message payload + // is UTF-8 encoded text. + PingMessage = 9 + + // PongMessage denotes a pong control message. The optional message payload + // is UTF-8 encoded text. + PongMessage = 10 +) + +// ErrCloseSent is returned when the application writes a message to the +// connection after sending a close message. 
+var ErrCloseSent = errors.New("websocket: close sent") + +// ErrReadLimit is returned when reading a message that is larger than the +// read limit set for the connection. +var ErrReadLimit = errors.New("websocket: read limit exceeded") + +// netError satisfies the net Error interface. +type netError struct { + msg string + temporary bool + timeout bool +} + +func (e *netError) Error() string { return e.msg } +func (e *netError) Temporary() bool { return e.temporary } +func (e *netError) Timeout() bool { return e.timeout } + +// CloseError represents a close message. +type CloseError struct { + // Code is defined in RFC 6455, section 11.7. + Code int + + // Text is the optional text payload. + Text string +} + +func (e *CloseError) Error() string { + s := []byte("websocket: close ") + s = strconv.AppendInt(s, int64(e.Code), 10) + switch e.Code { + case CloseNormalClosure: + s = append(s, " (normal)"...) + case CloseGoingAway: + s = append(s, " (going away)"...) + case CloseProtocolError: + s = append(s, " (protocol error)"...) + case CloseUnsupportedData: + s = append(s, " (unsupported data)"...) + case CloseNoStatusReceived: + s = append(s, " (no status)"...) + case CloseAbnormalClosure: + s = append(s, " (abnormal closure)"...) + case CloseInvalidFramePayloadData: + s = append(s, " (invalid payload data)"...) + case ClosePolicyViolation: + s = append(s, " (policy violation)"...) + case CloseMessageTooBig: + s = append(s, " (message too big)"...) + case CloseMandatoryExtension: + s = append(s, " (mandatory extension missing)"...) + case CloseInternalServerErr: + s = append(s, " (internal server error)"...) + case CloseTLSHandshake: + s = append(s, " (TLS handshake error)"...) + } + if e.Text != "" { + s = append(s, ": "...) + s = append(s, e.Text...) + } + return string(s) +} + +// IsCloseError returns boolean indicating whether the error is a *CloseError +// with one of the specified codes. 
+func IsCloseError(err error, codes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range codes { + if e.Code == code { + return true + } + } + } + return false +} + +// IsUnexpectedCloseError returns boolean indicating whether the error is a +// *CloseError with a code not in the list of expected codes. +func IsUnexpectedCloseError(err error, expectedCodes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range expectedCodes { + if e.Code == code { + return false + } + } + return true + } + return false +} + +var ( + errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true} + errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()} + errBadWriteOpCode = errors.New("websocket: bad write message type") + errWriteClosed = errors.New("websocket: write closed") + errInvalidControlFrame = errors.New("websocket: invalid control frame") +) + +func newMaskKey() [4]byte { + n := rand.Uint32() + return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} +} + +func hideTempErr(err error) error { + if e, ok := err.(net.Error); ok && e.Temporary() { + err = &netError{msg: e.Error(), timeout: e.Timeout()} + } + return err +} + +func isControl(frameType int) bool { + return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage +} + +func isData(frameType int) bool { + return frameType == TextMessage || frameType == BinaryMessage +} + +var validReceivedCloseCodes = map[int]bool{ + // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number + + CloseNormalClosure: true, + CloseGoingAway: true, + CloseProtocolError: true, + CloseUnsupportedData: true, + CloseNoStatusReceived: false, + CloseAbnormalClosure: false, + CloseInvalidFramePayloadData: true, + ClosePolicyViolation: true, + CloseMessageTooBig: true, + CloseMandatoryExtension: true, + CloseInternalServerErr: true, + CloseServiceRestart: true, + 
CloseTryAgainLater: true, + CloseTLSHandshake: false, +} + +func isValidReceivedCloseCode(code int) bool { + return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999) +} + +// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this +// interface. The type of the value stored in a pool is not specified. +type BufferPool interface { + // Get gets a value from the pool or returns nil if the pool is empty. + Get() interface{} + // Put adds a value to the pool. + Put(interface{}) +} + +// writePoolData is the type added to the write buffer pool. This wrapper is +// used to prevent applications from peeking at and depending on the values +// added to the pool. +type writePoolData struct{ buf []byte } + +// The Conn type represents a WebSocket connection. +type Conn struct { + conn net.Conn + isServer bool + subprotocol string + + // Write fields + mu chan bool // used as mutex to protect write to conn + writeBuf []byte // frame is constructed in this buffer. + writePool BufferPool + writeBufSize int + writeDeadline time.Time + writer io.WriteCloser // the current writer returned to the application + isWriting bool // for best-effort concurrent write detection + + writeErrMu sync.Mutex + writeErr error + + enableWriteCompression bool + compressionLevel int + newCompressionWriter func(io.WriteCloser, int) io.WriteCloser + + // Read fields + reader io.ReadCloser // the current reader returned to the application + readErr error + br *bufio.Reader + readRemaining int64 // bytes remaining in current frame. + readFinal bool // true the current message has more frames. + readLength int64 // Message size. + readLimit int64 // Maximum message size. 
+ readMaskPos int + readMaskKey [4]byte + handlePong func(string) error + handlePing func(string) error + handleClose func(int, string) error + readErrCount int + messageReader *messageReader // the current low-level reader + + readDecompress bool // whether last read frame had RSV1 set + newDecompressionReader func(io.Reader) io.ReadCloser +} + +func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn { + + if br == nil { + if readBufferSize == 0 { + readBufferSize = defaultReadBufferSize + } else if readBufferSize < maxControlFramePayloadSize { + // must be large enough for control frame + readBufferSize = maxControlFramePayloadSize + } + br = bufio.NewReaderSize(conn, readBufferSize) + } + + if writeBufferSize <= 0 { + writeBufferSize = defaultWriteBufferSize + } + writeBufferSize += maxFrameHeaderSize + + if writeBuf == nil && writeBufferPool == nil { + writeBuf = make([]byte, writeBufferSize) + } + + mu := make(chan bool, 1) + mu <- true + c := &Conn{ + isServer: isServer, + br: br, + conn: conn, + mu: mu, + readFinal: true, + writeBuf: writeBuf, + writePool: writeBufferPool, + writeBufSize: writeBufferSize, + enableWriteCompression: true, + compressionLevel: defaultCompressionLevel, + } + c.SetCloseHandler(nil) + c.SetPingHandler(nil) + c.SetPongHandler(nil) + return c +} + +// Subprotocol returns the negotiated protocol for the connection. +func (c *Conn) Subprotocol() string { + return c.subprotocol +} + +// Close closes the underlying network connection without sending or waiting +// for a close message. +func (c *Conn) Close() error { + return c.conn.Close() +} + +// LocalAddr returns the local network address. +func (c *Conn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +// RemoteAddr returns the remote network address. 
+func (c *Conn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +// Write methods + +func (c *Conn) writeFatal(err error) error { + err = hideTempErr(err) + c.writeErrMu.Lock() + if c.writeErr == nil { + c.writeErr = err + } + c.writeErrMu.Unlock() + return err +} + +func (c *Conn) read(n int) ([]byte, error) { + p, err := c.br.Peek(n) + if err == io.EOF { + err = errUnexpectedEOF + } + c.br.Discard(len(p)) + return p, err +} + +func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error { + <-c.mu + defer func() { c.mu <- true }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + c.conn.SetWriteDeadline(deadline) + if len(buf1) == 0 { + _, err = c.conn.Write(buf0) + } else { + err = c.writeBufs(buf0, buf1) + } + if err != nil { + return c.writeFatal(err) + } + if frameType == CloseMessage { + c.writeFatal(ErrCloseSent) + } + return nil +} + +// WriteControl writes a control message with the given deadline. The allowed +// message types are CloseMessage, PingMessage and PongMessage. +func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { + if !isControl(messageType) { + return errBadWriteOpCode + } + if len(data) > maxControlFramePayloadSize { + return errInvalidControlFrame + } + + b0 := byte(messageType) | finalBit + b1 := byte(len(data)) + if !c.isServer { + b1 |= maskBit + } + + buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) + buf = append(buf, b0, b1) + + if c.isServer { + buf = append(buf, data...) + } else { + key := newMaskKey() + buf = append(buf, key[:]...) + buf = append(buf, data...) 
+ maskBytes(key, 0, buf[6:]) + } + + d := time.Hour * 1000 + if !deadline.IsZero() { + d = deadline.Sub(time.Now()) + if d < 0 { + return errWriteTimeout + } + } + + timer := time.NewTimer(d) + select { + case <-c.mu: + timer.Stop() + case <-timer.C: + return errWriteTimeout + } + defer func() { c.mu <- true }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + c.conn.SetWriteDeadline(deadline) + _, err = c.conn.Write(buf) + if err != nil { + return c.writeFatal(err) + } + if messageType == CloseMessage { + c.writeFatal(ErrCloseSent) + } + return err +} + +func (c *Conn) prepWrite(messageType int) error { + // Close previous writer if not already closed by the application. It's + // probably better to return an error in this situation, but we cannot + // change this without breaking existing applications. + if c.writer != nil { + c.writer.Close() + c.writer = nil + } + + if !isControl(messageType) && !isData(messageType) { + return errBadWriteOpCode + } + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + if c.writeBuf == nil { + wpd, ok := c.writePool.Get().(writePoolData) + if ok { + c.writeBuf = wpd.buf + } else { + c.writeBuf = make([]byte, c.writeBufSize) + } + } + return nil +} + +// NextWriter returns a writer for the next message to send. The writer's Close +// method flushes the complete message to the network. +// +// There can be at most one open writer on a connection. NextWriter closes the +// previous writer if the application has not already done so. +// +// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and +// PongMessage) are supported. 
+func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { + if err := c.prepWrite(messageType); err != nil { + return nil, err + } + + mw := &messageWriter{ + c: c, + frameType: messageType, + pos: maxFrameHeaderSize, + } + c.writer = mw + if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { + w := c.newCompressionWriter(c.writer, c.compressionLevel) + mw.compress = true + c.writer = w + } + return c.writer, nil +} + +type messageWriter struct { + c *Conn + compress bool // whether next call to flushFrame should set RSV1 + pos int // end of data in writeBuf. + frameType int // type of the current frame. + err error +} + +func (w *messageWriter) fatal(err error) error { + if w.err != nil { + w.err = err + w.c.writer = nil + } + return err +} + +// flushFrame writes buffered data and extra as a frame to the network. The +// final argument indicates that this is the last frame in the message. +func (w *messageWriter) flushFrame(final bool, extra []byte) error { + c := w.c + length := w.pos - maxFrameHeaderSize + len(extra) + + // Check for invalid control frames. + if isControl(w.frameType) && + (!final || length > maxControlFramePayloadSize) { + return w.fatal(errInvalidControlFrame) + } + + b0 := byte(w.frameType) + if final { + b0 |= finalBit + } + if w.compress { + b0 |= rsv1Bit + } + w.compress = false + + b1 := byte(0) + if !c.isServer { + b1 |= maskBit + } + + // Assume that the frame starts at beginning of c.writeBuf. + framePos := 0 + if c.isServer { + // Adjust up if mask not included in the header. 
+ framePos = 4 + } + + switch { + case length >= 65536: + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 127 + binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) + case length > 125: + framePos += 6 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 126 + binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) + default: + framePos += 8 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | byte(length) + } + + if !c.isServer { + key := newMaskKey() + copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) + maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) + if len(extra) > 0 { + return c.writeFatal(errors.New("websocket: internal error, extra used in client mode")) + } + } + + // Write the buffers to the connection with best-effort detection of + // concurrent writes. See the concurrency section in the package + // documentation for more info. + + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + + err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra) + + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + + if err != nil { + return w.fatal(err) + } + + if final { + c.writer = nil + if c.writePool != nil { + c.writePool.Put(writePoolData{buf: c.writeBuf}) + c.writeBuf = nil + } + return nil + } + + // Setup for next frame. + w.pos = maxFrameHeaderSize + w.frameType = continuationFrame + return nil +} + +func (w *messageWriter) ncopy(max int) (int, error) { + n := len(w.c.writeBuf) - w.pos + if n <= 0 { + if err := w.flushFrame(false, nil); err != nil { + return 0, err + } + n = len(w.c.writeBuf) - w.pos + } + if n > max { + n = max + } + return n, nil +} + +func (w *messageWriter) Write(p []byte) (int, error) { + if w.err != nil { + return 0, w.err + } + + if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { + // Don't buffer large messages. 
+ err := w.flushFrame(false, p) + if err != nil { + return 0, err + } + return len(p), nil + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) WriteString(p string) (int, error) { + if w.err != nil { + return 0, w.err + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { + if w.err != nil { + return 0, w.err + } + for { + if w.pos == len(w.c.writeBuf) { + err = w.flushFrame(false, nil) + if err != nil { + break + } + } + var n int + n, err = r.Read(w.c.writeBuf[w.pos:]) + w.pos += n + nn += int64(n) + if err != nil { + if err == io.EOF { + err = nil + } + break + } + } + return nn, err +} + +func (w *messageWriter) Close() error { + if w.err != nil { + return w.err + } + if err := w.flushFrame(true, nil); err != nil { + return err + } + w.err = errWriteClosed + return nil +} + +// WritePreparedMessage writes prepared message into connection. +func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error { + frameType, frameData, err := pm.frame(prepareKey{ + isServer: c.isServer, + compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType), + compressionLevel: c.compressionLevel, + }) + if err != nil { + return err + } + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + err = c.write(frameType, c.writeDeadline, frameData, nil) + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + return err +} + +// WriteMessage is a helper method for getting a writer using NextWriter, +// writing the message and closing the writer. 
+func (c *Conn) WriteMessage(messageType int, data []byte) error { + + if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { + // Fast path with no allocations and single frame. + + if err := c.prepWrite(messageType); err != nil { + return err + } + mw := messageWriter{c: c, frameType: messageType, pos: maxFrameHeaderSize} + n := copy(c.writeBuf[mw.pos:], data) + mw.pos += n + data = data[n:] + return mw.flushFrame(true, data) + } + + w, err := c.NextWriter(messageType) + if err != nil { + return err + } + if _, err = w.Write(data); err != nil { + return err + } + return w.Close() +} + +// SetWriteDeadline sets the write deadline on the underlying network +// connection. After a write has timed out, the websocket state is corrupt and +// all future writes will return an error. A zero value for t means writes will +// not time out. +func (c *Conn) SetWriteDeadline(t time.Time) error { + c.writeDeadline = t + return nil +} + +// Read methods + +func (c *Conn) advanceFrame() (int, error) { + // 1. Skip remainder of previous frame. + + if c.readRemaining > 0 { + if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { + return noFrame, err + } + } + + // 2. Read and parse first two bytes of frame header. 
+ + p, err := c.read(2) + if err != nil { + return noFrame, err + } + + final := p[0]&finalBit != 0 + frameType := int(p[0] & 0xf) + mask := p[1]&maskBit != 0 + c.readRemaining = int64(p[1] & 0x7f) + + c.readDecompress = false + if c.newDecompressionReader != nil && (p[0]&rsv1Bit) != 0 { + c.readDecompress = true + p[0] &^= rsv1Bit + } + + if rsv := p[0] & (rsv1Bit | rsv2Bit | rsv3Bit); rsv != 0 { + return noFrame, c.handleProtocolError("unexpected reserved bits 0x" + strconv.FormatInt(int64(rsv), 16)) + } + + switch frameType { + case CloseMessage, PingMessage, PongMessage: + if c.readRemaining > maxControlFramePayloadSize { + return noFrame, c.handleProtocolError("control frame length > 125") + } + if !final { + return noFrame, c.handleProtocolError("control frame not final") + } + case TextMessage, BinaryMessage: + if !c.readFinal { + return noFrame, c.handleProtocolError("message start before final message frame") + } + c.readFinal = final + case continuationFrame: + if c.readFinal { + return noFrame, c.handleProtocolError("continuation after final message frame") + } + c.readFinal = final + default: + return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType)) + } + + // 3. Read and parse frame length. + + switch c.readRemaining { + case 126: + p, err := c.read(2) + if err != nil { + return noFrame, err + } + c.readRemaining = int64(binary.BigEndian.Uint16(p)) + case 127: + p, err := c.read(8) + if err != nil { + return noFrame, err + } + c.readRemaining = int64(binary.BigEndian.Uint64(p)) + } + + // 4. Handle frame masking. + + if mask != c.isServer { + return noFrame, c.handleProtocolError("incorrect mask flag") + } + + if mask { + c.readMaskPos = 0 + p, err := c.read(len(c.readMaskKey)) + if err != nil { + return noFrame, err + } + copy(c.readMaskKey[:], p) + } + + // 5. For text and binary messages, enforce read limit and return. 
+ + if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { + + c.readLength += c.readRemaining + if c.readLimit > 0 && c.readLength > c.readLimit { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) + return noFrame, ErrReadLimit + } + + return frameType, nil + } + + // 6. Read control frame payload. + + var payload []byte + if c.readRemaining > 0 { + payload, err = c.read(int(c.readRemaining)) + c.readRemaining = 0 + if err != nil { + return noFrame, err + } + if c.isServer { + maskBytes(c.readMaskKey, 0, payload) + } + } + + // 7. Process control frame payload. + + switch frameType { + case PongMessage: + if err := c.handlePong(string(payload)); err != nil { + return noFrame, err + } + case PingMessage: + if err := c.handlePing(string(payload)); err != nil { + return noFrame, err + } + case CloseMessage: + closeCode := CloseNoStatusReceived + closeText := "" + if len(payload) >= 2 { + closeCode = int(binary.BigEndian.Uint16(payload)) + if !isValidReceivedCloseCode(closeCode) { + return noFrame, c.handleProtocolError("invalid close code") + } + closeText = string(payload[2:]) + if !utf8.ValidString(closeText) { + return noFrame, c.handleProtocolError("invalid utf8 payload in close frame") + } + } + if err := c.handleClose(closeCode, closeText); err != nil { + return noFrame, err + } + return noFrame, &CloseError{Code: closeCode, Text: closeText} + } + + return frameType, nil +} + +func (c *Conn) handleProtocolError(message string) error { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait)) + return errors.New("websocket: " + message) +} + +// NextReader returns the next data message received from the peer. The +// returned messageType is either TextMessage or BinaryMessage. +// +// There can be at most one open reader on a connection. 
NextReader discards +// the previous message if the application has not already consumed it. +// +// Applications must break out of the application's read loop when this method +// returns a non-nil error value. Errors returned from this method are +// permanent. Once this method returns a non-nil error, all subsequent calls to +// this method return the same error. +func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { + // Close previous reader, only relevant for decompression. + if c.reader != nil { + c.reader.Close() + c.reader = nil + } + + c.messageReader = nil + c.readLength = 0 + + for c.readErr == nil { + frameType, err := c.advanceFrame() + if err != nil { + c.readErr = hideTempErr(err) + break + } + if frameType == TextMessage || frameType == BinaryMessage { + c.messageReader = &messageReader{c} + c.reader = c.messageReader + if c.readDecompress { + c.reader = c.newDecompressionReader(c.reader) + } + return frameType, c.reader, nil + } + } + + // Applications that do handle the error returned from this method spin in + // tight loop on connection failure. To help application developers detect + // this error, panic on repeated reads to the failed connection. 
+ c.readErrCount++ + if c.readErrCount >= 1000 { + panic("repeated read on failed websocket connection") + } + + return noFrame, nil, c.readErr +} + +type messageReader struct{ c *Conn } + +func (r *messageReader) Read(b []byte) (int, error) { + c := r.c + if c.messageReader != r { + return 0, io.EOF + } + + for c.readErr == nil { + + if c.readRemaining > 0 { + if int64(len(b)) > c.readRemaining { + b = b[:c.readRemaining] + } + n, err := c.br.Read(b) + c.readErr = hideTempErr(err) + if c.isServer { + c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) + } + c.readRemaining -= int64(n) + if c.readRemaining > 0 && c.readErr == io.EOF { + c.readErr = errUnexpectedEOF + } + return n, c.readErr + } + + if c.readFinal { + c.messageReader = nil + return 0, io.EOF + } + + frameType, err := c.advanceFrame() + switch { + case err != nil: + c.readErr = hideTempErr(err) + case frameType == TextMessage || frameType == BinaryMessage: + c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") + } + } + + err := c.readErr + if err == io.EOF && c.messageReader == r { + err = errUnexpectedEOF + } + return 0, err +} + +func (r *messageReader) Close() error { + return nil +} + +// ReadMessage is a helper method for getting a reader using NextReader and +// reading from that reader to a buffer. +func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { + var r io.Reader + messageType, r, err = c.NextReader() + if err != nil { + return messageType, nil, err + } + p, err = ioutil.ReadAll(r) + return messageType, p, err +} + +// SetReadDeadline sets the read deadline on the underlying network connection. +// After a read has timed out, the websocket connection state is corrupt and +// all future reads will return an error. A zero value for t means reads will +// not time out. 
+func (c *Conn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +// SetReadLimit sets the maximum size for a message read from the peer. If a +// message exceeds the limit, the connection sends a close message to the peer +// and returns ErrReadLimit to the application. +func (c *Conn) SetReadLimit(limit int64) { + c.readLimit = limit +} + +// CloseHandler returns the current close handler +func (c *Conn) CloseHandler() func(code int, text string) error { + return c.handleClose +} + +// SetCloseHandler sets the handler for close messages received from the peer. +// The code argument to h is the received close code or CloseNoStatusReceived +// if the close message is empty. The default close handler sends a close +// message back to the peer. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// close messages as described in the section on Control Messages above. +// +// The connection read methods return a CloseError when a close message is +// received. Most applications should handle close messages as part of their +// normal error handling. Applications should only set a close handler when the +// application must perform some action before sending a close message back to +// the peer. +func (c *Conn) SetCloseHandler(h func(code int, text string) error) { + if h == nil { + h = func(code int, text string) error { + message := FormatCloseMessage(code, "") + c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) + return nil + } + } + c.handleClose = h +} + +// PingHandler returns the current ping handler +func (c *Conn) PingHandler() func(appData string) error { + return c.handlePing +} + +// SetPingHandler sets the handler for ping messages received from the peer. +// The appData argument to h is the PING message application data. The default +// ping handler sends a pong to the peer. 
+// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// ping messages as described in the section on Control Messages above. +func (c *Conn) SetPingHandler(h func(appData string) error) { + if h == nil { + h = func(message string) error { + err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait)) + if err == ErrCloseSent { + return nil + } else if e, ok := err.(net.Error); ok && e.Temporary() { + return nil + } + return err + } + } + c.handlePing = h +} + +// PongHandler returns the current pong handler +func (c *Conn) PongHandler() func(appData string) error { + return c.handlePong +} + +// SetPongHandler sets the handler for pong messages received from the peer. +// The appData argument to h is the PONG message application data. The default +// pong handler does nothing. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// pong messages as described in the section on Control Messages above. +func (c *Conn) SetPongHandler(h func(appData string) error) { + if h == nil { + h = func(string) error { return nil } + } + c.handlePong = h +} + +// UnderlyingConn returns the internal net.Conn. This can be used to further +// modifications to connection specific flags. +func (c *Conn) UnderlyingConn() net.Conn { + return c.conn +} + +// EnableWriteCompression enables and disables write compression of +// subsequent text and binary messages. This function is a noop if +// compression was not negotiated with the peer. +func (c *Conn) EnableWriteCompression(enable bool) { + c.enableWriteCompression = enable +} + +// SetCompressionLevel sets the flate compression level for subsequent text and +// binary messages. This function is a noop if compression was not negotiated +// with the peer. 
See the compress/flate package for a description of +// compression levels. +func (c *Conn) SetCompressionLevel(level int) error { + if !isValidCompressionLevel(level) { + return errors.New("websocket: invalid compression level") + } + c.compressionLevel = level + return nil +} + +// FormatCloseMessage formats closeCode and text as a WebSocket close message. +// An empty message is returned for code CloseNoStatusReceived. +func FormatCloseMessage(closeCode int, text string) []byte { + if closeCode == CloseNoStatusReceived { + // Return empty message because it's illegal to send + // CloseNoStatusReceived. Return non-nil value in case application + // checks for nil. + return []byte{} + } + buf := make([]byte, 2+len(text)) + binary.BigEndian.PutUint16(buf, uint16(closeCode)) + copy(buf[2:], text) + return buf +} diff --git a/vendor/github.com/gorilla/websocket/conn_write.go b/vendor/github.com/gorilla/websocket/conn_write.go new file mode 100644 index 000000000..a509a21f8 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn_write.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package websocket + +import "net" + +func (c *Conn) writeBufs(bufs ...[]byte) error { + b := net.Buffers(bufs) + _, err := b.WriteTo(c.conn) + return err +} diff --git a/vendor/github.com/gorilla/websocket/conn_write_legacy.go b/vendor/github.com/gorilla/websocket/conn_write_legacy.go new file mode 100644 index 000000000..37edaff5a --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn_write_legacy.go @@ -0,0 +1,18 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.8 + +package websocket + +func (c *Conn) writeBufs(bufs ...[]byte) error { + for _, buf := range bufs { + if len(buf) > 0 { + if _, err := c.conn.Write(buf); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go new file mode 100644 index 000000000..dcce1a63c --- /dev/null +++ b/vendor/github.com/gorilla/websocket/doc.go @@ -0,0 +1,180 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package websocket implements the WebSocket protocol defined in RFC 6455. +// +// Overview +// +// The Conn type represents a WebSocket connection. A server application calls +// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn: +// +// var upgrader = websocket.Upgrader{ +// ReadBufferSize: 1024, +// WriteBufferSize: 1024, +// } +// +// func handler(w http.ResponseWriter, r *http.Request) { +// conn, err := upgrader.Upgrade(w, r, nil) +// if err != nil { +// log.Println(err) +// return +// } +// ... Use conn to send and receive messages. +// } +// +// Call the connection's WriteMessage and ReadMessage methods to send and +// receive messages as a slice of bytes. This snippet of code shows how to echo +// messages using these methods: +// +// for { +// messageType, p, err := conn.ReadMessage() +// if err != nil { +// log.Println(err) +// return +// } +// if err := conn.WriteMessage(messageType, p); err != nil { +// log.Println(err) +// return +// } +// } +// +// In above snippet of code, p is a []byte and messageType is an int with value +// websocket.BinaryMessage or websocket.TextMessage. +// +// An application can also send and receive messages using the io.WriteCloser +// and io.Reader interfaces. 
To send a message, call the connection NextWriter +// method to get an io.WriteCloser, write the message to the writer and close +// the writer when done. To receive a message, call the connection NextReader +// method to get an io.Reader and read until io.EOF is returned. This snippet +// shows how to echo messages using the NextWriter and NextReader methods: +// +// for { +// messageType, r, err := conn.NextReader() +// if err != nil { +// return +// } +// w, err := conn.NextWriter(messageType) +// if err != nil { +// return err +// } +// if _, err := io.Copy(w, r); err != nil { +// return err +// } +// if err := w.Close(); err != nil { +// return err +// } +// } +// +// Data Messages +// +// The WebSocket protocol distinguishes between text and binary data messages. +// Text messages are interpreted as UTF-8 encoded text. The interpretation of +// binary messages is left to the application. +// +// This package uses the TextMessage and BinaryMessage integer constants to +// identify the two data message types. The ReadMessage and NextReader methods +// return the type of the received message. The messageType argument to the +// WriteMessage and NextWriter methods specifies the type of a sent message. +// +// It is the application's responsibility to ensure that text messages are +// valid UTF-8 encoded text. +// +// Control Messages +// +// The WebSocket protocol defines three types of control messages: close, ping +// and pong. Call the connection WriteControl, WriteMessage or NextWriter +// methods to send a control message to the peer. +// +// Connections handle received close messages by calling the handler function +// set with the SetCloseHandler method and by returning a *CloseError from the +// NextReader, ReadMessage or the message Read method. The default close +// handler sends a close message to the peer. +// +// Connections handle received ping messages by calling the handler function +// set with the SetPingHandler method. 
The default ping handler sends a pong +// message to the peer. +// +// Connections handle received pong messages by calling the handler function +// set with the SetPongHandler method. The default pong handler does nothing. +// If an application sends ping messages, then the application should set a +// pong handler to receive the corresponding pong. +// +// The control message handler functions are called from the NextReader, +// ReadMessage and message reader Read methods. The default close and ping +// handlers can block these methods for a short time when the handler writes to +// the connection. +// +// The application must read the connection to process close, ping and pong +// messages sent from the peer. If the application is not otherwise interested +// in messages from the peer, then the application should start a goroutine to +// read and discard messages from the peer. A simple example is: +// +// func readLoop(c *websocket.Conn) { +// for { +// if _, _, err := c.NextReader(); err != nil { +// c.Close() +// break +// } +// } +// } +// +// Concurrency +// +// Connections support one concurrent reader and one concurrent writer. +// +// Applications are responsible for ensuring that no more than one goroutine +// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, +// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and +// that no more than one goroutine calls the read methods (NextReader, +// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler) +// concurrently. +// +// The Close and WriteControl methods can be called concurrently with all other +// methods. +// +// Origin Considerations +// +// Web browsers allow Javascript applications to open a WebSocket connection to +// any host. It's up to the server to enforce an origin policy using the Origin +// request header sent by the browser. +// +// The Upgrader calls the function specified in the CheckOrigin field to check +// the origin. 
If the CheckOrigin function returns false, then the Upgrade +// method fails the WebSocket handshake with HTTP status 403. +// +// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail +// the handshake if the Origin request header is present and the Origin host is +// not equal to the Host request header. +// +// The deprecated package-level Upgrade function does not perform origin +// checking. The application is responsible for checking the Origin header +// before calling the Upgrade function. +// +// Compression EXPERIMENTAL +// +// Per message compression extensions (RFC 7692) are experimentally supported +// by this package in a limited capacity. Setting the EnableCompression option +// to true in Dialer or Upgrader will attempt to negotiate per message deflate +// support. +// +// var upgrader = websocket.Upgrader{ +// EnableCompression: true, +// } +// +// If compression was successfully negotiated with the connection's peer, any +// message received in compressed form will be automatically decompressed. +// All Read methods will return uncompressed bytes. +// +// Per message compression of messages written to a connection can be enabled +// or disabled by calling the corresponding Conn method: +// +// conn.EnableWriteCompression(false) +// +// Currently this package does not support compression with "context takeover". +// This means that messages must be compressed and decompressed in isolation, +// without retaining sliding window or dictionary state across messages. For +// more details refer to RFC 7692. +// +// Use of compression is experimental and may result in decreased performance. +package websocket diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go new file mode 100644 index 000000000..dc2c1f641 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/json.go @@ -0,0 +1,60 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "encoding/json" + "io" +) + +// WriteJSON writes the JSON encoding of v as a message. +// +// Deprecated: Use c.WriteJSON instead. +func WriteJSON(c *Conn, v interface{}) error { + return c.WriteJSON(v) +} + +// WriteJSON writes the JSON encoding of v as a message. +// +// See the documentation for encoding/json Marshal for details about the +// conversion of Go values to JSON. +func (c *Conn) WriteJSON(v interface{}) error { + w, err := c.NextWriter(TextMessage) + if err != nil { + return err + } + err1 := json.NewEncoder(w).Encode(v) + err2 := w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// Deprecated: Use c.ReadJSON instead. +func ReadJSON(c *Conn, v interface{}) error { + return c.ReadJSON(v) +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// See the documentation for the encoding/json Unmarshal function for details +// about the conversion of JSON to a Go value. +func (c *Conn) ReadJSON(v interface{}) error { + _, r, err := c.NextReader() + if err != nil { + return err + } + err = json.NewDecoder(r).Decode(v) + if err == io.EOF { + // One value is expected in the message. + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go new file mode 100644 index 000000000..577fce9ef --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -0,0 +1,54 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. 
+ +// +build !appengine + +package websocket + +import "unsafe" + +const wordSize = int(unsafe.Sizeof(uintptr(0))) + +func maskBytes(key [4]byte, pos int, b []byte) int { + // Mask one byte at a time for small buffers. + if len(b) < 2*wordSize { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 + } + + // Mask one byte at a time to word boundary. + if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { + n = wordSize - n + for i := range b[:n] { + b[i] ^= key[pos&3] + pos++ + } + b = b[n:] + } + + // Create aligned word size key. + var k [wordSize]byte + for i := range k { + k[i] = key[(pos+i)&3] + } + kw := *(*uintptr)(unsafe.Pointer(&k)) + + // Mask one word at a time. + n := (len(b) / wordSize) * wordSize + for i := 0; i < n; i += wordSize { + *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw + } + + // Mask one byte at a time for remaining bytes. + b = b[n:] + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go new file mode 100644 index 000000000..2aac060e5 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask_safe.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +// +build appengine + +package websocket + +func maskBytes(key [4]byte, pos int, b []byte) int { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 +} diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go new file mode 100644 index 000000000..74ec565d2 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/prepared.go @@ -0,0 +1,102 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "net" + "sync" + "time" +) + +// PreparedMessage caches on the wire representations of a message payload. +// Use PreparedMessage to efficiently send a message payload to multiple +// connections. PreparedMessage is especially useful when compression is used +// because the CPU and memory expensive compression operation can be executed +// once for a given set of compression options. +type PreparedMessage struct { + messageType int + data []byte + mu sync.Mutex + frames map[prepareKey]*preparedFrame +} + +// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage. +type prepareKey struct { + isServer bool + compress bool + compressionLevel int +} + +// preparedFrame contains data in wire representation. +type preparedFrame struct { + once sync.Once + data []byte +} + +// NewPreparedMessage returns an initialized PreparedMessage. You can then send +// it to connection using WritePreparedMessage method. Valid wire +// representation will be calculated lazily only once for a set of current +// connection options. +func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) { + pm := &PreparedMessage{ + messageType: messageType, + frames: make(map[prepareKey]*preparedFrame), + data: data, + } + + // Prepare a plain server frame. + _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false}) + if err != nil { + return nil, err + } + + // To protect against caller modifying the data argument, remember the data + // copied to the plain server frame. 
+ pm.data = frameData[len(frameData)-len(data):] + return pm, nil +} + +func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) { + pm.mu.Lock() + frame, ok := pm.frames[key] + if !ok { + frame = &preparedFrame{} + pm.frames[key] = frame + } + pm.mu.Unlock() + + var err error + frame.once.Do(func() { + // Prepare a frame using a 'fake' connection. + // TODO: Refactor code in conn.go to allow more direct construction of + // the frame. + mu := make(chan bool, 1) + mu <- true + var nc prepareConn + c := &Conn{ + conn: &nc, + mu: mu, + isServer: key.isServer, + compressionLevel: key.compressionLevel, + enableWriteCompression: true, + writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize), + } + if key.compress { + c.newCompressionWriter = compressNoContextTakeover + } + err = c.WriteMessage(pm.messageType, pm.data) + frame.data = nc.buf.Bytes() + }) + return pm.messageType, frame.data, err +} + +type prepareConn struct { + buf bytes.Buffer + net.Conn +} + +func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) } +func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil } diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go new file mode 100644 index 000000000..bf2478e43 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/proxy.go @@ -0,0 +1,77 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "bufio" + "encoding/base64" + "errors" + "net" + "net/http" + "net/url" + "strings" +) + +type netDialerFunc func(network, addr string) (net.Conn, error) + +func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) { + return fn(network, addr) +} + +func init() { + proxy_RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy_Dialer) (proxy_Dialer, error) { + return &httpProxyDialer{proxyURL: proxyURL, fowardDial: forwardDialer.Dial}, nil + }) +} + +type httpProxyDialer struct { + proxyURL *url.URL + fowardDial func(network, addr string) (net.Conn, error) +} + +func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) { + hostPort, _ := hostPortNoPort(hpd.proxyURL) + conn, err := hpd.fowardDial(network, hostPort) + if err != nil { + return nil, err + } + + connectHeader := make(http.Header) + if user := hpd.proxyURL.User; user != nil { + proxyUser := user.Username() + if proxyPassword, passwordSet := user.Password(); passwordSet { + credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword)) + connectHeader.Set("Proxy-Authorization", "Basic "+credential) + } + } + + connectReq := &http.Request{ + Method: "CONNECT", + URL: &url.URL{Opaque: addr}, + Host: addr, + Header: connectHeader, + } + + if err := connectReq.Write(conn); err != nil { + conn.Close() + return nil, err + } + + // Read response. It's OK to use and discard buffered reader here becaue + // the remote server does not speak until spoken to. 
+ br := bufio.NewReader(conn) + resp, err := http.ReadResponse(br, connectReq) + if err != nil { + conn.Close() + return nil, err + } + + if resp.StatusCode != 200 { + conn.Close() + f := strings.SplitN(resp.Status, " ", 2) + return nil, errors.New(f[1]) + } + return conn, nil +} diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go new file mode 100644 index 000000000..a761824b3 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/server.go @@ -0,0 +1,363 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "errors" + "io" + "net/http" + "net/url" + "strings" + "time" +) + +// HandshakeError describes an error with the handshake from the peer. +type HandshakeError struct { + message string +} + +func (e HandshakeError) Error() string { return e.message } + +// Upgrader specifies parameters for upgrading an HTTP connection to a +// WebSocket connection. +type Upgrader struct { + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer + // size is zero, then buffers allocated by the HTTP server are used. The + // I/O buffer sizes do not limit the size of the messages that can be sent + // or received. + ReadBufferSize, WriteBufferSize int + + // WriteBufferPool is a pool of buffers for write operations. If the value + // is not set, then write buffers are allocated to the connection for the + // lifetime of the connection. + // + // A pool is most useful when the application has a modest volume of writes + // across a large number of connections. + // + // Applications should use a single pool for each unique value of + // WriteBufferSize. 
+ WriteBufferPool BufferPool + + // Subprotocols specifies the server's supported protocols in order of + // preference. If this field is not nil, then the Upgrade method negotiates a + // subprotocol by selecting the first match in this list with a protocol + // requested by the client. If there's no match, then no protocol is + // negotiated (the Sec-Websocket-Protocol header is not included in the + // handshake response). + Subprotocols []string + + // Error specifies the function for generating HTTP error responses. If Error + // is nil, then http.Error is used to generate the HTTP response. + Error func(w http.ResponseWriter, r *http.Request, status int, reason error) + + // CheckOrigin returns true if the request Origin header is acceptable. If + // CheckOrigin is nil, then a safe default is used: return false if the + // Origin request header is present and the origin host is not equal to + // request Host header. + // + // A CheckOrigin function should carefully validate the request origin to + // prevent cross-site request forgery. + CheckOrigin func(r *http.Request) bool + + // EnableCompression specify if the server should attempt to negotiate per + // message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool +} + +func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { + err := HandshakeError{reason} + if u.Error != nil { + u.Error(w, r, status, err) + } else { + w.Header().Set("Sec-Websocket-Version", "13") + http.Error(w, http.StatusText(status), status) + } + return nil, err +} + +// checkSameOrigin returns true if the origin is not set or is equal to the request host. 
+func checkSameOrigin(r *http.Request) bool { + origin := r.Header["Origin"] + if len(origin) == 0 { + return true + } + u, err := url.Parse(origin[0]) + if err != nil { + return false + } + return equalASCIIFold(u.Host, r.Host) +} + +func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { + if u.Subprotocols != nil { + clientProtocols := Subprotocols(r) + for _, serverProtocol := range u.Subprotocols { + for _, clientProtocol := range clientProtocols { + if clientProtocol == serverProtocol { + return clientProtocol + } + } + } + } else if responseHeader != nil { + return responseHeader.Get("Sec-Websocket-Protocol") + } + return "" +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// application negotiated subprotocol (Sec-WebSocket-Protocol). +// +// If the upgrade fails, then Upgrade replies to the client with an HTTP error +// response. 
+func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { + const badHandshake = "websocket: the client is not using the websocket protocol: " + + if !tokenListContainsValue(r.Header, "Connection", "upgrade") { + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header") + } + + if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header") + } + + if r.Method != "GET" { + return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET") + } + + if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header") + } + + if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported") + } + + checkOrigin := u.CheckOrigin + if checkOrigin == nil { + checkOrigin = checkSameOrigin + } + if !checkOrigin(r) { + return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin") + } + + challengeKey := r.Header.Get("Sec-Websocket-Key") + if challengeKey == "" { + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-WebSocket-Key' header is missing or blank") + } + + subprotocol := u.selectSubprotocol(r, responseHeader) + + // Negotiate PMCE + var compress bool + if u.EnableCompression { + for _, ext := range parseExtensions(r.Header) { + if ext[""] != "permessage-deflate" { + continue + } + compress = true + break + } + } + + h, ok := w.(http.Hijacker) + if !ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not 
implement http.Hijacker") + } + var brw *bufio.ReadWriter + netConn, brw, err := h.Hijack() + if err != nil { + return u.returnError(w, r, http.StatusInternalServerError, err.Error()) + } + + if brw.Reader.Buffered() > 0 { + netConn.Close() + return nil, errors.New("websocket: client sent data before handshake is complete") + } + + var br *bufio.Reader + if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 { + // Reuse hijacked buffered reader as connection reader. + br = brw.Reader + } + + buf := bufioWriterBuffer(netConn, brw.Writer) + + var writeBuf []byte + if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 { + // Reuse hijacked write buffer as connection buffer. + writeBuf = buf + } + + c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf) + c.subprotocol = subprotocol + + if compress { + c.newCompressionWriter = compressNoContextTakeover + c.newDecompressionReader = decompressNoContextTakeover + } + + // Use larger of hijacked buffer and connection write buffer for header. + p := buf + if len(c.writeBuf) > len(p) { + p = c.writeBuf + } + p = p[:0] + + p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) + p = append(p, computeAcceptKey(challengeKey)...) + p = append(p, "\r\n"...) + if c.subprotocol != "" { + p = append(p, "Sec-WebSocket-Protocol: "...) + p = append(p, c.subprotocol...) + p = append(p, "\r\n"...) + } + if compress { + p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...) + } + for k, vs := range responseHeader { + if k == "Sec-Websocket-Protocol" { + continue + } + for _, v := range vs { + p = append(p, k...) + p = append(p, ": "...) + for i := 0; i < len(v); i++ { + b := v[i] + if b <= 31 { + // prevent response splitting. + b = ' ' + } + p = append(p, b) + } + p = append(p, "\r\n"...) 
+ } + } + p = append(p, "\r\n"...) + + // Clear deadlines set by HTTP server. + netConn.SetDeadline(time.Time{}) + + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) + } + if _, err = netConn.Write(p); err != nil { + netConn.Close() + return nil, err + } + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Time{}) + } + + return c, nil +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// Deprecated: Use websocket.Upgrader instead. +// +// Upgrade does not perform origin checking. The application is responsible for +// checking the Origin header before calling Upgrade. An example implementation +// of the same origin policy check is: +// +// if req.Header.Get("Origin") != "http://"+req.Host { +// http.Error(w, "Origin not allowed", http.StatusForbidden) +// return +// } +// +// If the endpoint supports subprotocols, then the application is responsible +// for negotiating the protocol used on the connection. Use the Subprotocols() +// function to get the subprotocols requested by the client. Use the +// Sec-Websocket-Protocol response header to specify the subprotocol selected +// by the application. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// negotiated subprotocol (Sec-Websocket-Protocol). +// +// The connection buffers IO to the underlying network connection. The +// readBufSize and writeBufSize parameters specify the size of the buffers to +// use. Messages can be larger than the buffers. +// +// If the request is not a valid WebSocket handshake, then Upgrade returns an +// error of type HandshakeError. Applications should handle this error by +// replying to the client with an HTTP error response. 
+func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) { + u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize} + u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) { + // don't return errors to maintain backwards compatibility + } + u.CheckOrigin = func(r *http.Request) bool { + // allow all connections by default + return true + } + return u.Upgrade(w, r, responseHeader) +} + +// Subprotocols returns the subprotocols requested by the client in the +// Sec-Websocket-Protocol header. +func Subprotocols(r *http.Request) []string { + h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol")) + if h == "" { + return nil + } + protocols := strings.Split(h, ",") + for i := range protocols { + protocols[i] = strings.TrimSpace(protocols[i]) + } + return protocols +} + +// IsWebSocketUpgrade returns true if the client requested upgrade to the +// WebSocket protocol. +func IsWebSocketUpgrade(r *http.Request) bool { + return tokenListContainsValue(r.Header, "Connection", "upgrade") && + tokenListContainsValue(r.Header, "Upgrade", "websocket") +} + +// bufioReaderSize size returns the size of a bufio.Reader. +func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int { + // This code assumes that peek on a reset reader returns + // bufio.Reader.buf[:0]. + // TODO: Use bufio.Reader.Size() after Go 1.10 + br.Reset(originalReader) + if p, err := br.Peek(0); err == nil { + return cap(p) + } + return 0 +} + +// writeHook is an io.Writer that records the last slice passed to it vio +// io.Writer.Write. +type writeHook struct { + p []byte +} + +func (wh *writeHook) Write(p []byte) (int, error) { + wh.p = p + return len(p), nil +} + +// bufioWriterBuffer grabs the buffer from a bufio.Writer. 
+func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte { + // This code assumes that bufio.Writer.buf[:1] is passed to the + // bufio.Writer's underlying writer. + var wh writeHook + bw.Reset(&wh) + bw.WriteByte(0) + bw.Flush() + + bw.Reset(originalWriter) + + return wh.p[:cap(wh.p)] +} diff --git a/vendor/github.com/gorilla/websocket/trace.go b/vendor/github.com/gorilla/websocket/trace.go new file mode 100644 index 000000000..834f122a0 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/trace.go @@ -0,0 +1,19 @@ +// +build go1.8 + +package websocket + +import ( + "crypto/tls" + "net/http/httptrace" +) + +func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error { + if trace.TLSHandshakeStart != nil { + trace.TLSHandshakeStart() + } + err := doHandshake(tlsConn, cfg) + if trace.TLSHandshakeDone != nil { + trace.TLSHandshakeDone(tlsConn.ConnectionState(), err) + } + return err +} diff --git a/vendor/github.com/gorilla/websocket/trace_17.go b/vendor/github.com/gorilla/websocket/trace_17.go new file mode 100644 index 000000000..77d05a0b5 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/trace_17.go @@ -0,0 +1,12 @@ +// +build !go1.8 + +package websocket + +import ( + "crypto/tls" + "net/http/httptrace" +) + +func doHandshakeWithTrace(trace *httptrace.ClientTrace, tlsConn *tls.Conn, cfg *tls.Config) error { + return doHandshake(tlsConn, cfg) +} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go new file mode 100644 index 000000000..354001e1e --- /dev/null +++ b/vendor/github.com/gorilla/websocket/util.go @@ -0,0 +1,237 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package websocket + +import ( + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "io" + "net/http" + "strings" + "unicode/utf8" +) + +var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") + +func computeAcceptKey(challengeKey string) string { + h := sha1.New() + h.Write([]byte(challengeKey)) + h.Write(keyGUID) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func generateChallengeKey() (string, error) { + p := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, p); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(p), nil +} + +// Octet types from RFC 2616. +var octetTypes [256]byte + +const ( + isTokenOctet = 1 << iota + isSpaceOctet +) + +func init() { + // From RFC 2616 + // + // OCTET = + // CHAR = + // CTL = + // CR = + // LF = + // SP = + // HT = + // <"> = + // CRLF = CR LF + // LWS = [CRLF] 1*( SP | HT ) + // TEXT = + // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> + // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT + // token = 1* + // qdtext = > + + for c := 0; c < 256; c++ { + var t byte + isCtl := c <= 31 || c == 127 + isChar := 0 <= c && c <= 127 + isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 + if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { + t |= isSpaceOctet + } + if isChar && !isCtl && !isSeparator { + t |= isTokenOctet + } + octetTypes[c] = t + } +} + +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isSpaceOctet == 0 { + break + } + } + return s[i:] +} + +func nextToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if octetTypes[s[i]]&isTokenOctet == 0 { + break + } + } + return s[:i], s[i:] +} + +func nextTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return nextToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j 
:= copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} + +// equalASCIIFold returns true if s is equal to t with ASCII case folding. +func equalASCIIFold(s, t string) bool { + for s != "" && t != "" { + sr, size := utf8.DecodeRuneInString(s) + s = s[size:] + tr, size := utf8.DecodeRuneInString(t) + t = t[size:] + if sr == tr { + continue + } + if 'A' <= sr && sr <= 'Z' { + sr = sr + 'a' - 'A' + } + if 'A' <= tr && tr <= 'Z' { + tr = tr + 'a' - 'A' + } + if sr != tr { + return false + } + } + return s == t +} + +// tokenListContainsValue returns true if the 1#token header with the given +// name contains a token equal to value with ASCII case folding. +func tokenListContainsValue(header http.Header, name string, value string) bool { +headers: + for _, s := range header[name] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + s = skipSpace(s) + if s != "" && s[0] != ',' { + continue headers + } + if equalASCIIFold(t, value) { + return true + } + if s == "" { + continue headers + } + s = s[1:] + } + } + return false +} + +// parseExtensions parses WebSocket extensions from a header. +func parseExtensions(header http.Header) []map[string]string { + // From RFC 6455: + // + // Sec-WebSocket-Extensions = extension-list + // extension-list = 1#extension + // extension = extension-token *( ";" extension-param ) + // extension-token = registered-token + // registered-token = token + // extension-param = token [ "=" (token | quoted-string) ] + // ;When using the quoted-string syntax variant, the value + // ;after quoted-string unescaping MUST conform to the + // ;'token' ABNF. 
+ + var result []map[string]string +headers: + for _, s := range header["Sec-Websocket-Extensions"] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + ext := map[string]string{"": t} + for { + s = skipSpace(s) + if !strings.HasPrefix(s, ";") { + break + } + var k string + k, s = nextToken(skipSpace(s[1:])) + if k == "" { + continue headers + } + s = skipSpace(s) + var v string + if strings.HasPrefix(s, "=") { + v, s = nextTokenOrQuoted(skipSpace(s[1:])) + s = skipSpace(s) + } + if s != "" && s[0] != ',' && s[0] != ';' { + continue headers + } + ext[k] = v + } + if s != "" && s[0] != ',' { + continue headers + } + result = append(result, ext) + if s == "" { + continue headers + } + s = s[1:] + } + } + return result +} diff --git a/vendor/github.com/gorilla/websocket/x_net_proxy.go b/vendor/github.com/gorilla/websocket/x_net_proxy.go new file mode 100644 index 000000000..2e668f6b8 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/x_net_proxy.go @@ -0,0 +1,473 @@ +// Code generated by golang.org/x/tools/cmd/bundle. DO NOT EDIT. +//go:generate bundle -o x_net_proxy.go golang.org/x/net/proxy + +// Package proxy provides support for a variety of protocols to proxy network +// data. +// + +package websocket + +import ( + "errors" + "io" + "net" + "net/url" + "os" + "strconv" + "strings" + "sync" +) + +type proxy_direct struct{} + +// Direct is a direct proxy: one that makes network connections directly. +var proxy_Direct = proxy_direct{} + +func (proxy_direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. 
+type proxy_PerHost struct { + def, bypass proxy_Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. +func proxy_NewPerHost(defaultDialer, bypass proxy_Dialer) *proxy_PerHost { + return &proxy_PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *proxy_PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +func (p *proxy_PerHost) dialerForRequest(host string) proxy_Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. 
+func (p *proxy_PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. +func (p *proxy_PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. +func (p *proxy_PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *proxy_PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. +func (p *proxy_PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} + +// A Dialer is a means to establish a connection. +type proxy_Dialer interface { + // Dial connects to the given address via the proxy. 
+ Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. +type proxy_Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy related variables in +// the environment. +func proxy_FromEnvironment() proxy_Dialer { + allProxy := proxy_allProxyEnv.Get() + if len(allProxy) == 0 { + return proxy_Direct + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return proxy_Direct + } + proxy, err := proxy_FromURL(proxyURL, proxy_Direct) + if err != nil { + return proxy_Direct + } + + noProxy := proxy_noProxyEnv.Get() + if len(noProxy) == 0 { + return proxy + } + + perHost := proxy_NewPerHost(proxy, proxy_Direct) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxy_proxySchemes map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func proxy_RegisterDialerType(scheme string, f func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) { + if proxy_proxySchemes == nil { + proxy_proxySchemes = make(map[string]func(*url.URL, proxy_Dialer) (proxy_Dialer, error)) + } + proxy_proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. 
+func proxy_FromURL(u *url.URL, forward proxy_Dialer) (proxy_Dialer, error) { + var auth *proxy_Auth + if u.User != nil { + auth = new(proxy_Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5": + return proxy_SOCKS5("tcp", u.Host, auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. + if proxy_proxySchemes != nil { + if f, ok := proxy_proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + proxy_allProxyEnv = &proxy_envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + proxy_noProxyEnv = &proxy_envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). +// (Borrowed from net/http/transport.go) +type proxy_envOnce struct { + names []string + once sync.Once + val string +} + +func (e *proxy_envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *proxy_envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address +// with an optional username and password. See RFC 1928 and RFC 1929. 
+func proxy_SOCKS5(network, addr string, auth *proxy_Auth, forward proxy_Dialer) (proxy_Dialer, error) { + s := &proxy_socks5{ + network: network, + addr: addr, + forward: forward, + } + if auth != nil { + s.user = auth.User + s.password = auth.Password + } + + return s, nil +} + +type proxy_socks5 struct { + user, password string + network, addr string + forward proxy_Dialer +} + +const proxy_socks5Version = 5 + +const ( + proxy_socks5AuthNone = 0 + proxy_socks5AuthPassword = 2 +) + +const proxy_socks5Connect = 1 + +const ( + proxy_socks5IP4 = 1 + proxy_socks5Domain = 3 + proxy_socks5IP6 = 4 +) + +var proxy_socks5Errors = []string{ + "", + "general failure", + "connection forbidden", + "network unreachable", + "host unreachable", + "connection refused", + "TTL expired", + "command not supported", + "address type not supported", +} + +// Dial connects to the address addr on the given network via the SOCKS5 proxy. +func (s *proxy_socks5) Dial(network, addr string) (net.Conn, error) { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) + } + + conn, err := s.forward.Dial(s.network, s.addr) + if err != nil { + return nil, err + } + if err := s.connect(conn, addr); err != nil { + conn.Close() + return nil, err + } + return conn, nil +} + +// connect takes an existing connection to a socks5 proxy server, +// and commands the server to extend that connection to target, +// which must be a canonical address with a host and port. 
+func (s *proxy_socks5) connect(conn net.Conn, target string) error { + host, portStr, err := net.SplitHostPort(target) + if err != nil { + return err + } + + port, err := strconv.Atoi(portStr) + if err != nil { + return errors.New("proxy: failed to parse port number: " + portStr) + } + if port < 1 || port > 0xffff { + return errors.New("proxy: port number out of range: " + portStr) + } + + // the size here is just an estimate + buf := make([]byte, 0, 6+len(host)) + + buf = append(buf, proxy_socks5Version) + if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { + buf = append(buf, 2 /* num auth methods */, proxy_socks5AuthNone, proxy_socks5AuthPassword) + } else { + buf = append(buf, 1 /* num auth methods */, proxy_socks5AuthNone) + } + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + if buf[0] != 5 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) + } + if buf[1] == 0xff { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") + } + + // See RFC 1929 + if buf[1] == proxy_socks5AuthPassword { + buf = buf[:0] + buf = append(buf, 1 /* password protocol version */) + buf = append(buf, uint8(len(s.user))) + buf = append(buf, s.user...) + buf = append(buf, uint8(len(s.password))) + buf = append(buf, s.password...) 
+ + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if buf[1] != 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") + } + } + + buf = buf[:0] + buf = append(buf, proxy_socks5Version, proxy_socks5Connect, 0 /* reserved */) + + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + buf = append(buf, proxy_socks5IP4) + ip = ip4 + } else { + buf = append(buf, proxy_socks5IP6) + } + buf = append(buf, ip...) + } else { + if len(host) > 255 { + return errors.New("proxy: destination host name too long: " + host) + } + buf = append(buf, proxy_socks5Domain) + buf = append(buf, byte(len(host))) + buf = append(buf, host...) + } + buf = append(buf, byte(port>>8), byte(port)) + + if _, err := conn.Write(buf); err != nil { + return errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:4]); err != nil { + return errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + failure := "unknown error" + if int(buf[1]) < len(proxy_socks5Errors) { + failure = proxy_socks5Errors[buf[1]] + } + + if len(failure) > 0 { + return errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) + } + + bytesToDiscard := 0 + switch buf[3] { + case proxy_socks5IP4: + bytesToDiscard = net.IPv4len + case proxy_socks5IP6: + bytesToDiscard = net.IPv6len + case proxy_socks5Domain: + _, err := io.ReadFull(conn, buf[:1]) + if err != nil { + return errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + bytesToDiscard = int(buf[0]) + 
default: + return errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) + } + + if cap(buf) < bytesToDiscard { + buf = make([]byte, bytesToDiscard) + } else { + buf = buf[:bytesToDiscard] + } + if _, err := io.ReadFull(conn, buf); err != nil { + return errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + // Also need to discard the port number + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + return nil +} diff --git a/vendor/github.com/sourcegraph/jsonrpc2/LICENSE b/vendor/github.com/sourcegraph/jsonrpc2/LICENSE new file mode 100644 index 000000000..89d864bd8 --- /dev/null +++ b/vendor/github.com/sourcegraph/jsonrpc2/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) 2016 Sourcegraph Inc + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/sourcegraph/jsonrpc2/README.md b/vendor/github.com/sourcegraph/jsonrpc2/README.md new file mode 100644 index 000000000..d2406ab07 --- /dev/null +++ b/vendor/github.com/sourcegraph/jsonrpc2/README.md @@ -0,0 +1,12 @@ +# jsonrpc2: JSON-RPC 2.0 implementation for Go [![Build Status](https://travis-ci.org/sourcegraph/jsonrpc2.svg)](https://travis-ci.org/sourcegraph/jsonrpc2) [![Sourcegraph](https://sourcegraph.com/github.com/sourcegraph/jsonrpc2/-/badge.svg)](https://sourcegraph.com/github.com/sourcegraph/jsonrpc2?badge) [![GoDoc](https://godoc.org/github.com/sourcegraph/jsonrpc2?status.svg)](https://godoc.org/github.com/sourcegraph/jsonrpc2) + + +Package jsonrpc2 provides a [Go](https://golang.org) implementation of [JSON-RPC 2.0](http://www.jsonrpc.org/specification). + +This package is **experimental** until further notice. + +[**Open the code in Sourcegraph**](https://sourcegraph.com/github.com/sourcegraph/jsonrpc2) + +## Known issues + +* Batch requests and responses are not yet supported. diff --git a/vendor/github.com/sourcegraph/jsonrpc2/async.go b/vendor/github.com/sourcegraph/jsonrpc2/async.go new file mode 100644 index 000000000..bc8a3708a --- /dev/null +++ b/vendor/github.com/sourcegraph/jsonrpc2/async.go @@ -0,0 +1,17 @@ +package jsonrpc2 + +import "context" + +// AsyncHandler wraps a Handler such that each request is handled in its own +// goroutine. It is a convenience wrapper. 
+func AsyncHandler(h Handler) Handler { + return asyncHandler{h} +} + +type asyncHandler struct { + Handler +} + +func (h asyncHandler) Handle(ctx context.Context, conn *Conn, req *Request) { + go h.Handler.Handle(ctx, conn, req) +} diff --git a/vendor/github.com/sourcegraph/jsonrpc2/call_opt.go b/vendor/github.com/sourcegraph/jsonrpc2/call_opt.go new file mode 100644 index 000000000..b554baca8 --- /dev/null +++ b/vendor/github.com/sourcegraph/jsonrpc2/call_opt.go @@ -0,0 +1,30 @@ +package jsonrpc2 + +// CallOption is an option that can be provided to (*Conn).Call to +// configure custom behavior. See Meta. +type CallOption interface { + apply(r *Request) error +} + +type callOptionFunc func(r *Request) error + +func (c callOptionFunc) apply(r *Request) error { return c(r) } + +// Meta returns a call option which attaches the given meta object to +// the JSON-RPC 2.0 request (this is a Sourcegraph extension to JSON +// RPC 2.0 for carrying metadata). +func Meta(meta interface{}) CallOption { + return callOptionFunc(func(r *Request) error { + return r.SetMeta(meta) + }) +} + +// PickID returns a call option which sets the ID on a request. Care must be +// taken to ensure there are no conflicts with any previously picked ID, nor +// with the default sequence ID. +func PickID(id ID) CallOption { + return callOptionFunc(func(r *Request) error { + r.ID = id + return nil + }) +} diff --git a/vendor/github.com/sourcegraph/jsonrpc2/conn_opt.go b/vendor/github.com/sourcegraph/jsonrpc2/conn_opt.go new file mode 100644 index 000000000..e6346bae8 --- /dev/null +++ b/vendor/github.com/sourcegraph/jsonrpc2/conn_opt.go @@ -0,0 +1,101 @@ +package jsonrpc2 + +import ( + "encoding/json" + "sync" +) + +// Logger interface implements one method - Printf. +// You can use the stdlib logger *log.Logger +type Logger interface { + Printf(format string, v ...interface{}) +} + +// ConnOpt is the type of function that can be passed to NewConn to +// customize the Conn before it is created. 
+type ConnOpt func(*Conn) + +// OnRecv causes all requests received on conn to invoke f(req, nil) +// and all responses to invoke f(req, resp), +func OnRecv(f func(*Request, *Response)) ConnOpt { + return func(c *Conn) { c.onRecv = append(c.onRecv, f) } +} + +// OnSend causes all requests sent on conn to invoke f(req, nil) and +// all responses to invoke f(nil, resp), +func OnSend(f func(*Request, *Response)) ConnOpt { + return func(c *Conn) { c.onSend = append(c.onSend, f) } +} + +// LogMessages causes all messages sent and received on conn to be +// logged using the provided logger. +func LogMessages(log Logger) ConnOpt { + return func(c *Conn) { + // Remember reqs we have received so we can helpfully show the + // request method in OnSend for responses. + var ( + mu sync.Mutex + reqMethods = map[ID]string{} + ) + + OnRecv(func(req *Request, resp *Response) { + switch { + case req != nil && resp == nil: + mu.Lock() + reqMethods[req.ID] = req.Method + mu.Unlock() + + params, _ := json.Marshal(req.Params) + if req.Notif { + log.Printf("--> notif: %s: %s", req.Method, params) + } else { + log.Printf("--> request #%s: %s: %s", req.ID, req.Method, params) + } + + case resp != nil: + var method string + if req != nil { + method = req.Method + } else { + method = "(no matching request)" + } + switch { + case resp.Result != nil: + result, _ := json.Marshal(resp.Result) + log.Printf("--> result #%s: %s: %s", resp.ID, method, result) + case resp.Error != nil: + err, _ := json.Marshal(resp.Error) + log.Printf("--> error #%s: %s: %s", resp.ID, method, err) + } + } + })(c) + OnSend(func(req *Request, resp *Response) { + switch { + case req != nil: + params, _ := json.Marshal(req.Params) + if req.Notif { + log.Printf("<-- notif: %s: %s", req.Method, params) + } else { + log.Printf("<-- request #%s: %s: %s", req.ID, req.Method, params) + } + + case resp != nil: + mu.Lock() + method := reqMethods[resp.ID] + delete(reqMethods, resp.ID) + mu.Unlock() + if method == "" { + method = 
"(no previous request)" + } + + if resp.Result != nil { + result, _ := json.Marshal(resp.Result) + log.Printf("<-- result #%s: %s: %s", resp.ID, method, result) + } else { + err, _ := json.Marshal(resp.Error) + log.Printf("<-- error #%s: %s: %s", resp.ID, method, err) + } + } + })(c) + } +} diff --git a/vendor/github.com/sourcegraph/jsonrpc2/handler_with_error.go b/vendor/github.com/sourcegraph/jsonrpc2/handler_with_error.go new file mode 100644 index 000000000..6f056ccad --- /dev/null +++ b/vendor/github.com/sourcegraph/jsonrpc2/handler_with_error.go @@ -0,0 +1,64 @@ +package jsonrpc2 + +import ( + "context" + "log" +) + +// HandlerWithError implements Handler by calling the func for each +// request and handling returned errors and results. +func HandlerWithError(handleFunc func(context.Context, *Conn, *Request) (result interface{}, err error)) *HandlerWithErrorConfigurer { + return &HandlerWithErrorConfigurer{handleFunc: handleFunc} +} + +type HandlerWithErrorConfigurer struct { + handleFunc func(context.Context, *Conn, *Request) (result interface{}, err error) + suppressErrClosed bool +} + +// Handle implements Handler. +func (h *HandlerWithErrorConfigurer) Handle(ctx context.Context, conn *Conn, req *Request) { + result, err := h.handleFunc(ctx, conn, req) + if req.Notif { + if err != nil { + log.Printf("jsonrpc2 handler: notification %q handling error: %s", req.Method, err) + } + return + } + + resp := &Response{ID: req.ID} + if err == nil { + err = resp.SetResult(result) + } + if err != nil { + if e, ok := err.(*Error); ok { + resp.Error = e + } else { + resp.Error = &Error{Message: err.Error()} + } + } + + if !req.Notif { + if err := conn.SendResponse(ctx, resp); err != nil { + if err != ErrClosed || !h.suppressErrClosed { + log.Printf("jsonrpc2 handler: sending response %s: %s", resp.ID, err) + } + } + } +} + +// SuppressErrClosed makes the handler suppress jsonrpc2.ErrClosed errors from +// being logged. The original handler `h` is returned. 
+// +// This is optional because only in some cases is this behavior desired. For +// example, a handler that serves end-user connections may not want to log +// ErrClosed because it just indicates the end-user connection has gone away +// for any reason (they could have lost wifi connection, are no longer +// interested in the request and closed the connection, etc) and as such it +// would be log spam, whereas a handler that serves internal connections would +// never expect connections to go away unexpectedly (which could indicate +// service degradation, etc) and as such ErrClosed should always be logged. +func (h *HandlerWithErrorConfigurer) SuppressErrClosed() Handler { + h.suppressErrClosed = true + return h +} diff --git a/vendor/github.com/sourcegraph/jsonrpc2/jsonrpc2.go b/vendor/github.com/sourcegraph/jsonrpc2/jsonrpc2.go new file mode 100644 index 000000000..3e0763d73 --- /dev/null +++ b/vendor/github.com/sourcegraph/jsonrpc2/jsonrpc2.go @@ -0,0 +1,691 @@ +// Package jsonrpc2 provides a client and server implementation of +// [JSON-RPC 2.0](http://www.jsonrpc.org/specification). +package jsonrpc2 + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "strconv" + "sync" +) + +// JSONRPC2 describes an interface for issuing requests that speak the +// JSON-RPC 2 protocol. It isn't really necessary for this package +// itself, but is useful for external users that use the interface as +// an API boundary. +type JSONRPC2 interface { + // Call issues a standard request (http://www.jsonrpc.org/specification#request_object). + Call(ctx context.Context, method string, params, result interface{}, opt ...CallOption) error + + // Notify issues a notification request (http://www.jsonrpc.org/specification#notification). + Notify(ctx context.Context, method string, params interface{}, opt ...CallOption) error + + // Close closes the underlying connection, if it exists. 
+ Close() error +} + +// Request represents a JSON-RPC request or +// notification. See +// http://www.jsonrpc.org/specification#request_object and +// http://www.jsonrpc.org/specification#notification. +type Request struct { + Method string `json:"method"` + Params *json.RawMessage `json:"params,omitempty"` + ID ID `json:"id"` + Notif bool `json:"-"` + + // Meta optionally provides metadata to include in the request. + // + // NOTE: It is not part of spec. However, it is useful for propogating + // tracing context, etc. + Meta *json.RawMessage `json:"meta,omitempty"` +} + +// MarshalJSON implements json.Marshaler and adds the "jsonrpc":"2.0" +// property. +func (r Request) MarshalJSON() ([]byte, error) { + r2 := struct { + Method string `json:"method"` + Params *json.RawMessage `json:"params,omitempty"` + ID *ID `json:"id,omitempty"` + Meta *json.RawMessage `json:"meta,omitempty"` + JSONRPC string `json:"jsonrpc"` + }{ + Method: r.Method, + Params: r.Params, + Meta: r.Meta, + JSONRPC: "2.0", + } + if !r.Notif { + r2.ID = &r.ID + } + return json.Marshal(r2) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (r *Request) UnmarshalJSON(data []byte) error { + var r2 struct { + Method string `json:"method"` + Params *json.RawMessage `json:"params,omitempty"` + Meta *json.RawMessage `json:"meta,omitempty"` + ID *ID `json:"id"` + } + + // Detect if the "params" field is JSON "null" or just not present + // by seeing if the field gets overwritten to nil. + r2.Params = &json.RawMessage{} + + if err := json.Unmarshal(data, &r2); err != nil { + return err + } + r.Method = r2.Method + if r2.Params == nil { + r.Params = &jsonNull + } else if len(*r2.Params) == 0 { + r.Params = nil + } else { + r.Params = r2.Params + } + r.Meta = r2.Meta + if r2.ID == nil { + r.ID = ID{} + r.Notif = true + } else { + r.ID = *r2.ID + r.Notif = false + } + return nil +} + +// SetParams sets r.Params to the JSON representation of v. If JSON +// marshaling fails, it returns an error. 
+func (r *Request) SetParams(v interface{}) error { + b, err := json.Marshal(v) + if err != nil { + return err + } + r.Params = (*json.RawMessage)(&b) + return nil +} + +// SetMeta sets r.Meta to the JSON representation of v. If JSON +// marshaling fails, it returns an error. +func (r *Request) SetMeta(v interface{}) error { + b, err := json.Marshal(v) + if err != nil { + return err + } + r.Meta = (*json.RawMessage)(&b) + return nil +} + +// Response represents a JSON-RPC response. See +// http://www.jsonrpc.org/specification#response_object. +type Response struct { + ID ID `json:"id"` + Result *json.RawMessage `json:"result,omitempty"` + Error *Error `json:"error,omitempty"` + + // Meta optionally provides metadata to include in the response. + // + // NOTE: It is not part of spec. However, it is useful for propogating + // tracing context, etc. + Meta *json.RawMessage `json:"meta,omitempty"` + + // SPEC NOTE: The spec says "If there was an error in detecting + // the id in the Request object (e.g. Parse error/Invalid + // Request), it MUST be Null." If we made the ID field nullable, + // then we'd have to make it a pointer type. For simplicity, we're + // ignoring the case where there was an error in detecting the ID + // in the Request object. +} + +// MarshalJSON implements json.Marshaler and adds the "jsonrpc":"2.0" +// property. +func (r Response) MarshalJSON() ([]byte, error) { + if (r.Result == nil || len(*r.Result) == 0) && r.Error == nil { + return nil, errors.New("can't marshal *jsonrpc2.Response (must have result or error)") + } + type tmpType Response // avoid infinite MarshalJSON recursion + b, err := json.Marshal(tmpType(r)) + if err != nil { + return nil, err + } + b = append(b[:len(b)-1], []byte(`,"jsonrpc":"2.0"}`)...) + return b, nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (r *Response) UnmarshalJSON(data []byte) error { + type tmpType Response + + // Detect if the "result" field is JSON "null" or just not present + // by seeing if the field gets overwritten to nil. + *r = Response{Result: &json.RawMessage{}} + + if err := json.Unmarshal(data, (*tmpType)(r)); err != nil { + return err + } + if r.Result == nil { // JSON "null" + r.Result = &jsonNull + } else if len(*r.Result) == 0 { + r.Result = nil + } + return nil +} + +// SetResult sets r.Result to the JSON representation of v. If JSON +// marshaling fails, it returns an error. +func (r *Response) SetResult(v interface{}) error { + b, err := json.Marshal(v) + if err != nil { + return err + } + r.Result = (*json.RawMessage)(&b) + return nil +} + +// Error represents a JSON-RPC response error. +type Error struct { + Code int64 `json:"code"` + Message string `json:"message"` + Data *json.RawMessage `json:"data"` +} + +// SetError sets e.Error to the JSON representation of v. If JSON +// marshaling fails, it panics. +func (e *Error) SetError(v interface{}) { + b, err := json.Marshal(v) + if err != nil { + panic("Error.SetData: " + err.Error()) + } + e.Data = (*json.RawMessage)(&b) +} + +// Error implements the Go error interface. +func (e *Error) Error() string { + return fmt.Sprintf("jsonrpc2: code %v message: %s", e.Code, e.Message) +} + +const ( + // Errors defined in the JSON-RPC spec. See + // http://www.jsonrpc.org/specification#error_object. + CodeParseError = -32700 + CodeInvalidRequest = -32600 + CodeMethodNotFound = -32601 + CodeInvalidParams = -32602 + CodeInternalError = -32603 + codeServerErrorStart = -32099 + codeServerErrorEnd = -32000 +) + +// Handler handles JSON-RPC requests and notifications. +type Handler interface { + // Handle is called to handle a request. No other requests are handled + // until it returns. If you do not require strict ordering behaviour + // of received RPCs, it is suggested to wrap your handler in + // AsyncHandler. 
+ Handle(context.Context, *Conn, *Request) +} + +// ID represents a JSON-RPC 2.0 request ID, which may be either a +// string or number (or null, which is unsupported). +type ID struct { + // At most one of Num or Str may be nonzero. If both are zero + // valued, then IsNum specifies which field's value is to be used + // as the ID. + Num uint64 + Str string + + // IsString controls whether the Num or Str field's value should be + // used as the ID, when both are zero valued. It must always be + // set to true if the request ID is a string. + IsString bool +} + +func (id ID) String() string { + if id.IsString { + return strconv.Quote(id.Str) + } + return strconv.FormatUint(id.Num, 10) +} + +// MarshalJSON implements json.Marshaler. +func (id ID) MarshalJSON() ([]byte, error) { + if id.IsString { + return json.Marshal(id.Str) + } + return json.Marshal(id.Num) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (id *ID) UnmarshalJSON(data []byte) error { + // Support both uint64 and string IDs. + var v uint64 + if err := json.Unmarshal(data, &v); err == nil { + *id = ID{Num: v} + return nil + } + var v2 string + if err := json.Unmarshal(data, &v2); err != nil { + return err + } + *id = ID{Str: v2, IsString: true} + return nil +} + +// Conn is a JSON-RPC client/server connection. The JSON-RPC protocol +// is symmetric, so a Conn runs on both ends of a client-server +// connection. +type Conn struct { + stream ObjectStream + + h Handler + + mu sync.Mutex + shutdown bool + closing bool + seq uint64 + pending map[ID]*call + + sending sync.Mutex + + disconnect chan struct{} + + // Set by ConnOpt funcs. + onRecv []func(*Request, *Response) + onSend []func(*Request, *Response) +} + +var _ JSONRPC2 = (*Conn)(nil) + +// ErrClosed indicates that the JSON-RPC connection is closed (or in +// the process of closing). 
+var ErrClosed = errors.New("jsonrpc2: connection is closed") + +// NewConn creates a new JSON-RPC client/server connection using the +// given ReadWriteCloser (typically a TCP connection or stdio). The +// JSON-RPC protocol is symmetric, so a Conn runs on both ends of a +// client-server connection. +// +// NewClient consumes conn, so you should call Close on the returned +// client not on the given conn. +func NewConn(ctx context.Context, stream ObjectStream, h Handler, opt ...ConnOpt) *Conn { + c := &Conn{ + stream: stream, + h: h, + pending: map[ID]*call{}, + disconnect: make(chan struct{}), + } + for _, opt := range opt { + opt(c) + } + go c.readMessages(ctx) + return c +} + +// Close closes the JSON-RPC connection. The connection may not be +// used after it has been closed. +func (c *Conn) Close() error { + c.mu.Lock() + if c.shutdown || c.closing { + c.mu.Unlock() + return ErrClosed + } + c.closing = true + c.mu.Unlock() + return c.stream.Close() +} + +func (c *Conn) send(ctx context.Context, m *anyMessage, wait bool) (cc *call, err error) { + c.sending.Lock() + defer c.sending.Unlock() + + // m.request.ID could be changed, so we store a copy to correctly + // clean up pending + var id ID + + c.mu.Lock() + if c.shutdown || c.closing { + c.mu.Unlock() + return nil, ErrClosed + } + + // Store requests so we can later associate them with incoming + // responses. 
+ if m.request != nil && wait { + cc = &call{request: m.request, seq: c.seq, done: make(chan error, 1)} + if !m.request.ID.IsString && m.request.ID.Num == 0 { + // unset, use next seq as call ID + m.request.ID.Num = c.seq + } + id = m.request.ID + c.pending[id] = cc + c.seq++ + } + c.mu.Unlock() + + if len(c.onSend) > 0 { + var ( + req *Request + resp *Response + ) + switch { + case m.request != nil: + req = m.request + case m.response != nil: + resp = m.response + } + for _, onSend := range c.onSend { + onSend(req, resp) + } + } + + // From here on, if we fail to send this, then we need to remove + // this from the pending map so we don't block on it or pile up + // pending entries for unsent messages. + defer func() { + if err != nil { + if cc != nil { + c.mu.Lock() + delete(c.pending, id) + c.mu.Unlock() + } + } + }() + + if err := c.stream.WriteObject(m); err != nil { + return nil, err + } + return cc, nil +} + +// Call initiates a JSON-RPC call using the specified method and +// params, and waits for the response. If the response is successful, +// its result is stored in result (a pointer to a value that can be +// JSON-unmarshaled into); otherwise, a non-nil error is returned. 
+func (c *Conn) Call(ctx context.Context, method string, params, result interface{}, opts ...CallOption) error { + req := &Request{Method: method} + if err := req.SetParams(params); err != nil { + return err + } + for _, opt := range opts { + if err := opt.apply(req); err != nil { + return err + } + } + call, err := c.send(ctx, &anyMessage{request: req}, true) + if err != nil { + return err + } + select { + case err, ok := <-call.done: + if !ok { + err = ErrClosed + } + if err != nil { + return err + } + if result != nil { + if call.response.Result == nil { + call.response.Result = &jsonNull + } + // TODO(sqs): error handling + if err := json.Unmarshal(*call.response.Result, result); err != nil { + return err + } + } + return nil + + case <-ctx.Done(): + return ctx.Err() + } +} + +var jsonNull = json.RawMessage("null") + +// Notify is like Call, but it returns when the notification request +// is sent (without waiting for a response, because JSON-RPC +// notifications do not have responses). +func (c *Conn) Notify(ctx context.Context, method string, params interface{}, opts ...CallOption) error { + req := &Request{Method: method, Notif: true} + if err := req.SetParams(params); err != nil { + return err + } + for _, opt := range opts { + if err := opt.apply(req); err != nil { + return err + } + } + _, err := c.send(ctx, &anyMessage{request: req}, false) + return err +} + +// Reply sends a successful response with a result. +func (c *Conn) Reply(ctx context.Context, id ID, result interface{}) error { + resp := &Response{ID: id} + if err := resp.SetResult(result); err != nil { + return err + } + _, err := c.send(ctx, &anyMessage{response: resp}, false) + return err +} + +// ReplyWithError sends a response with an error. +func (c *Conn) ReplyWithError(ctx context.Context, id ID, respErr *Error) error { + _, err := c.send(ctx, &anyMessage{response: &Response{ID: id, Error: respErr}}, false) + return err +} + +// SendResponse sends resp to the peer. 
It is lower level than (*Conn).Reply. +func (c *Conn) SendResponse(ctx context.Context, resp *Response) error { + _, err := c.send(ctx, &anyMessage{response: resp}, false) + return err +} + +// DisconnectNotify returns a channel that is closed when the +// underlying connection is disconnected. +func (c *Conn) DisconnectNotify() <-chan struct{} { + return c.disconnect +} + +func (c *Conn) readMessages(ctx context.Context) { + var err error + for err == nil { + var m anyMessage + err = c.stream.ReadObject(&m) + if err != nil { + break + } + + switch { + case m.request != nil: + for _, onRecv := range c.onRecv { + onRecv(m.request, nil) + } + c.h.Handle(ctx, c, m.request) + + case m.response != nil: + resp := m.response + if resp != nil { + id := resp.ID + c.mu.Lock() + call := c.pending[id] + delete(c.pending, id) + c.mu.Unlock() + + if call != nil { + call.response = resp + } + + if len(c.onRecv) > 0 { + var req *Request + if call != nil { + req = call.request + } + for _, onRecv := range c.onRecv { + onRecv(req, resp) + } + } + + switch { + case call == nil: + log.Printf("jsonrpc2: ignoring response #%s with no corresponding request", id) + + case resp.Error != nil: + call.done <- resp.Error + close(call.done) + + default: + call.done <- nil + close(call.done) + } + } + } + } + + c.sending.Lock() + c.mu.Lock() + c.shutdown = true + closing := c.closing + if err == io.EOF { + if closing { + err = ErrClosed + } else { + err = io.ErrUnexpectedEOF + } + } + for _, call := range c.pending { + call.done <- err + close(call.done) + } + c.mu.Unlock() + c.sending.Unlock() + if err != io.ErrUnexpectedEOF && !closing { + log.Println("jsonrpc2: protocol error:", err) + } + close(c.disconnect) +} + +// call represents a JSON-RPC call over its entire lifecycle. +type call struct { + request *Request + response *Response + seq uint64 // the seq of the request + done chan error +} + +// anyMessage represents either a JSON Request or Response. 
+type anyMessage struct { + request *Request + response *Response +} + +func (m anyMessage) MarshalJSON() ([]byte, error) { + var v interface{} + switch { + case m.request != nil && m.response == nil: + v = m.request + case m.request == nil && m.response != nil: + v = m.response + } + if v != nil { + return json.Marshal(v) + } + return nil, errors.New("jsonrpc2: message must have exactly one of the request or response fields set") +} + +func (m *anyMessage) UnmarshalJSON(data []byte) error { + // The presence of these fields distinguishes between the 2 + // message types. + type msg struct { + ID interface{} `json:"id"` + Method *string `json:"method"` + Result anyValueWithExplicitNull `json:"result"` + Error interface{} `json:"error"` + } + + var isRequest, isResponse bool + checkType := func(m *msg) error { + mIsRequest := m.Method != nil + mIsResponse := m.Result.null || m.Result.value != nil || m.Error != nil + if (!mIsRequest && !mIsResponse) || (mIsRequest && mIsResponse) { + return errors.New("jsonrpc2: unable to determine message type (request or response)") + } + if (mIsRequest && isResponse) || (mIsResponse && isRequest) { + return errors.New("jsonrpc2: batch message type mismatch (must be all requests or all responses)") + } + isRequest = mIsRequest + isResponse = mIsResponse + return nil + } + + if isArray := len(data) > 0 && data[0] == '['; isArray { + var msgs []msg + if err := json.Unmarshal(data, &msgs); err != nil { + return err + } + if len(msgs) == 0 { + return errors.New("jsonrpc2: invalid empty batch") + } + for _, msg := range msgs { + if err := checkType(&msg); err != nil { + return err + } + } + } else { + var msg msg + if err := json.Unmarshal(data, &msg); err != nil { + return err + } + if err := checkType(&msg); err != nil { + return err + } + } + + var v interface{} + switch { + case isRequest && !isResponse: + v = &m.request + case !isRequest && isResponse: + v = &m.response + } + if err := json.Unmarshal(data, v); err != nil { + return 
err + } + if !isRequest && isResponse && m.response.Error == nil && m.response.Result == nil { + m.response.Result = &jsonNull + } + return nil +} + +// anyValueWithExplicitNull is used to distinguish {} from +// {"result":null} by anyMessage's JSON unmarshaler. +type anyValueWithExplicitNull struct { + null bool // JSON "null" + value interface{} +} + +func (v anyValueWithExplicitNull) MarshalJSON() ([]byte, error) { + return json.Marshal(v.value) +} + +func (v *anyValueWithExplicitNull) UnmarshalJSON(data []byte) error { + data = bytes.TrimSpace(data) + if string(data) == "null" { + *v = anyValueWithExplicitNull{null: true} + return nil + } + *v = anyValueWithExplicitNull{} + return json.Unmarshal(data, &v.value) +} + +var ( + errInvalidRequestJSON = errors.New("jsonrpc2: request must be either a JSON object or JSON array") + errInvalidResponseJSON = errors.New("jsonrpc2: response must be either a JSON object or JSON array") +) diff --git a/vendor/github.com/sourcegraph/jsonrpc2/stream.go b/vendor/github.com/sourcegraph/jsonrpc2/stream.go new file mode 100644 index 000000000..f38c026ac --- /dev/null +++ b/vendor/github.com/sourcegraph/jsonrpc2/stream.go @@ -0,0 +1,164 @@ +package jsonrpc2 + +import ( + "bufio" + "encoding/binary" + "encoding/json" + "fmt" + "io" + "strconv" + "strings" + "sync" +) + +// An ObjectStream is a bidirectional stream of JSON-RPC 2.0 objects. +type ObjectStream interface { + // WriteObject writes a JSON-RPC 2.0 object to the stream. + WriteObject(obj interface{}) error + + // ReadObject reads the next JSON-RPC 2.0 object from the stream + // and stores it in the value pointed to by v. + ReadObject(v interface{}) error + + io.Closer +} + +// A bufferedObjectStream is an ObjectStream that uses a buffered +// io.ReadWriteCloser to send and receive objects. 
+type bufferedObjectStream struct { + conn io.Closer // all writes should go through w, all reads through r + w *bufio.Writer + r *bufio.Reader + + codec ObjectCodec + + mu sync.Mutex +} + +// NewBufferedStream creates a buffered stream from a network +// connection (or other similar interface). The underlying +// objectStream is used to produce the bytes to write to the stream +// for the JSON-RPC 2.0 objects. +func NewBufferedStream(conn io.ReadWriteCloser, codec ObjectCodec) ObjectStream { + return &bufferedObjectStream{ + conn: conn, + w: bufio.NewWriter(conn), + r: bufio.NewReader(conn), + codec: codec, + } +} + +// WriteObject implements ObjectStream. +func (t *bufferedObjectStream) WriteObject(obj interface{}) error { + t.mu.Lock() + defer t.mu.Unlock() + if err := t.codec.WriteObject(t.w, obj); err != nil { + return err + } + return t.w.Flush() +} + +// ReadObject implements ObjectStream. +func (t *bufferedObjectStream) ReadObject(v interface{}) error { + return t.codec.ReadObject(t.r, v) +} + +// Close implements ObjectStream. +func (t *bufferedObjectStream) Close() error { + return t.conn.Close() +} + +// An ObjectCodec specifies how to encoed and decode a JSON-RPC 2.0 +// object in a stream. +type ObjectCodec interface { + // WriteObject writes a JSON-RPC 2.0 object to the stream. + WriteObject(stream io.Writer, obj interface{}) error + + // ReadObject reads the next JSON-RPC 2.0 object from the stream + // and stores it in the value pointed to by v. + ReadObject(stream *bufio.Reader, v interface{}) error +} + +// VarintObjectCodec reads/writes JSON-RPC 2.0 objects with a varint +// header that encodes the byte length. +type VarintObjectCodec struct{} + +// WriteObject implements ObjectCodec. 
+func (VarintObjectCodec) WriteObject(stream io.Writer, obj interface{}) error { + data, err := json.Marshal(obj) + if err != nil { + return err + } + var buf [binary.MaxVarintLen64]byte + b := binary.PutUvarint(buf[:], uint64(len(data))) + if _, err := stream.Write(buf[:b]); err != nil { + return err + } + if _, err := stream.Write(data); err != nil { + return err + } + return nil +} + +// ReadObject implements ObjectCodec. +func (VarintObjectCodec) ReadObject(stream *bufio.Reader, v interface{}) error { + b, err := binary.ReadUvarint(stream) + if err != nil { + return err + } + return json.NewDecoder(io.LimitReader(stream, int64(b))).Decode(v) +} + +// VSCodeObjectCodec reads/writes JSON-RPC 2.0 objects with +// Content-Length and Content-Type headers, as specified by +// https://github.com/Microsoft/language-server-protocol/blob/master/protocol.md#base-protocol. +type VSCodeObjectCodec struct{} + +// WriteObject implements ObjectCodec. +func (VSCodeObjectCodec) WriteObject(stream io.Writer, obj interface{}) error { + data, err := json.Marshal(obj) + if err != nil { + return err + } + if _, err := fmt.Fprintf(stream, "Content-Length: %d\r\n\r\n", len(data)); err != nil { + return err + } + if _, err := stream.Write(data); err != nil { + return err + } + return nil +} + +// ReadObject implements ObjectCodec. 
+func (VSCodeObjectCodec) ReadObject(stream *bufio.Reader, v interface{}) error { + var contentLength uint64 + for { + line, err := stream.ReadString('\r') + if err != nil { + return err + } + b, err := stream.ReadByte() + if err != nil { + return err + } + if b != '\n' { + return fmt.Errorf(`jsonrpc2: line endings must be \r\n`) + } + if line == "\r" { + break + } + if strings.HasPrefix(line, "Content-Length: ") { + line = strings.TrimPrefix(line, "Content-Length: ") + line = strings.TrimSpace(line) + var err error + contentLength, err = strconv.ParseUint(line, 10, 32) + if err != nil { + return err + } + } + } + if contentLength == 0 { + return fmt.Errorf("jsonrpc2: no Content-Length header found") + } + return json.NewDecoder(io.LimitReader(stream, int64(contentLength))).Decode(v) +} diff --git a/vendor/github.com/sourcegraph/jsonrpc2/websocket/stream.go b/vendor/github.com/sourcegraph/jsonrpc2/websocket/stream.go new file mode 100644 index 000000000..26313a07b --- /dev/null +++ b/vendor/github.com/sourcegraph/jsonrpc2/websocket/stream.go @@ -0,0 +1,44 @@ +// Package websocket provides WebSocket transport support for JSON-RPC +// 2.0. +package websocket + +import ( + "io" + + "github.com/gorilla/websocket" +) + +// A ObjectStream is a jsonrpc2.ObjectStream that uses a WebSocket to +// send and receive JSON-RPC 2.0 objects. +type ObjectStream struct { + conn *websocket.Conn +} + +// NewObjectStream creates a new jsonrpc2.ObjectStream for sending and +// receiving JSON-RPC 2.0 objects over a WebSocket. +func NewObjectStream(conn *websocket.Conn) ObjectStream { + return ObjectStream{conn: conn} +} + +// WriteObject implements jsonrpc2.ObjectStream. +func (t ObjectStream) WriteObject(obj interface{}) error { + return t.conn.WriteJSON(obj) +} + +// ReadObject implements jsonrpc2.ObjectStream. 
+func (t ObjectStream) ReadObject(v interface{}) error { + err := t.conn.ReadJSON(v) + if e, ok := err.(*websocket.CloseError); ok { + if e.Code == websocket.CloseAbnormalClosure && e.Text == io.ErrUnexpectedEOF.Error() { + // Suppress a noisy (but harmless) log message by + // unwrapping this error. + err = io.ErrUnexpectedEOF + } + } + return err +} + +// Close implements jsonrpc2.ObjectStream. +func (t ObjectStream) Close() error { + return t.conn.Close() +} From 44501c08500ad27579ee1a183c77e3eb9367568c Mon Sep 17 00:00:00 2001 From: Ggicci Date: Thu, 27 Dec 2018 01:42:15 +0800 Subject: [PATCH 004/302] Suppress API service testing now --- api/service_test.go | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/api/service_test.go b/api/service_test.go index 813643c9a..1b2fa0782 100644 --- a/api/service_test.go +++ b/api/service_test.go @@ -1,15 +1,15 @@ package api_test -import ( - "testing" - - "github.com/CovenantSQL/CovenantSQL/api" -) - -func TestService(t *testing.T) { - service := &api.Service{ - WebsocketAddr: ":8546", - } - service.RunServers() - // TODO -} +// import ( +// "testing" +// +// "github.com/CovenantSQL/CovenantSQL/api" +// ) +// +// func TestService(t *testing.T) { +// service := &api.Service{ +// WebsocketAddr: ":8546", +// } +// service.RunServers() +// // TODO +// } From e3bde5ac0ba570762913f60d18659bd6d6663a29 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Sat, 29 Dec 2018 00:17:31 +0800 Subject: [PATCH 005/302] Add block and transaction models for the API --- api/models/blocks.go | 54 +++++++++++++++++++++++++++++++ api/models/models.go | 38 ++++++++++++++++++++++ api/models/transactions.go | 66 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 158 insertions(+) create mode 100644 api/models/blocks.go create mode 100644 api/models/models.go create mode 100644 api/models/transactions.go diff --git a/api/models/blocks.go b/api/models/blocks.go new file mode 100644 index 000000000..3ad5162cc --- 
/dev/null +++ b/api/models/blocks.go @@ -0,0 +1,54 @@ +package models + +import ( + "database/sql" + "time" +) + +// BlocksModel groups operations on Blocks. +type BlocksModel struct{} + +// Block is a block. +type Block struct { + Height int `db:"height" json:"height"` // pk + Hash string `db:"hash" json:"hash"` + Timestamp int64 `db:"timestamp" json:"-"` + TimestampHuman time.Time `db:"-" json:"timestamp"` + Version int32 `db:"version" json:"version"` + Producer string `db:"producer" json:"producer"` + MerkleRoot string `db:"merkle_root" json:"merkle_root"` + Parent string `db:"parent" json:"parent"` + TxCount int `db:"tx_count" json:"tx_count"` +} + +// GetBlockList get a list of blocks with height in [from, to). +func (m *BlocksModel) GetBlockList(from, to int) (blocks []*Block, err error) { + query := `SELECT height, hash, timestamp, version, producer, merkle_root, parent, tx_count + FROM indexed_blocks WHERE height >= ? and height < ?` + _, err = chaindb.Select(&blocks, query, from, to) + return blocks, err +} + +// GetBlockByHeight get a block by its height. +func (m *BlocksModel) GetBlockByHeight(height int) (block *Block, err error) { + block = &Block{} + query := `SELECT height, hash, timestamp, version, producer, merkle_root, parent, tx_count + FROM indexed_blocks WHERE height = ?` + err = chaindb.SelectOne(block, query, height) + if err == sql.ErrNoRows { + return nil, nil + } + return block, err +} + +// GetBlockByHash get a block by its hash. 
+func (m *BlocksModel) GetBlockByHash(hash string) (block *Block, err error) { + block = &Block{} + query := `SELECT height, hash, timestamp, version, producer, merkle_root, parent, tx_count + FROM indexed_blocks WHERE hash = ?` + err = chaindb.SelectOne(block, query, hash) + if err == sql.ErrNoRows { + return nil, nil + } + return block, err +} diff --git a/api/models/models.go b/api/models/models.go new file mode 100644 index 000000000..9e8809ba6 --- /dev/null +++ b/api/models/models.go @@ -0,0 +1,38 @@ +package models + +import ( + "database/sql" + "fmt" + + "github.com/CovenantSQL/CovenantSQL/conf" + _ "github.com/CovenantSQL/go-sqlite3-encrypt" // sqlite3 driver + "github.com/go-gorp/gorp" + "github.com/pkg/errors" +) + +var ( + chaindb *gorp.DbMap +) + +// InitModels setup the models package. +func InitModels() error { + return initChainDBConnection() +} + +func initChainDBConnection() error { + dsn := fmt.Sprintf("%s?_journal=WAL&mode=ro", conf.GConf.BP.ChainFileName) + underdb, err := sql.Open("sqlite3", dsn) + if err != nil { + return errors.WithMessage(err, "unable to open chain.db") + } + chaindb = &gorp.DbMap{ + Db: underdb, + Dialect: gorp.SqliteDialect{}, + } + + // register tables + chaindb.AddTableWithName(Block{}, "indexed_blocks").SetKeys(false, "Height") + chaindb.AddTableWithName(Transaction{}, "indexed_transactions").SetKeys(false, "BlockHeight", "TxIndex") + + return nil +} diff --git a/api/models/transactions.go b/api/models/transactions.go new file mode 100644 index 000000000..071e39cf5 --- /dev/null +++ b/api/models/transactions.go @@ -0,0 +1,66 @@ +package models + +import ( + "database/sql" + "fmt" + "time" +) + +// TransactionsModel groups operations on Transactions. +type TransactionsModel struct{} + +// Transaction is a transaction. 
+type Transaction struct { + BlockHeight int `db:"block_height" json:"block_height"` // pk1 + TxIndex int `db:"tx_index" json:"index"` // pk2 + Hash string `db:"hash" json:"hash"` + BlockHash string `db:"block_hash" json:"block_hash"` + Timestamp int64 `db:"timestamp" json:"-"` + TimestampHuman time.Time `db:"-" json:"timestamp"` + TxType int `db:"tx_type" json:"type"` + Signee string `db:"signee" json:"signee"` + Address string `db:"address" json:"address"` + Signature string `db:"signature" json:"signature"` + Raw string `db:"raw" json:"raw"` + Tx interface{} `db:"-" json:"tx"` +} + +// GetTransactionByHash get a transaction by its hash. +func (m *TransactionsModel) GetTransactionByHash(hash string) (tx *Transaction, err error) { + tx = &Transaction{} + query := `SELECT block_height, tx_index, hash, block_hash, timestamp, tx_type, + signee, address, signature, raw + FROM indexed_transactions WHERE hash = ?` + err = chaindb.SelectOne(tx, query, hash) + if err == sql.ErrNoRows { + return nil, nil + } + return tx, err +} + +// GetTransactionList get a transaction list by hash marker. +func (m *TransactionsModel) GetTransactionList(since, direction string, limit int) ( + txs []*Transaction, err error, +) { + tx, err := m.GetTransactionByHash(since) + if tx == nil { + return txs, err + } + + orderBy := "DESC" + compare := "<" + if direction == "forward" { + orderBy = "ASC" + compare = ">" + } + + query := fmt.Sprintf(`SELECT block_height, tx_index, hash, block_hash, + timestamp, tx_type, signee, address, signature, raw + FROM indexed_transactions + WHERE block_height %s ? and tx_index %s ? 
+ ORDER BY block_height %s, tx_index %s + LIMIT ?`, compare, compare, orderBy, orderBy) + + _, err = chaindb.Select(&txs, query, tx.BlockHeight, tx.TxIndex, limit) + return txs, err +} From ead3d6191d4ff52684e2971883dfa579f9cd5f26 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Sat, 29 Dec 2018 00:18:32 +0800 Subject: [PATCH 006/302] Remove method echo --- api/echo.go | 17 ----------------- 1 file changed, 17 deletions(-) delete mode 100644 api/echo.go diff --git a/api/echo.go b/api/echo.go deleted file mode 100644 index 868d014ff..000000000 --- a/api/echo.go +++ /dev/null @@ -1,17 +0,0 @@ -package api - -import ( - "context" - - "github.com/sourcegraph/jsonrpc2" -) - -func init() { - registerMethod("echo", echo) -} - -func echo(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) ( - result interface{}, err error, -) { - return req.Params, nil -} From db30806def17dbe9446357d2d91c9f087e06e177 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Sat, 29 Dec 2018 00:21:31 +0800 Subject: [PATCH 007/302] Add middleware to pre-process RPC method parameters --- api/jsonrpc.go | 21 ++++++++++++++++-- api/middleware.go | 54 +++++++++++++++++++++++++++++++++++++++++++++++ api/service.go | 8 +++++++ 3 files changed, 81 insertions(+), 2 deletions(-) create mode 100644 api/middleware.go diff --git a/api/jsonrpc.go b/api/jsonrpc.go index 0f19f97fd..8544b6a0c 100644 --- a/api/jsonrpc.go +++ b/api/jsonrpc.go @@ -3,6 +3,7 @@ package api import ( "context" "fmt" + "reflect" "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/pkg/errors" @@ -15,9 +16,21 @@ var ( type jsonrpcHandlerFunc func(context.Context, *jsonrpc2.Conn, *jsonrpc2.Request) (interface{}, error) -func registerMethod(method string, handlerFunc jsonrpcHandlerFunc) { +func registerMethod(method string, handlerFunc jsonrpcHandlerFunc, paramsType interface{}) { log.WithField("method", method).Info("api: register rpc method") - jsonrpcHandler.RegisterMethod(method, handlerFunc) + + if paramsType == nil { + 
jsonrpcHandler.RegisterMethod(method, handlerFunc) + return + } + + // use a middleware component to pre-process params + typ := reflect.TypeOf(paramsType) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + + jsonrpcHandler.RegisterMethod(method, processParams(handlerFunc, typ)) } // JSONRPCHandler is a handler handling JSON-RPC protocol. @@ -64,6 +77,10 @@ func (h *JSONRPCHandler) handle(ctx context.Context, conn *jsonrpc2.Conn, req *j fn := h.methods[req.Method] if fn == nil { fn = methodNotFound + } else if req.Params == nil { + // pre-check req.Params not be nil + return nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams} } + return fn(ctx, conn, req) } diff --git a/api/middleware.go b/api/middleware.go new file mode 100644 index 000000000..352807c7f --- /dev/null +++ b/api/middleware.go @@ -0,0 +1,54 @@ +package api + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + + "github.com/sourcegraph/jsonrpc2" +) + +// Validator is designed for params checking. +type Validator interface { + Validate() error +} + +// middleware: unmarshal req.Params(JSON array) to pre-defined structures (Object) +func processParams(h jsonrpcHandlerFunc, paramsType reflect.Type) jsonrpcHandlerFunc { + return func(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) ( + result interface{}, err error, + ) { + paramsNew := reflect.New(paramsType) + paramsElem := paramsNew.Elem() + paramsArray := make([]interface{}, paramsElem.NumField()) + for i := 0; i < paramsElem.NumField(); i++ { + paramsArray[i] = paramsElem.Field(i).Addr().Interface() + } + + // Unmarshal JSON array to object + // e.g. 
"[0,10]" --> struct { From: 0, To: 10 } + if err := json.Unmarshal(*req.Params, ¶msArray); err != nil { + return nil, err + } + + if len(paramsArray) != paramsElem.NumField() { + return nil, fmt.Errorf("unexpected parameters, expected %d but got %d", + paramsElem.NumField(), len(paramsArray)) + } + + // parameters validator + params := paramsNew.Interface() + if t, ok := params.(Validator); ok { + if err := t.Validate(); err != nil { + return nil, &jsonrpc2.Error{ + Code: jsonrpc2.CodeInvalidParams, + Message: err.Error(), + } + } + } + + ctx = context.WithValue(ctx, interface{}("_params"), params) + return h(ctx, conn, req) + } +} diff --git a/api/service.go b/api/service.go index c6186c42a..18e1240bd 100644 --- a/api/service.go +++ b/api/service.go @@ -10,6 +10,8 @@ import ( "syscall" "time" + "github.com/CovenantSQL/CovenantSQL/api/models" + "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/gorilla/websocket" "github.com/pkg/errors" @@ -38,6 +40,12 @@ func (s *Service) StopServers() { // RunServers start API servers in a blocking way, fatal on errors. 
func (s *Service) RunServers() { + // setup database + if err := models.InitModels(); err != nil { + log.WithError(err).Fatal("api: init models failed") + return + } + s.stopChan = make(chan struct{}) wg := sync.WaitGroup{} From a149cf8c6b85ce2d752d63989236a5ef501fa960 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Sat, 29 Dec 2018 00:22:16 +0800 Subject: [PATCH 008/302] Add rpc methods for blocks and transactions --- api/blocks.go | 60 +++++++++++++++++++++++++++++++++++++++++++++ api/transactions.go | 51 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 111 insertions(+) create mode 100644 api/blocks.go create mode 100644 api/transactions.go diff --git a/api/blocks.go b/api/blocks.go new file mode 100644 index 000000000..055968506 --- /dev/null +++ b/api/blocks.go @@ -0,0 +1,60 @@ +package api + +import ( + "context" + "errors" + + "github.com/CovenantSQL/CovenantSQL/api/models" + "github.com/sourcegraph/jsonrpc2" +) + +func init() { + registerMethod("bp_getBlockList", bpGetBlockList, bpGetBlockListParams{}) + registerMethod("bp_getBlockByHeight", bpGetBlockByHeight, bpGetBlockByHeightParams{}) + registerMethod("bp_getBlockByHash", bpGetBlockByHash, bpGetBlockByHashParams{}) +} + +type bpGetBlockListParams struct { + From int `json:"from"` + To int `json:"to"` +} + +func (params *bpGetBlockListParams) Validate() error { + diff := params.To - params.From + if diff < 5 || diff > 100 { + return errors.New("to - from should between 5 and 100") + } + return nil +} + +func bpGetBlockList(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) ( + result interface{}, err error, +) { + params := ctx.Value("_params").(*bpGetBlockListParams) + model := models.BlocksModel{} + return model.GetBlockList(params.From, params.To) +} + +type bpGetBlockByHeightParams struct { + Height int `json:"height"` +} + +func bpGetBlockByHeight(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) ( + result interface{}, err error, +) { + params := 
ctx.Value("_params").(*bpGetBlockByHeightParams) + model := models.BlocksModel{} + return model.GetBlockByHeight(params.Height) +} + +type bpGetBlockByHashParams struct { + Hash string `json:"hash"` +} + +func bpGetBlockByHash(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) ( + result interface{}, err error, +) { + params := ctx.Value("_params").(*bpGetBlockByHashParams) + model := models.BlocksModel{} + return model.GetBlockByHash(params.Hash) +} diff --git a/api/transactions.go b/api/transactions.go new file mode 100644 index 000000000..fd932678b --- /dev/null +++ b/api/transactions.go @@ -0,0 +1,51 @@ +package api + +import ( + "context" + "errors" + "fmt" + + "github.com/CovenantSQL/CovenantSQL/api/models" + "github.com/sourcegraph/jsonrpc2" +) + +func init() { + registerMethod("bp_getTransactionList", bpGetTransactionList, bpGetTransactionListParams{}) + registerMethod("bp_getTransactionByHash", bpGetTransactionByHash, bpGetTransactionByHashParams{}) +} + +type bpGetTransactionListParams struct { + Since string `json:"since"` + Direction string `json:"direction"` + Limit int `json:"limit"` +} + +func (params *bpGetTransactionListParams) Validate() error { + if params.Limit < 5 || params.Limit > 100 { + return errors.New("limit should between 5 and 100") + } + if params.Direction != "backward" && params.Direction != "forward" { + return fmt.Errorf("unknown direction %q", params.Direction) + } + return nil +} + +func bpGetTransactionList(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) ( + result interface{}, err error, +) { + params := ctx.Value("_params").(*bpGetTransactionListParams) + model := models.TransactionsModel{} + return model.GetTransactionList(params.Since, params.Direction, params.Limit) +} + +type bpGetTransactionByHashParams struct { + Hash string `json:"hash"` +} + +func bpGetTransactionByHash(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) ( + result interface{}, err error, +) { + params := 
ctx.Value("_params").(*bpGetTransactionByHashParams)
+	model := models.TransactionsModel{}
+	return model.GetTransactionByHash(params.Hash)
+}

From 20ca86e5dce61712bf5bb3d103de677bfd7ca26f Mon Sep 17 00:00:00 2001
From: Ggicci
Date: Sat, 29 Dec 2018 00:23:05 +0800
Subject: [PATCH 009/302] Start json rpc websocket service with cqld bootstrap

---
 cmd/cqld/bootstrap.go | 10 ++++++++++
 cmd/cqld/main.go      |  4 ++++
 2 files changed, 14 insertions(+)

diff --git a/cmd/cqld/bootstrap.go b/cmd/cqld/bootstrap.go
index 414f23fae..d990e3558 100644
--- a/cmd/cqld/bootstrap.go
+++ b/cmd/cqld/bootstrap.go
@@ -24,6 +24,8 @@ import (
 	"syscall"
 	"time"
 
+	"github.com/CovenantSQL/CovenantSQL/api"
+
 	bp "github.com/CovenantSQL/CovenantSQL/blockproducer"
 	"github.com/CovenantSQL/CovenantSQL/conf"
 	"github.com/CovenantSQL/CovenantSQL/crypto/kms"
@@ -176,6 +178,14 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) {
 		server.Stop()
 	}()
 
+	// start json-rpc server
+	jsonrpcServer := &api.Service{
+		WebsocketAddr: wsapiAddr,
+		ReadTimeout:   60 * time.Second,
+		WriteTimeout:  60 * time.Second,
+	}
+	jsonrpcServer.StartServers()
+
 	signalCh := make(chan os.Signal, 1)
 	signal.Notify(
 		signalCh,
diff --git a/cmd/cqld/main.go b/cmd/cqld/main.go
index e0f3065f7..547225ae3 100644
--- a/cmd/cqld/main.go
+++ b/cmd/cqld/main.go
@@ -59,6 +59,8 @@ var (
 	clientMode      bool
 	clientOperation string
+
+	wsapiAddr string
 )
 
 const name = `cqld`
@@ -77,6 +79,8 @@ func init() {
 	flag.BoolVar(&clientMode, "client", false, "run as client")
 	flag.StringVar(&clientOperation, "operation", "FindNeighbor", "client operation")
 
+	flag.StringVar(&wsapiAddr, "wsapi", ":8546", "Address of the websocket JSON-RPC API")
+
 	flag.Usage = func() {
 		fmt.Fprintf(os.Stderr, "\n%s\n\n", desc)
 		fmt.Fprintf(os.Stderr, "Usage: %s [arguments]\n", name)

From 1fc193b587a7c8e7b706537d2e73a2f44658ab56 Mon Sep 17 00:00:00 2001
From: Ggicci
Date: Sat, 29 Dec 2018 10:01:20 +0800
Subject: [PATCH 010/302] Update dependencies, add gorp db helper

---
 Gopkg.lock 
| 9 + .../github.com/go-gorp/gorp/CONTRIBUTING.md | 34 + vendor/github.com/go-gorp/gorp/LICENSE | 22 + vendor/github.com/go-gorp/gorp/README.md | 805 ++++++++++++++++++ vendor/github.com/go-gorp/gorp/column.go | 83 ++ vendor/github.com/go-gorp/gorp/db.go | 787 +++++++++++++++++ vendor/github.com/go-gorp/gorp/dialect.go | 112 +++ .../github.com/go-gorp/gorp/dialect_mysql.go | 176 ++++ .../github.com/go-gorp/gorp/dialect_oracle.go | 146 ++++ .../go-gorp/gorp/dialect_postgres.go | 156 ++++ .../github.com/go-gorp/gorp/dialect_sqlite.go | 119 +++ .../go-gorp/gorp/dialect_sqlserver.go | 152 ++++ vendor/github.com/go-gorp/gorp/errors.go | 38 + vendor/github.com/go-gorp/gorp/gorp.go | 608 +++++++++++++ vendor/github.com/go-gorp/gorp/gorp_go17.go | 54 ++ vendor/github.com/go-gorp/gorp/gorp_go18.go | 81 ++ vendor/github.com/go-gorp/gorp/hooks.go | 49 ++ vendor/github.com/go-gorp/gorp/index.go | 56 ++ vendor/github.com/go-gorp/gorp/lockerror.go | 63 ++ vendor/github.com/go-gorp/gorp/logging.go | 44 + vendor/github.com/go-gorp/gorp/nulltypes.go | 58 ++ vendor/github.com/go-gorp/gorp/select.go | 366 ++++++++ vendor/github.com/go-gorp/gorp/table.go | 247 ++++++ .../github.com/go-gorp/gorp/table_bindings.go | 312 +++++++ vendor/github.com/go-gorp/gorp/test_all.sh | 41 + vendor/github.com/go-gorp/gorp/transaction.go | 202 +++++ 26 files changed, 4820 insertions(+) create mode 100644 vendor/github.com/go-gorp/gorp/CONTRIBUTING.md create mode 100644 vendor/github.com/go-gorp/gorp/LICENSE create mode 100644 vendor/github.com/go-gorp/gorp/README.md create mode 100644 vendor/github.com/go-gorp/gorp/column.go create mode 100644 vendor/github.com/go-gorp/gorp/db.go create mode 100644 vendor/github.com/go-gorp/gorp/dialect.go create mode 100644 vendor/github.com/go-gorp/gorp/dialect_mysql.go create mode 100644 vendor/github.com/go-gorp/gorp/dialect_oracle.go create mode 100644 vendor/github.com/go-gorp/gorp/dialect_postgres.go create mode 100644 
vendor/github.com/go-gorp/gorp/dialect_sqlite.go create mode 100644 vendor/github.com/go-gorp/gorp/dialect_sqlserver.go create mode 100644 vendor/github.com/go-gorp/gorp/errors.go create mode 100644 vendor/github.com/go-gorp/gorp/gorp.go create mode 100644 vendor/github.com/go-gorp/gorp/gorp_go17.go create mode 100644 vendor/github.com/go-gorp/gorp/gorp_go18.go create mode 100644 vendor/github.com/go-gorp/gorp/hooks.go create mode 100644 vendor/github.com/go-gorp/gorp/index.go create mode 100644 vendor/github.com/go-gorp/gorp/lockerror.go create mode 100644 vendor/github.com/go-gorp/gorp/logging.go create mode 100644 vendor/github.com/go-gorp/gorp/nulltypes.go create mode 100644 vendor/github.com/go-gorp/gorp/select.go create mode 100644 vendor/github.com/go-gorp/gorp/table.go create mode 100644 vendor/github.com/go-gorp/gorp/table_bindings.go create mode 100755 vendor/github.com/go-gorp/gorp/test_all.sh create mode 100644 vendor/github.com/go-gorp/gorp/transaction.go diff --git a/Gopkg.lock b/Gopkg.lock index fcf0c7b8e..17b5b31e5 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -175,6 +175,14 @@ revision = "a5ef70473c97b71626b9abeda80ee92ba2a7de9e" version = "v1.2.0" +[[projects]] + digest = "1:670d1f29fa2aa15ea777cc5bcf95881f379bf8a71dbbe145be0774da97fede72" + name = "github.com/go-gorp/gorp" + packages = ["."] + pruneopts = "UT" + revision = "4df78490a9aa9a78b9b02b0c913df8dc1954faee" + version = "2.1" + [[projects]] branch = "master" digest = "1:48c0fa64e80c089a88d30ab5b826c106af79eb3c65d48e2280f22aa4d61d7a84" @@ -659,6 +667,7 @@ "github.com/cyberdelia/go-metrics-graphite", "github.com/dyatlov/go-opengraph/opengraph", "github.com/fortytw2/leaktest", + "github.com/go-gorp/gorp", "github.com/gorilla/handlers", "github.com/gorilla/mux", "github.com/gorilla/websocket", diff --git a/vendor/github.com/go-gorp/gorp/CONTRIBUTING.md b/vendor/github.com/go-gorp/gorp/CONTRIBUTING.md new file mode 100644 index 000000000..7bc145fd7 --- /dev/null +++ 
b/vendor/github.com/go-gorp/gorp/CONTRIBUTING.md @@ -0,0 +1,34 @@ +# Contributions are very welcome! + +## First: Create an Issue + +Even if your fix is simple, we'd like to have an issue to relate to +the PR. Discussion about the architecture and value can go on the +issue, leaving PR comments exclusively for coding style. + +## Second: Make Your PR + +- Fork the `master` branch +- Make your change +- Make a PR against the `master` branch + +You don't need to wait for comments on the issue before making your +PR. If you do wait for comments, you'll have a better chance of +getting your PR accepted the first time around, but it's not +necessary. + +## Third: Be Patient + +- If your change breaks backward compatibility, this becomes + especially true. + +We all have lives and jobs, and many of us are no longer on projects +that make use of `gorp`. We will get back to you, but it might take a +while. + +## Fourth: Consider Becoming a Maintainer + +We really do need help. We will likely ask you for help after a good +PR, but if we don't, please create an issue requesting maintainership. +Considering how few of us are currently active, we are unlikely to +refuse good help. 
diff --git a/vendor/github.com/go-gorp/gorp/LICENSE b/vendor/github.com/go-gorp/gorp/LICENSE new file mode 100644 index 000000000..b661111d0 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/LICENSE @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2012 James Cooper + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/go-gorp/gorp/README.md b/vendor/github.com/go-gorp/gorp/README.md new file mode 100644 index 000000000..87cb7ba7a --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/README.md @@ -0,0 +1,805 @@ +# Go Relational Persistence + +[![build status](https://img.shields.io/travis/go-gorp/gorp/master.svg)](http://travis-ci.org/go-gorp/gorp) +[![code coverage](https://img.shields.io/coveralls/go-gorp/gorp.svg)](https://coveralls.io/r/go-gorp/gorp) +[![issues](https://img.shields.io/github/issues/go-gorp/gorp.svg)](https://github.com/go-gorp/gorp/issues) +[![godoc v1](https://img.shields.io/badge/godoc-v1-375EAB.svg)](https://godoc.org/gopkg.in/gorp.v1) +[![godoc v2](https://img.shields.io/badge/godoc-v2-375EAB.svg)](https://godoc.org/gopkg.in/gorp.v2) +[![godoc bleeding edge](https://img.shields.io/badge/godoc-bleeding--edge-375EAB.svg)](https://godoc.org/github.com/go-gorp/gorp) + +### Update 2016-11-13: Future versions + +As many of the maintainers have become busy with other projects, +progress toward the ever-elusive v2 has slowed to the point that we're +only occasionally making progress outside of merging pull requests. +In the interest of continuing to release, I'd like to lean toward a +more maintainable path forward. + +For the moment, I am releasing a v2 tag with the current feature set +from master, as some of those features have been actively used and +relied on by more than one project. Our next goal is to continue +cleaning up the code base with non-breaking changes as much as +possible, but if/when a breaking change is needed, we'll just release +new versions. This allows us to continue development at whatever pace +we're capable of, without delaying the release of features or refusing +PRs. + +## Introduction + +I hesitate to call gorp an ORM. Go doesn't really have objects, at +least not in the classic Smalltalk/Java sense. There goes the "O". 
+gorp doesn't know anything about the relationships between your +structs (at least not yet). So the "R" is questionable too (but I use +it in the name because, well, it seemed more clever). + +The "M" is alive and well. Given some Go structs and a database, gorp +should remove a fair amount of boilerplate busy-work from your code. + +I hope that gorp saves you time, minimizes the drudgery of getting +data in and out of your database, and helps your code focus on +algorithms, not infrastructure. + +* Bind struct fields to table columns via API or tag +* Support for embedded structs +* Support for transactions +* Forward engineer db schema from structs (great for unit tests) +* Pre/post insert/update/delete hooks +* Automatically generate insert/update/delete statements for a struct +* Automatic binding of auto increment PKs back to struct after insert +* Delete by primary key(s) +* Select by primary key(s) +* Optional trace sql logging +* Bind arbitrary SQL queries to a struct +* Bind slice to SELECT query results without type assertions +* Use positional or named bind parameters in custom SELECT queries +* Optional optimistic locking using a version column (for + update/deletes) + +## Installation + +Use `go get` or your favorite vendoring tool, using whichever import +path you'd like. + +## Versioning + +We use semantic version tags. Feel free to import through `gopkg.in` +(e.g. `gopkg.in/gorp.v2`) to get the latest tag for a major version, +or check out the tag using your favorite vendoring tool. + +Development is not very active right now, but we have plans to +restructure `gorp` as we continue to move toward a more extensible +system. Whenever a breaking change is needed, the major version will +be bumped. + +The `master` branch is where all development is done, and breaking +changes may happen from time to time. 
That said, if you want to live +on the bleeding edge and are comfortable updating your code when we +make a breaking change, you may use `github.com/go-gorp/gorp` as your +import path. + +Check the version tags to see what's available. We'll make a good +faith effort to add badges for new versions, but we make no +guarantees. + +## Supported Go versions + +This package is guaranteed to be compatible with the latest 2 major +versions of Go. + +Any earlier versions are only supported on a best effort basis and can +be dropped any time. Go has a great compatibility promise. Upgrading +your program to a newer version of Go should never really be a +problem. + +## Migration guide + +#### Pre-v2 to v2 +Automatic mapping of the version column used in optimistic locking has +been removed as it could cause problems if the type was not int. The +version column must now explicitly be set with +`tablemap.SetVersionCol()`. + +## Help/Support + +Use our [`gitter` channel](https://gitter.im/go-gorp/gorp). We used +to use IRC, but with most of us being pulled in many directions, we +often need the email notifications from `gitter` to yell at us to sign +in. 
+ +## Quickstart + +```go +package main + +import ( + "database/sql" + "gopkg.in/gorp.v1" + _ "github.com/mattn/go-sqlite3" + "log" + "time" +) + +func main() { + // initialize the DbMap + dbmap := initDb() + defer dbmap.Db.Close() + + // delete any existing rows + err := dbmap.TruncateTables() + checkErr(err, "TruncateTables failed") + + // create two posts + p1 := newPost("Go 1.1 released!", "Lorem ipsum lorem ipsum") + p2 := newPost("Go 1.2 released!", "Lorem ipsum lorem ipsum") + + // insert rows - auto increment PKs will be set properly after the insert + err = dbmap.Insert(&p1, &p2) + checkErr(err, "Insert failed") + + // use convenience SelectInt + count, err := dbmap.SelectInt("select count(*) from posts") + checkErr(err, "select count(*) failed") + log.Println("Rows after inserting:", count) + + // update a row + p2.Title = "Go 1.2 is better than ever" + count, err = dbmap.Update(&p2) + checkErr(err, "Update failed") + log.Println("Rows updated:", count) + + // fetch one row - note use of "post_id" instead of "Id" since column is aliased + // + // Postgres users should use $1 instead of ? 
placeholders + // See 'Known Issues' below + // + err = dbmap.SelectOne(&p2, "select * from posts where post_id=?", p2.Id) + checkErr(err, "SelectOne failed") + log.Println("p2 row:", p2) + + // fetch all rows + var posts []Post + _, err = dbmap.Select(&posts, "select * from posts order by post_id") + checkErr(err, "Select failed") + log.Println("All rows:") + for x, p := range posts { + log.Printf(" %d: %v\n", x, p) + } + + // delete row by PK + count, err = dbmap.Delete(&p1) + checkErr(err, "Delete failed") + log.Println("Rows deleted:", count) + + // delete row manually via Exec + _, err = dbmap.Exec("delete from posts where post_id=?", p2.Id) + checkErr(err, "Exec failed") + + // confirm count is zero + count, err = dbmap.SelectInt("select count(*) from posts") + checkErr(err, "select count(*) failed") + log.Println("Row count - should be zero:", count) + + log.Println("Done!") +} + +type Post struct { + // db tag lets you specify the column name if it differs from the struct field + Id int64 `db:"post_id"` + Created int64 + Title string `db:",size:50"` // Column size set to 50 + Body string `db:"article_body,size:1024"` // Set both column name and size +} + +func newPost(title, body string) Post { + return Post{ + Created: time.Now().UnixNano(), + Title: title, + Body: body, + } +} + +func initDb() *gorp.DbMap { + // connect to db using standard Go database/sql API + // use whatever database/sql driver you wish + db, err := sql.Open("sqlite3", "/tmp/post_db.bin") + checkErr(err, "sql.Open failed") + + // construct a gorp DbMap + dbmap := &gorp.DbMap{Db: db, Dialect: gorp.SqliteDialect{}} + + // add a table, setting the table name to 'posts' and + // specifying that the Id property is an auto incrementing PK + dbmap.AddTableWithName(Post{}, "posts").SetKeys(true, "Id") + + // create the table. 
in a production system you'd generally + // use a migration tool, or create the tables via scripts + err = dbmap.CreateTablesIfNotExists() + checkErr(err, "Create tables failed") + + return dbmap +} + +func checkErr(err error, msg string) { + if err != nil { + log.Fatalln(msg, err) + } +} +``` + +## Examples + +### Mapping structs to tables + +First define some types: + +```go +type Invoice struct { + Id int64 + Created int64 + Updated int64 + Memo string + PersonId int64 +} + +type Person struct { + Id int64 + Created int64 + Updated int64 + FName string + LName string +} + +// Example of using tags to alias fields to column names +// The 'db' value is the column name +// +// A hyphen will cause gorp to skip this field, similar to the +// Go json package. +// +// This is equivalent to using the ColMap methods: +// +// table := dbmap.AddTableWithName(Product{}, "product") +// table.ColMap("Id").Rename("product_id") +// table.ColMap("Price").Rename("unit_price") +// table.ColMap("IgnoreMe").SetTransient(true) +// +// You can optionally declare the field to be a primary key and/or autoincrement +// +type Product struct { + Id int64 `db:"product_id, primarykey, autoincrement"` + Price int64 `db:"unit_price"` + IgnoreMe string `db:"-"` +} +``` + +Then create a mapper, typically you'd do this one time at app startup: + +```go +// connect to db using standard Go database/sql API +// use whatever database/sql driver you wish +db, err := sql.Open("mymysql", "tcp:localhost:3306*mydb/myuser/mypassword") + +// construct a gorp DbMap +dbmap := &gorp.DbMap{Db: db, Dialect: gorp.MySQLDialect{"InnoDB", "UTF8"}} + +// register the structs you wish to use with gorp +// you can also use the shorter dbmap.AddTable() if you +// don't want to override the table name +// +// SetKeys(true) means we have a auto increment primary key, which +// will get automatically bound to your struct post-insert +// +t1 := dbmap.AddTableWithName(Invoice{}, "invoice_test").SetKeys(true, "Id") +t2 := 
dbmap.AddTableWithName(Person{}, "person_test").SetKeys(true, "Id") +t3 := dbmap.AddTableWithName(Product{}, "product_test").SetKeys(true, "Id") +``` + +### Struct Embedding + +gorp supports embedding structs. For example: + +```go +type Names struct { + FirstName string + LastName string +} + +type WithEmbeddedStruct struct { + Id int64 + Names +} + +es := &WithEmbeddedStruct{-1, Names{FirstName: "Alice", LastName: "Smith"}} +err := dbmap.Insert(es) +``` + +See the `TestWithEmbeddedStruct` function in `gorp_test.go` for a full example. + +### Create/Drop Tables ### + +Automatically create / drop registered tables. This is useful for unit tests +but is entirely optional. You can of course use gorp with tables created manually, +or with a separate migration tool (like [goose](https://bitbucket.org/liamstask/goose) or [migrate](https://github.com/mattes/migrate)). + +```go +// create all registered tables +dbmap.CreateTables() + +// same as above, but uses "if not exists" clause to skip tables that are +// already defined +dbmap.CreateTablesIfNotExists() + +// drop +dbmap.DropTables() +``` + +### SQL Logging + +Optionally you can pass in a logger to trace all SQL statements. +I recommend enabling this initially while you're getting the feel for what +gorp is doing on your behalf. + +Gorp defines a `GorpLogger` interface that Go's built in `log.Logger` satisfies. +However, you can write your own `GorpLogger` implementation, or use a package such +as `glog` if you want more control over how statements are logged. 
+ +```go +// Will log all SQL statements + args as they are run +// The first arg is a string prefix to prepend to all log messages +dbmap.TraceOn("[gorp]", log.New(os.Stdout, "myapp:", log.Lmicroseconds)) + +// Turn off tracing +dbmap.TraceOff() +``` + +### Insert + +```go +// Must declare as pointers so optional callback hooks +// can operate on your data, not copies +inv1 := &Invoice{0, 100, 200, "first order", 0} +inv2 := &Invoice{0, 100, 200, "second order", 0} + +// Insert your rows +err := dbmap.Insert(inv1, inv2) + +// Because we called SetKeys(true) on Invoice, the Id field +// will be populated after the Insert() automatically +fmt.Printf("inv1.Id=%d inv2.Id=%d\n", inv1.Id, inv2.Id) +``` + +### Update + +Continuing the above example, use the `Update` method to modify an Invoice: + +```go +// count is the # of rows updated, which should be 1 in this example +count, err := dbmap.Update(inv1) +``` + +### Delete + +If you have primary key(s) defined for a struct, you can use the `Delete` +method to remove rows: + +```go +count, err := dbmap.Delete(inv1) +``` + +### Select by Key + +Use the `Get` method to fetch a single row by primary key. It returns +nil if no row is found. + +```go +// fetch Invoice with Id=99 +obj, err := dbmap.Get(Invoice{}, 99) +inv := obj.(*Invoice) +``` + +### Ad Hoc SQL + +#### SELECT + +`Select()` and `SelectOne()` provide a simple way to bind arbitrary queries to a slice +or a single struct. + +```go +// Select a slice - first return value is not needed when a slice pointer is passed to Select() +var posts []Post +_, err := dbmap.Select(&posts, "select * from post order by id") + +// You can also use primitive types +var ids []string +_, err := dbmap.Select(&ids, "select id from post") + +// Select a single row. +// Returns an error if no row found, or if more than one row is found +var post Post +err := dbmap.SelectOne(&post, "select * from post where id=?", id) +``` + +Want to do joins? Just write the SQL and the struct. 
gorp will bind them: + +```go +// Define a type for your join +// It *must* contain all the columns in your SELECT statement +// +// The names here should match the aliased column names you specify +// in your SQL - no additional binding work required. simple. +// +type InvoicePersonView struct { + InvoiceId int64 + PersonId int64 + Memo string + FName string +} + +// Create some rows +p1 := &Person{0, 0, 0, "bob", "smith"} +dbmap.Insert(p1) + +// notice how we can wire up p1.Id to the invoice easily +inv1 := &Invoice{0, 0, 0, "xmas order", p1.Id} +dbmap.Insert(inv1) + +// Run your query +query := "select i.Id InvoiceId, p.Id PersonId, i.Memo, p.FName " + + "from invoice_test i, person_test p " + + "where i.PersonId = p.Id" + +// pass a slice to Select() +var list []InvoicePersonView +_, err := dbmap.Select(&list, query) + +// this should test true +expected := InvoicePersonView{inv1.Id, p1.Id, inv1.Memo, p1.FName} +if reflect.DeepEqual(list[0], expected) { + fmt.Println("Woot! My join worked!") +} +``` + +#### SELECT string or int64 + +gorp provides a few convenience methods for selecting a single string or int64. + +```go +// select single int64 from db (use $1 instead of ? for postgresql) +i64, err := dbmap.SelectInt("select count(*) from foo where blah=?", blahVal) + +// select single string from db: +s, err := dbmap.SelectStr("select name from foo where blah=?", blahVal) + +``` + +#### Named bind parameters + +You may use a map or struct to bind parameters by name. This is currently +only supported in SELECT queries. + +```go +_, err := dbm.Select(&dest, "select * from Foo where name = :name and age = :age", map[string]interface{}{ + "name": "Rob", + "age": 31, +}) +``` + +#### UPDATE / DELETE + +You can execute raw SQL if you wish. Particularly good for batch operations. 
+ +```go +res, err := dbmap.Exec("delete from invoice_test where PersonId=?", 10) +``` + +### Transactions + +You can batch operations into a transaction: + +```go +func InsertInv(dbmap *DbMap, inv *Invoice, per *Person) error { + // Start a new transaction + trans, err := dbmap.Begin() + if err != nil { + return err + } + + trans.Insert(per) + inv.PersonId = per.Id + trans.Insert(inv) + + // if the commit is successful, a nil error is returned + return trans.Commit() +} +``` + +### Hooks + +Use hooks to update data before/after saving to the db. Good for timestamps: + +```go +// implement the PreInsert and PreUpdate hooks +func (i *Invoice) PreInsert(s gorp.SqlExecutor) error { + i.Created = time.Now().UnixNano() + i.Updated = i.Created + return nil +} + +func (i *Invoice) PreUpdate(s gorp.SqlExecutor) error { + i.Updated = time.Now().UnixNano() + return nil +} + +// You can use the SqlExecutor to cascade additional SQL +// Take care to avoid cycles. gorp won't prevent them. +// +// Here's an example of a cascading delete +// +func (p *Person) PreDelete(s gorp.SqlExecutor) error { + query := "delete from invoice_test where PersonId=?" + + _, err := s.Exec(query, p.Id) + + if err != nil { + return err + } + return nil +} +``` + +Full list of hooks that you can implement: + + PostGet + PreInsert + PostInsert + PreUpdate + PostUpdate + PreDelete + PostDelete + + All have the same signature. for example: + + func (p *MyStruct) PostUpdate(s gorp.SqlExecutor) error + +### Optimistic Locking + +#### Note that this behaviour has changed in v2. See [Migration Guide](#migration-guide). + +gorp provides a simple optimistic locking feature, similar to Java's +JPA, that will raise an error if you try to update/delete a row whose +`version` column has a value different than the one in memory. This +provides a safe way to do "select then update" style operations +without explicit read and write locks. 
+ +```go +// Version is an auto-incremented number, managed by gorp +// If this property is present on your struct, update +// operations will be constrained +// +// For example, say we defined Person as: + +type Person struct { + Id int64 + Created int64 + Updated int64 + FName string + LName string + + // automatically used as the Version col + // use table.SetVersionCol("columnName") to map a different + // struct field as the version field + Version int64 +} + +p1 := &Person{0, 0, 0, "Bob", "Smith", 0} +dbmap.Insert(p1) // Version is now 1 + +obj, err := dbmap.Get(Person{}, p1.Id) +p2 := obj.(*Person) +p2.LName = "Edwards" +dbmap.Update(p2) // Version is now 2 + +p1.LName = "Howard" + +// Raises error because p1.Version == 1, which is out of date +count, err := dbmap.Update(p1) +_, ok := err.(gorp.OptimisticLockError) +if ok { + // should reach this statement + + // in a real app you might reload the row and retry, or + // you might propegate this to the user, depending on the desired + // semantics + fmt.Printf("Tried to update row with stale data: %v\n", err) +} else { + // some other db error occurred - log or return up the stack + fmt.Printf("Unknown db err: %v\n", err) +} +``` +### Adding INDEX(es) on column(s) beyond the primary key ### + +Indexes are frequently critical for performance. Here is how to add +them to your tables. + +NB: SqlServer and Oracle need testing and possible adjustment to the +CreateIndexSuffix() and DropIndexSuffix() methods to make AddIndex() +work for them. + +In the example below we put an index both on the Id field, and on the +AcctId field. + +``` +type Account struct { + Id int64 + AcctId string // e.g. this might be a long uuid for portability +} + +// indexType (the 2nd param to AddIndex call) is "Btree" or "Hash" for MySQL. +// demonstrate adding a second index on AcctId, and constrain that field to have unique values. 
+dbm.AddTable(iptab.Account{}).SetKeys(true, "Id").AddIndex("AcctIdIndex", "Btree", []string{"AcctId"}).SetUnique(true) + +err = dbm.CreateTablesIfNotExists() +checkErr(err, "CreateTablesIfNotExists failed") + +err = dbm.CreateIndex() +checkErr(err, "CreateIndex failed") + +``` +Check the effect of the CreateIndex() call in mysql: +``` +$ mysql + +MariaDB [test]> show create table Account; ++---------+--------------------------+ +| Account | CREATE TABLE `Account` ( + `Id` bigint(20) NOT NULL AUTO_INCREMENT, + `AcctId` varchar(255) DEFAULT NULL, + PRIMARY KEY (`Id`), + UNIQUE KEY `AcctIdIndex` (`AcctId`) USING BTREE <<<--- yes! index added. +) ENGINE=InnoDB DEFAULT CHARSET=utf8 ++---------+--------------------------+ + +``` + + +## Database Drivers + +gorp uses the Go 1 `database/sql` package. A full list of compliant +drivers is available here: + +http://code.google.com/p/go-wiki/wiki/SQLDrivers + +Sadly, SQL databases differ on various issues. gorp provides a Dialect +interface that should be implemented per database vendor. Dialects +are provided for: + +* MySQL +* PostgreSQL +* sqlite3 + +Each of these three databases pass the test suite. See `gorp_test.go` +for example DSNs for these three databases. + +Support is also provided for: + +* Oracle (contributed by @klaidliadon) +* SQL Server (contributed by @qrawl) - use driver: + github.com/denisenkom/go-mssqldb + +Note that these databases are not covered by CI and I (@coopernurse) +have no good way to test them locally. So please try them and send +patches as needed, but expect a bit more unpredicability. 
+ +## Sqlite3 Extensions + +In order to use sqlite3 extensions you need to first register a custom driver: + +```go +import ( + "database/sql" + + // use whatever database/sql driver you wish + sqlite "github.com/mattn/go-sqlite3" +) + +func customDriver() (*sql.DB, error) { + + // create custom driver with extensions defined + sql.Register("sqlite3-custom", &sqlite.SQLiteDriver{ + Extensions: []string{ + "mod_spatialite", + }, + }) + + // now you can then connect using the 'sqlite3-custom' driver instead of 'sqlite3' + return sql.Open("sqlite3-custom", "/tmp/post_db.bin") +} +``` + +## Known Issues + +### SQL placeholder portability + +Different databases use different strings to indicate variable +placeholders in prepared SQL statements. Unlike some database +abstraction layers (such as JDBC), Go's `database/sql` does not +standardize this. + +SQL generated by gorp in the `Insert`, `Update`, `Delete`, and `Get` +methods delegates to a Dialect implementation for each database, and +will generate portable SQL. + +Raw SQL strings passed to `Exec`, `Select`, `SelectOne`, `SelectInt`, +etc will not be parsed. Consequently you may have portability issues +if you write a query like this: + +```go +// works on MySQL and Sqlite3, but not with Postgresql err := +dbmap.SelectOne(&val, "select * from foo where id = ?", 30) +``` + +In `Select` and `SelectOne` you can use named parameters to work +around this. The following is portable: + +```go +err := dbmap.SelectOne(&val, "select * from foo where id = :id", +map[string]interface{} { "id": 30}) +``` + +Additionally, when using Postgres as your database, you should utilize +`$1` instead of `?` placeholders as utilizing `?` placeholders when +querying Postgres will result in `pq: operator does not exist` +errors. Alternatively, use `dbMap.Dialect.BindVar(varIdx)` to get the +proper variable binding for your dialect. 
+ +### time.Time and time zones + +gorp will pass `time.Time` fields through to the `database/sql` +driver, but note that the behavior of this type varies across database +drivers. + +MySQL users should be especially cautious. See: +https://github.com/ziutek/mymysql/pull/77 + +To avoid any potential issues with timezone/DST, consider: + +- Using an integer field for time data and storing UNIX time. +- Using a custom time type that implements some SQL types: + - [`"database/sql".Scanner`](https://golang.org/pkg/database/sql/#Scanner) + - [`"database/sql/driver".Valuer`](https://golang.org/pkg/database/sql/driver/#Valuer) + +## Running the tests + +The included tests may be run against MySQL, Postgresql, or sqlite3. +You must set two environment variables so the test code knows which +driver to use, and how to connect to your database. + +```sh +# MySQL example: +export GORP_TEST_DSN=gomysql_test/gomysql_test/abc123 +export GORP_TEST_DIALECT=mysql + +# run the tests +go test + +# run the tests and benchmarks +go test -bench="Bench" -benchtime 10 +``` + +Valid `GORP_TEST_DIALECT` values are: "mysql"(for mymysql), +"gomysql"(for go-sql-driver), "postgres", "sqlite" See the +`test_all.sh` script for examples of all 3 databases. This is the +script I run locally to test the library. + +## Performance + +gorp uses reflection to construct SQL queries and bind parameters. +See the BenchmarkNativeCrud vs BenchmarkGorpCrud in gorp_test.go for a +simple perf test. On my MacBook Pro gorp is about 2-3% slower than +hand written SQL. + + +## Contributors + +* matthias-margush - column aliasing via tags +* Rob Figueiredo - @robfig +* Quinn Slack - @sqs diff --git a/vendor/github.com/go-gorp/gorp/column.go b/vendor/github.com/go-gorp/gorp/column.go new file mode 100644 index 000000000..99d4fd555 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/column.go @@ -0,0 +1,83 @@ +// Copyright 2012 James Cooper. All rights reserved. 
+// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp + +package gorp + +import "reflect" + +// ColumnMap represents a mapping between a Go struct field and a single +// column in a table. +// Unique and MaxSize only inform the +// CreateTables() function and are not used by Insert/Update/Delete/Get. +type ColumnMap struct { + // Column name in db table + ColumnName string + + // If true, this column is skipped in generated SQL statements + Transient bool + + // If true, " unique" is added to create table statements. + // Not used elsewhere + Unique bool + + // Query used for getting generated id after insert + GeneratedIdQuery string + + // Passed to Dialect.ToSqlType() to assist in informing the + // correct column type to map to in CreateTables() + MaxSize int + + DefaultValue string + + fieldName string + gotype reflect.Type + isPK bool + isAutoIncr bool + isNotNull bool +} + +// Rename allows you to specify the column name in the table +// +// Example: table.ColMap("Updated").Rename("date_updated") +// +func (c *ColumnMap) Rename(colname string) *ColumnMap { + c.ColumnName = colname + return c +} + +// SetTransient allows you to mark the column as transient. If true +// this column will be skipped when SQL statements are generated +func (c *ColumnMap) SetTransient(b bool) *ColumnMap { + c.Transient = b + return c +} + +// SetUnique adds "unique" to the create table statements for this +// column, if b is true. +func (c *ColumnMap) SetUnique(b bool) *ColumnMap { + c.Unique = b + return c +} + +// SetNotNull adds "not null" to the create table statements for this +// column, if nn is true. 
+func (c *ColumnMap) SetNotNull(nn bool) *ColumnMap { + c.isNotNull = nn + return c +} + +// SetMaxSize specifies the max length of values of this column. This is +// passed to the dialect.ToSqlType() function, which can use the value +// to alter the generated type for "create table" statements +func (c *ColumnMap) SetMaxSize(size int) *ColumnMap { + c.MaxSize = size + return c +} diff --git a/vendor/github.com/go-gorp/gorp/db.go b/vendor/github.com/go-gorp/gorp/db.go new file mode 100644 index 000000000..dfb92c952 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/db.go @@ -0,0 +1,787 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp + +package gorp + +import ( + "bytes" + "context" + "database/sql" + "database/sql/driver" + "errors" + "fmt" + "log" + "reflect" + "strconv" + "strings" + "time" +) + +// DbMap is the root gorp mapping object. Create one of these for each +// database schema you wish to map. Each DbMap contains a list of +// mapped tables. 
+// +// Example: +// +// dialect := gorp.MySQLDialect{"InnoDB", "UTF8"} +// dbmap := &gorp.DbMap{Db: db, Dialect: dialect} +// +type DbMap struct { + ctx context.Context + + // Db handle to use with this map + Db *sql.DB + + // Dialect implementation to use with this map + Dialect Dialect + + TypeConverter TypeConverter + + tables []*TableMap + tablesDynamic map[string]*TableMap // tables that use same go-struct and different db table names + logger GorpLogger + logPrefix string +} + +func (m *DbMap) dynamicTableAdd(tableName string, tbl *TableMap) { + if m.tablesDynamic == nil { + m.tablesDynamic = make(map[string]*TableMap) + } + m.tablesDynamic[tableName] = tbl +} + +func (m *DbMap) dynamicTableFind(tableName string) (*TableMap, bool) { + if m.tablesDynamic == nil { + return nil, false + } + tbl, found := m.tablesDynamic[tableName] + return tbl, found +} + +func (m *DbMap) dynamicTableMap() map[string]*TableMap { + if m.tablesDynamic == nil { + m.tablesDynamic = make(map[string]*TableMap) + } + return m.tablesDynamic +} + +func (m *DbMap) WithContext(ctx context.Context) SqlExecutor { + copy := &DbMap{} + *copy = *m + copy.ctx = ctx + return copy +} + +func (m *DbMap) CreateIndex() error { + var err error + dialect := reflect.TypeOf(m.Dialect) + for _, table := range m.tables { + for _, index := range table.indexes { + err = m.createIndexImpl(dialect, table, index) + if err != nil { + break + } + } + } + + for _, table := range m.dynamicTableMap() { + for _, index := range table.indexes { + err = m.createIndexImpl(dialect, table, index) + if err != nil { + break + } + } + } + + return err +} + +func (m *DbMap) createIndexImpl(dialect reflect.Type, + table *TableMap, + index *IndexMap) error { + s := bytes.Buffer{} + s.WriteString("create") + if index.Unique { + s.WriteString(" unique") + } + s.WriteString(" index") + s.WriteString(fmt.Sprintf(" %s on %s", index.IndexName, table.TableName)) + if dname := dialect.Name(); dname == "PostgresDialect" && 
index.IndexType != "" { + s.WriteString(fmt.Sprintf(" %s %s", m.Dialect.CreateIndexSuffix(), index.IndexType)) + } + s.WriteString(" (") + for x, col := range index.columns { + if x > 0 { + s.WriteString(", ") + } + s.WriteString(m.Dialect.QuoteField(col)) + } + s.WriteString(")") + + if dname := dialect.Name(); dname == "MySQLDialect" && index.IndexType != "" { + s.WriteString(fmt.Sprintf(" %s %s", m.Dialect.CreateIndexSuffix(), index.IndexType)) + } + s.WriteString(";") + _, err := m.Exec(s.String()) + return err +} + +func (t *TableMap) DropIndex(name string) error { + + var err error + dialect := reflect.TypeOf(t.dbmap.Dialect) + for _, idx := range t.indexes { + if idx.IndexName == name { + s := bytes.Buffer{} + s.WriteString(fmt.Sprintf("DROP INDEX %s", idx.IndexName)) + + if dname := dialect.Name(); dname == "MySQLDialect" { + s.WriteString(fmt.Sprintf(" %s %s", t.dbmap.Dialect.DropIndexSuffix(), t.TableName)) + } + s.WriteString(";") + _, e := t.dbmap.Exec(s.String()) + if e != nil { + err = e + } + break + } + } + t.ResetSql() + return err +} + +// AddTable registers the given interface type with gorp. The table name +// will be given the name of the TypeOf(i). You must call this function, +// or AddTableWithName, for any struct type you wish to persist with +// the given DbMap. +// +// This operation is idempotent. If i's type is already mapped, the +// existing *TableMap is returned +func (m *DbMap) AddTable(i interface{}) *TableMap { + return m.AddTableWithName(i, "") +} + +// AddTableWithName has the same behavior as AddTable, but sets +// table.TableName to name. +func (m *DbMap) AddTableWithName(i interface{}, name string) *TableMap { + return m.AddTableWithNameAndSchema(i, "", name) +} + +// AddTableWithNameAndSchema has the same behavior as AddTable, but sets +// table.TableName to name. 
+func (m *DbMap) AddTableWithNameAndSchema(i interface{}, schema string, name string) *TableMap { + t := reflect.TypeOf(i) + if name == "" { + name = t.Name() + } + + // check if we have a table for this type already + // if so, update the name and return the existing pointer + for i := range m.tables { + table := m.tables[i] + if table.gotype == t { + table.TableName = name + return table + } + } + + tmap := &TableMap{gotype: t, TableName: name, SchemaName: schema, dbmap: m} + var primaryKey []*ColumnMap + tmap.Columns, primaryKey = m.readStructColumns(t) + m.tables = append(m.tables, tmap) + if len(primaryKey) > 0 { + tmap.keys = append(tmap.keys, primaryKey...) + } + + return tmap +} + +// AddTableDynamic registers the given interface type with gorp. +// The table name will be dynamically determined at runtime by +// using the GetTableName method on DynamicTable interface +func (m *DbMap) AddTableDynamic(inp DynamicTable, schema string) *TableMap { + + val := reflect.ValueOf(inp) + elm := val.Elem() + t := elm.Type() + name := inp.TableName() + if name == "" { + panic("Missing table name in DynamicTable instance") + } + + // Check if there is another dynamic table with the same name + if _, found := m.dynamicTableFind(name); found { + panic(fmt.Sprintf("A table with the same name %v already exists", name)) + } + + tmap := &TableMap{gotype: t, TableName: name, SchemaName: schema, dbmap: m} + var primaryKey []*ColumnMap + tmap.Columns, primaryKey = m.readStructColumns(t) + if len(primaryKey) > 0 { + tmap.keys = append(tmap.keys, primaryKey...) + } + + m.dynamicTableAdd(name, tmap) + + return tmap +} + +func (m *DbMap) readStructColumns(t reflect.Type) (cols []*ColumnMap, primaryKey []*ColumnMap) { + primaryKey = make([]*ColumnMap, 0) + n := t.NumField() + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Anonymous && f.Type.Kind() == reflect.Struct { + // Recursively add nested fields in embedded structs. 
+ subcols, subpk := m.readStructColumns(f.Type) + // Don't append nested fields that have the same field + // name as an already-mapped field. + for _, subcol := range subcols { + shouldAppend := true + for _, col := range cols { + if !subcol.Transient && subcol.fieldName == col.fieldName { + shouldAppend = false + break + } + } + if shouldAppend { + cols = append(cols, subcol) + } + } + if subpk != nil { + primaryKey = append(primaryKey, subpk...) + } + } else { + // Tag = Name { ',' Option } + // Option = OptionKey [ ':' OptionValue ] + cArguments := strings.Split(f.Tag.Get("db"), ",") + columnName := cArguments[0] + var maxSize int + var defaultValue string + var isAuto bool + var isPK bool + var isNotNull bool + for _, argString := range cArguments[1:] { + argString = strings.TrimSpace(argString) + arg := strings.SplitN(argString, ":", 2) + + // check mandatory/unexpected option values + switch arg[0] { + case "size", "default": + // options requiring value + if len(arg) == 1 { + panic(fmt.Sprintf("missing option value for option %v on field %v", arg[0], f.Name)) + } + default: + // options where value is invalid (currently all other options) + if len(arg) == 2 { + panic(fmt.Sprintf("unexpected option value for option %v on field %v", arg[0], f.Name)) + } + } + + switch arg[0] { + case "size": + maxSize, _ = strconv.Atoi(arg[1]) + case "default": + defaultValue = arg[1] + case "primarykey": + isPK = true + case "autoincrement": + isAuto = true + case "notnull": + isNotNull = true + default: + panic(fmt.Sprintf("Unrecognized tag option for field %v: %v", f.Name, arg)) + } + } + if columnName == "" { + columnName = f.Name + } + + gotype := f.Type + valueType := gotype + if valueType.Kind() == reflect.Ptr { + valueType = valueType.Elem() + } + value := reflect.New(valueType).Interface() + if m.TypeConverter != nil { + // Make a new pointer to a value of type gotype and + // pass it to the TypeConverter's FromDb method to see + // if a different type should be used 
for the column + // type during table creation. + scanner, useHolder := m.TypeConverter.FromDb(value) + if useHolder { + value = scanner.Holder + gotype = reflect.TypeOf(value) + } + } + if typer, ok := value.(SqlTyper); ok { + gotype = reflect.TypeOf(typer.SqlType()) + } else if typer, ok := value.(legacySqlTyper); ok { + log.Printf("Deprecation Warning: update your SqlType methods to return a driver.Value") + gotype = reflect.TypeOf(typer.SqlType()) + } else if valuer, ok := value.(driver.Valuer); ok { + // Only check for driver.Valuer if SqlTyper wasn't + // found. + v, err := valuer.Value() + if err == nil && v != nil { + gotype = reflect.TypeOf(v) + } + } + cm := &ColumnMap{ + ColumnName: columnName, + DefaultValue: defaultValue, + Transient: columnName == "-", + fieldName: f.Name, + gotype: gotype, + isPK: isPK, + isAutoIncr: isAuto, + isNotNull: isNotNull, + MaxSize: maxSize, + } + if isPK { + primaryKey = append(primaryKey, cm) + } + // Check for nested fields of the same field name and + // override them. + shouldAppend := true + for index, col := range cols { + if !col.Transient && col.fieldName == cm.fieldName { + cols[index] = cm + shouldAppend = false + break + } + } + if shouldAppend { + cols = append(cols, cm) + } + } + + } + return +} + +// CreateTables iterates through TableMaps registered to this DbMap and +// executes "create table" statements against the database for each. +// +// This is particularly useful in unit tests where you want to create +// and destroy the schema automatically. 
+func (m *DbMap) CreateTables() error { + return m.createTables(false) +} + +// CreateTablesIfNotExists is similar to CreateTables, but starts +// each statement with "create table if not exists" so that existing +// tables do not raise errors +func (m *DbMap) CreateTablesIfNotExists() error { + return m.createTables(true) +} + +func (m *DbMap) createTables(ifNotExists bool) error { + var err error + for i := range m.tables { + table := m.tables[i] + sql := table.SqlForCreate(ifNotExists) + _, err = m.Exec(sql) + if err != nil { + return err + } + } + + for _, tbl := range m.dynamicTableMap() { + sql := tbl.SqlForCreate(ifNotExists) + _, err = m.Exec(sql) + if err != nil { + return err + } + } + + return err +} + +// DropTable drops an individual table. +// Returns an error when the table does not exist. +func (m *DbMap) DropTable(table interface{}) error { + t := reflect.TypeOf(table) + + tableName := "" + if dyn, ok := table.(DynamicTable); ok { + tableName = dyn.TableName() + } + + return m.dropTable(t, tableName, false) +} + +// DropTableIfExists drops an individual table when the table exists. +func (m *DbMap) DropTableIfExists(table interface{}) error { + t := reflect.TypeOf(table) + + tableName := "" + if dyn, ok := table.(DynamicTable); ok { + tableName = dyn.TableName() + } + + return m.dropTable(t, tableName, true) +} + +// DropTables iterates through TableMaps registered to this DbMap and +// executes "drop table" statements against the database for each. +func (m *DbMap) DropTables() error { + return m.dropTables(false) +} + +// DropTablesIfExists is the same as DropTables, but uses the "if exists" clause to +// avoid errors for tables that do not exist. +func (m *DbMap) DropTablesIfExists() error { + return m.dropTables(true) +} + +// Goes through all the registered tables, dropping them one by one. +// If an error is encountered, then it is returned and the rest of +// the tables are not dropped. 
+func (m *DbMap) dropTables(addIfExists bool) (err error) { + for _, table := range m.tables { + err = m.dropTableImpl(table, addIfExists) + if err != nil { + return err + } + } + + for _, table := range m.dynamicTableMap() { + err = m.dropTableImpl(table, addIfExists) + if err != nil { + return err + } + } + + return err +} + +// Implementation of dropping a single table. +func (m *DbMap) dropTable(t reflect.Type, name string, addIfExists bool) error { + table := tableOrNil(m, t, name) + if table == nil { + return fmt.Errorf("table %s was not registered", table.TableName) + } + + return m.dropTableImpl(table, addIfExists) +} + +func (m *DbMap) dropTableImpl(table *TableMap, ifExists bool) (err error) { + tableDrop := "drop table" + if ifExists { + tableDrop = m.Dialect.IfTableExists(tableDrop, table.SchemaName, table.TableName) + } + _, err = m.Exec(fmt.Sprintf("%s %s;", tableDrop, m.Dialect.QuotedTableForQuery(table.SchemaName, table.TableName))) + return err +} + +// TruncateTables iterates through TableMaps registered to this DbMap and +// executes "truncate table" statements against the database for each, or in the case of +// sqlite, a "delete from" with no "where" clause, which uses the truncate optimization +// (http://www.sqlite.org/lang_delete.html) +func (m *DbMap) TruncateTables() error { + var err error + for i := range m.tables { + table := m.tables[i] + _, e := m.Exec(fmt.Sprintf("%s %s;", m.Dialect.TruncateClause(), m.Dialect.QuotedTableForQuery(table.SchemaName, table.TableName))) + if e != nil { + err = e + } + } + + for _, table := range m.dynamicTableMap() { + _, e := m.Exec(fmt.Sprintf("%s %s;", m.Dialect.TruncateClause(), m.Dialect.QuotedTableForQuery(table.SchemaName, table.TableName))) + if e != nil { + err = e + } + } + + return err +} + +// Insert runs a SQL INSERT statement for each element in list. List +// items must be pointers. 
+// +// Any interface whose TableMap has an auto-increment primary key will +// have its last insert id bound to the PK field on the struct. +// +// The hook functions PreInsert() and/or PostInsert() will be executed +// before/after the INSERT statement if the interface defines them. +// +// Panics if any interface in the list has not been registered with AddTable +func (m *DbMap) Insert(list ...interface{}) error { + return insert(m, m, list...) +} + +// Update runs a SQL UPDATE statement for each element in list. List +// items must be pointers. +// +// The hook functions PreUpdate() and/or PostUpdate() will be executed +// before/after the UPDATE statement if the interface defines them. +// +// Returns the number of rows updated. +// +// Returns an error if SetKeys has not been called on the TableMap +// Panics if any interface in the list has not been registered with AddTable +func (m *DbMap) Update(list ...interface{}) (int64, error) { + return update(m, m, nil, list...) +} + +// UpdateColumns runs a SQL UPDATE statement for each element in list. List +// items must be pointers. +// +// Only the columns accepted by filter are included in the UPDATE. +// +// The hook functions PreUpdate() and/or PostUpdate() will be executed +// before/after the UPDATE statement if the interface defines them. +// +// Returns the number of rows updated. +// +// Returns an error if SetKeys has not been called on the TableMap +// Panics if any interface in the list has not been registered with AddTable +func (m *DbMap) UpdateColumns(filter ColumnFilter, list ...interface{}) (int64, error) { + return update(m, m, filter, list...) +} + +// Delete runs a SQL DELETE statement for each element in list. List +// items must be pointers. +// +// The hook functions PreDelete() and/or PostDelete() will be executed +// before/after the DELETE statement if the interface defines them. +// +// Returns the number of rows deleted. 
+// +// Returns an error if SetKeys has not been called on the TableMap +// Panics if any interface in the list has not been registered with AddTable +func (m *DbMap) Delete(list ...interface{}) (int64, error) { + return delete(m, m, list...) +} + +// Get runs a SQL SELECT to fetch a single row from the table based on the +// primary key(s) +// +// i should be an empty value for the struct to load. keys should be +// the primary key value(s) for the row to load. If multiple keys +// exist on the table, the order should match the column order +// specified in SetKeys() when the table mapping was defined. +// +// The hook function PostGet() will be executed after the SELECT +// statement if the interface defines them. +// +// Returns a pointer to a struct that matches or nil if no row is found. +// +// Returns an error if SetKeys has not been called on the TableMap +// Panics if any interface in the list has not been registered with AddTable +func (m *DbMap) Get(i interface{}, keys ...interface{}) (interface{}, error) { + return get(m, m, i, keys...) +} + +// Select runs an arbitrary SQL query, binding the columns in the result +// to fields on the struct specified by i. args represent the bind +// parameters for the SQL statement. +// +// Column names on the SELECT statement should be aliased to the field names +// on the struct i. Returns an error if one or more columns in the result +// do not match. It is OK if fields on i are not part of the SQL +// statement. +// +// The hook function PostGet() will be executed after the SELECT +// statement if the interface defines them. +// +// Values are returned in one of two ways: +// 1. If i is a struct or a pointer to a struct, returns a slice of pointers to +// matching rows of type i. +// 2. If i is a pointer to a slice, the results will be appended to that slice +// and nil returned. 
+// +// i does NOT need to be registered with AddTable() +func (m *DbMap) Select(i interface{}, query string, args ...interface{}) ([]interface{}, error) { + return hookedselect(m, m, i, query, args...) +} + +// Exec runs an arbitrary SQL statement. args represent the bind parameters. +// This is equivalent to running: Exec() using database/sql +func (m *DbMap) Exec(query string, args ...interface{}) (sql.Result, error) { + if m.logger != nil { + now := time.Now() + defer m.trace(now, query, args...) + } + return maybeExpandNamedQueryAndExec(m, query, args...) +} + +// SelectInt is a convenience wrapper around the gorp.SelectInt function +func (m *DbMap) SelectInt(query string, args ...interface{}) (int64, error) { + return SelectInt(m, query, args...) +} + +// SelectNullInt is a convenience wrapper around the gorp.SelectNullInt function +func (m *DbMap) SelectNullInt(query string, args ...interface{}) (sql.NullInt64, error) { + return SelectNullInt(m, query, args...) +} + +// SelectFloat is a convenience wrapper around the gorp.SelectFloat function +func (m *DbMap) SelectFloat(query string, args ...interface{}) (float64, error) { + return SelectFloat(m, query, args...) +} + +// SelectNullFloat is a convenience wrapper around the gorp.SelectNullFloat function +func (m *DbMap) SelectNullFloat(query string, args ...interface{}) (sql.NullFloat64, error) { + return SelectNullFloat(m, query, args...) +} + +// SelectStr is a convenience wrapper around the gorp.SelectStr function +func (m *DbMap) SelectStr(query string, args ...interface{}) (string, error) { + return SelectStr(m, query, args...) +} + +// SelectNullStr is a convenience wrapper around the gorp.SelectNullStr function +func (m *DbMap) SelectNullStr(query string, args ...interface{}) (sql.NullString, error) { + return SelectNullStr(m, query, args...) 
+} + +// SelectOne is a convenience wrapper around the gorp.SelectOne function +func (m *DbMap) SelectOne(holder interface{}, query string, args ...interface{}) error { + return SelectOne(m, m, holder, query, args...) +} + +// Begin starts a gorp Transaction +func (m *DbMap) Begin() (*Transaction, error) { + if m.logger != nil { + now := time.Now() + defer m.trace(now, "begin;") + } + tx, err := begin(m) + if err != nil { + return nil, err + } + return &Transaction{ + dbmap: m, + tx: tx, + closed: false, + }, nil +} + +// TableFor returns the *TableMap corresponding to the given Go Type +// If no table is mapped to that type an error is returned. +// If checkPK is true and the mapped table has no registered PKs, an error is returned. +func (m *DbMap) TableFor(t reflect.Type, checkPK bool) (*TableMap, error) { + table := tableOrNil(m, t, "") + if table == nil { + return nil, fmt.Errorf("no table found for type: %v", t.Name()) + } + + if checkPK && len(table.keys) < 1 { + e := fmt.Sprintf("gorp: no keys defined for table: %s", + table.TableName) + return nil, errors.New(e) + } + + return table, nil +} + +// DynamicTableFor returns the *TableMap for the dynamic table corresponding +// to the input tablename +// If no table is mapped to that tablename an error is returned. +// If checkPK is true and the mapped table has no registered PKs, an error is returned. +func (m *DbMap) DynamicTableFor(tableName string, checkPK bool) (*TableMap, error) { + table, found := m.dynamicTableFind(tableName) + if !found { + return nil, fmt.Errorf("gorp: no table found for name: %v", tableName) + } + + if checkPK && len(table.keys) < 1 { + e := fmt.Sprintf("gorp: no keys defined for table: %s", + table.TableName) + return nil, errors.New(e) + } + + return table, nil +} + +// Prepare creates a prepared statement for later queries or executions. +// Multiple queries or executions may be run concurrently from the returned statement. 
+// This is equivalent to running: Prepare() using database/sql +func (m *DbMap) Prepare(query string) (*sql.Stmt, error) { + if m.logger != nil { + now := time.Now() + defer m.trace(now, query, nil) + } + return prepare(m, query) +} + +func tableOrNil(m *DbMap, t reflect.Type, name string) *TableMap { + if name != "" { + // Search by table name (dynamic tables) + if table, found := m.dynamicTableFind(name); found { + return table + } + return nil + } + + for i := range m.tables { + table := m.tables[i] + if table.gotype == t { + return table + } + } + return nil +} + +func (m *DbMap) tableForPointer(ptr interface{}, checkPK bool) (*TableMap, reflect.Value, error) { + ptrv := reflect.ValueOf(ptr) + if ptrv.Kind() != reflect.Ptr { + e := fmt.Sprintf("gorp: passed non-pointer: %v (kind=%v)", ptr, + ptrv.Kind()) + return nil, reflect.Value{}, errors.New(e) + } + elem := ptrv.Elem() + ifc := elem.Interface() + var t *TableMap + var err error + tableName := "" + if dyn, isDyn := ptr.(DynamicTable); isDyn { + tableName = dyn.TableName() + t, err = m.DynamicTableFor(tableName, checkPK) + } else { + etype := reflect.TypeOf(ifc) + t, err = m.TableFor(etype, checkPK) + } + + if err != nil { + return nil, reflect.Value{}, err + } + + return t, elem, nil +} + +func (m *DbMap) QueryRow(query string, args ...interface{}) *sql.Row { + if m.logger != nil { + now := time.Now() + defer m.trace(now, query, args...) + } + return queryRow(m, query, args...) +} + +func (m *DbMap) Query(q string, args ...interface{}) (*sql.Rows, error) { + if m.logger != nil { + now := time.Now() + defer m.trace(now, q, args...) + } + return query(m, q, args...) +} + +func (m *DbMap) trace(started time.Time, query string, args ...interface{}) { + if m.logger != nil { + var margs = argsString(args...) 
+ m.logger.Printf("%s%s [%s] (%v)", m.logPrefix, query, margs, (time.Now().Sub(started))) + } +} diff --git a/vendor/github.com/go-gorp/gorp/dialect.go b/vendor/github.com/go-gorp/gorp/dialect.go new file mode 100644 index 000000000..22e30999d --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/dialect.go @@ -0,0 +1,112 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp + +package gorp + +import ( + "reflect" +) + +// The Dialect interface encapsulates behaviors that differ across +// SQL databases. At present the Dialect is only used by CreateTables() +// but this could change in the future +type Dialect interface { + // adds a suffix to any query, usually ";" + QuerySuffix() string + + // ToSqlType returns the SQL column type to use when creating a + // table of the given Go Type. maxsize can be used to switch based on + // size. For example, in MySQL []byte could map to BLOB, MEDIUMBLOB, + // or LONGBLOB depending on the maxsize + ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string + + // string to append to primary key column definitions + AutoIncrStr() string + + // string to bind autoincrement columns to. Empty string will + // remove reference to those columns in the INSERT statement. 
+ AutoIncrBindValue() string + + AutoIncrInsertSuffix(col *ColumnMap) string + + // string to append to "create table" statement for vendor specific + // table attributes + CreateTableSuffix() string + + // string to append to "create index" statement + CreateIndexSuffix() string + + // string to append to "drop index" statement + DropIndexSuffix() string + + // string to truncate tables + TruncateClause() string + + // bind variable string to use when forming SQL statements + // in many dbs it is "?", but Postgres appears to use $1 + // + // i is a zero based index of the bind variable in this statement + // + BindVar(i int) string + + // Handles quoting of a field name to ensure that it doesn't raise any + // SQL parsing exceptions by using a reserved word as a field name. + QuoteField(field string) string + + // Handles building up of a schema.database string that is compatible with + // the given dialect + // + // schema - The schema that lives in + // table - The table name + QuotedTableForQuery(schema string, table string) string + + // Existence clause for table creation / deletion + IfSchemaNotExists(command, schema string) string + IfTableExists(command, schema, table string) string + IfTableNotExists(command, schema, table string) string +} + +// IntegerAutoIncrInserter is implemented by dialects that can perform +// inserts with automatically incremented integer primary keys. If +// the dialect can handle automatic assignment of more than just +// integers, see TargetedAutoIncrInserter. +type IntegerAutoIncrInserter interface { + InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) +} + +// TargetedAutoIncrInserter is implemented by dialects that can +// perform automatic assignment of any primary key type (i.e. strings +// for uuids, integers for serials, etc). 
+type TargetedAutoIncrInserter interface { + // InsertAutoIncrToTarget runs an insert operation and assigns the + // automatically generated primary key directly to the passed in + // target. The target should be a pointer to the primary key + // field of the value being inserted. + InsertAutoIncrToTarget(exec SqlExecutor, insertSql string, target interface{}, params ...interface{}) error +} + +// TargetQueryInserter is implemented by dialects that can perform +// assignment of integer primary key type by executing a query +// like "select sequence.currval from dual". +type TargetQueryInserter interface { + // TargetQueryInserter runs an insert operation and assigns the + // automatically generated primary key retrived by the query + // extracted from the GeneratedIdQuery field of the id column. + InsertQueryToTarget(exec SqlExecutor, insertSql, idSql string, target interface{}, params ...interface{}) error +} + +func standardInsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { + res, err := exec.Exec(insertSql, params...) + if err != nil { + return 0, err + } + return res.LastInsertId() +} diff --git a/vendor/github.com/go-gorp/gorp/dialect_mysql.go b/vendor/github.com/go-gorp/gorp/dialect_mysql.go new file mode 100644 index 000000000..06606b8b6 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/dialect_mysql.go @@ -0,0 +1,176 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp + +package gorp + +import ( + "fmt" + "reflect" + "strings" + "time" +) + +// Implementation of Dialect for MySQL databases. 
+type MySQLDialect struct { + + // Engine is the storage engine to use "InnoDB" vs "MyISAM" for example + Engine string + + // Encoding is the character encoding to use for created tables + Encoding string +} + +func (d MySQLDialect) QuerySuffix() string { return ";" } + +func (d MySQLDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "boolean" + case reflect.Int8: + return "tinyint" + case reflect.Uint8: + return "tinyint unsigned" + case reflect.Int16: + return "smallint" + case reflect.Uint16: + return "smallint unsigned" + case reflect.Int, reflect.Int32: + return "int" + case reflect.Uint, reflect.Uint32: + return "int unsigned" + case reflect.Int64: + return "bigint" + case reflect.Uint64: + return "bigint unsigned" + case reflect.Float64, reflect.Float32: + return "double" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "mediumblob" + } + } + + switch val.Name() { + case "NullInt64": + return "bigint" + case "NullFloat64": + return "double" + case "NullBool": + return "tinyint" + case "Time": + return "datetime" + } + + if maxsize < 1 { + maxsize = 255 + } + + /* == About varchar(N) == + * N is number of characters. + * A varchar column can store up to 65535 bytes. + * Remember that 1 character is 3 bytes in utf-8 charset. + * Also remember that each row can store up to 65535 bytes, + * and you have some overheads, so it's not possible for a + * varchar column to have 65535/3 characters really. + * So it would be better to use 'text' type in stead of + * large varchar type. 
+ */ + if maxsize < 256 { + return fmt.Sprintf("varchar(%d)", maxsize) + } else { + return "text" + } +} + +// Returns auto_increment +func (d MySQLDialect) AutoIncrStr() string { + return "auto_increment" +} + +func (d MySQLDialect) AutoIncrBindValue() string { + return "null" +} + +func (d MySQLDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return "" +} + +// Returns engine=%s charset=%s based on values stored on struct +func (d MySQLDialect) CreateTableSuffix() string { + if d.Engine == "" || d.Encoding == "" { + msg := "gorp - undefined" + + if d.Engine == "" { + msg += " MySQLDialect.Engine" + } + if d.Engine == "" && d.Encoding == "" { + msg += "," + } + if d.Encoding == "" { + msg += " MySQLDialect.Encoding" + } + msg += ". Check that your MySQLDialect was correctly initialized when declared." + panic(msg) + } + + return fmt.Sprintf(" engine=%s charset=%s", d.Engine, d.Encoding) +} + +func (d MySQLDialect) CreateIndexSuffix() string { + return "using" +} + +func (d MySQLDialect) DropIndexSuffix() string { + return "on" +} + +func (d MySQLDialect) TruncateClause() string { + return "truncate" +} + +func (d MySQLDialect) SleepClause(s time.Duration) string { + return fmt.Sprintf("sleep(%f)", s.Seconds()) +} + +// Returns "?" +func (d MySQLDialect) BindVar(i int) string { + return "?" +} + +func (d MySQLDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { + return standardInsertAutoIncr(exec, insertSql, params...) +} + +func (d MySQLDialect) QuoteField(f string) string { + return "`" + f + "`" +} + +func (d MySQLDialect) QuotedTableForQuery(schema string, table string) string { + if strings.TrimSpace(schema) == "" { + return d.QuoteField(table) + } + + return schema + "." 
+ d.QuoteField(table) +} + +func (d MySQLDialect) IfSchemaNotExists(command, schema string) string { + return fmt.Sprintf("%s if not exists", command) +} + +func (d MySQLDialect) IfTableExists(command, schema, table string) string { + return fmt.Sprintf("%s if exists", command) +} + +func (d MySQLDialect) IfTableNotExists(command, schema, table string) string { + return fmt.Sprintf("%s if not exists", command) +} diff --git a/vendor/github.com/go-gorp/gorp/dialect_oracle.go b/vendor/github.com/go-gorp/gorp/dialect_oracle.go new file mode 100644 index 000000000..c381380f9 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/dialect_oracle.go @@ -0,0 +1,146 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp + +package gorp + +import ( + "fmt" + "reflect" + "strings" +) + +// Implementation of Dialect for Oracle databases. 
+type OracleDialect struct{} + +func (d OracleDialect) QuerySuffix() string { return "" } + +func (d OracleDialect) CreateIndexSuffix() string { return "" } + +func (d OracleDialect) DropIndexSuffix() string { return "" } + +func (d OracleDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "boolean" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32: + if isAutoIncr { + return "serial" + } + return "integer" + case reflect.Int64, reflect.Uint64: + if isAutoIncr { + return "bigserial" + } + return "bigint" + case reflect.Float64: + return "double precision" + case reflect.Float32: + return "real" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "bytea" + } + } + + switch val.Name() { + case "NullInt64": + return "bigint" + case "NullFloat64": + return "double precision" + case "NullBool": + return "boolean" + case "NullTime", "Time": + return "timestamp with time zone" + } + + if maxsize > 0 { + return fmt.Sprintf("varchar(%d)", maxsize) + } else { + return "text" + } + +} + +// Returns empty string +func (d OracleDialect) AutoIncrStr() string { + return "" +} + +func (d OracleDialect) AutoIncrBindValue() string { + return "NULL" +} + +func (d OracleDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return "" +} + +// Returns suffix +func (d OracleDialect) CreateTableSuffix() string { + return "" +} + +func (d OracleDialect) TruncateClause() string { + return "truncate" +} + +// Returns "$(i+1)" +func (d OracleDialect) BindVar(i int) string { + return fmt.Sprintf(":%d", i+1) +} + +// After executing the insert uses the ColMap IdQuery to get the generated id +func (d OracleDialect) InsertQueryToTarget(exec SqlExecutor, insertSql, idSql string, target interface{}, params ...interface{}) error { + _, err := 
exec.Exec(insertSql, params...) + if err != nil { + return err + } + id, err := exec.SelectInt(idSql) + if err != nil { + return err + } + switch target.(type) { + case *int64: + *(target.(*int64)) = id + case *int32: + *(target.(*int32)) = int32(id) + case int: + *(target.(*int)) = int(id) + default: + return fmt.Errorf("Id field can be int, int32 or int64") + } + return nil +} + +func (d OracleDialect) QuoteField(f string) string { + return `"` + strings.ToUpper(f) + `"` +} + +func (d OracleDialect) QuotedTableForQuery(schema string, table string) string { + if strings.TrimSpace(schema) == "" { + return d.QuoteField(table) + } + + return schema + "." + d.QuoteField(table) +} + +func (d OracleDialect) IfSchemaNotExists(command, schema string) string { + return fmt.Sprintf("%s if not exists", command) +} + +func (d OracleDialect) IfTableExists(command, schema, table string) string { + return fmt.Sprintf("%s if exists", command) +} + +func (d OracleDialect) IfTableNotExists(command, schema, table string) string { + return fmt.Sprintf("%s if not exists", command) +} diff --git a/vendor/github.com/go-gorp/gorp/dialect_postgres.go b/vendor/github.com/go-gorp/gorp/dialect_postgres.go new file mode 100644 index 000000000..07c9bb9a6 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/dialect_postgres.go @@ -0,0 +1,156 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. 
+// +// Source code and project home: +// https://github.com/go-gorp/gorp + +package gorp + +import ( + "fmt" + "reflect" + "strings" + "time" +) + +type PostgresDialect struct { + suffix string + LowercaseFields bool +} + +func (d PostgresDialect) QuerySuffix() string { return ";" } + +func (d PostgresDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "boolean" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32: + if isAutoIncr { + return "serial" + } + return "integer" + case reflect.Int64, reflect.Uint64: + if isAutoIncr { + return "bigserial" + } + return "bigint" + case reflect.Float64: + return "double precision" + case reflect.Float32: + return "real" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "bytea" + } + } + + switch val.Name() { + case "NullInt64": + return "bigint" + case "NullFloat64": + return "double precision" + case "NullBool": + return "boolean" + case "Time", "NullTime": + return "timestamp with time zone" + } + + if maxsize > 0 { + return fmt.Sprintf("varchar(%d)", maxsize) + } else { + return "text" + } + +} + +// Returns empty string +func (d PostgresDialect) AutoIncrStr() string { + return "" +} + +func (d PostgresDialect) AutoIncrBindValue() string { + return "default" +} + +func (d PostgresDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return " returning " + d.QuoteField(col.ColumnName) +} + +// Returns suffix +func (d PostgresDialect) CreateTableSuffix() string { + return d.suffix +} + +func (d PostgresDialect) CreateIndexSuffix() string { + return "using" +} + +func (d PostgresDialect) DropIndexSuffix() string { + return "" +} + +func (d PostgresDialect) TruncateClause() string { + return "truncate" +} + +func (d PostgresDialect) SleepClause(s time.Duration) string { + return 
fmt.Sprintf("pg_sleep(%f)", s.Seconds()) +} + +// Returns "$(i+1)" +func (d PostgresDialect) BindVar(i int) string { + return fmt.Sprintf("$%d", i+1) +} + +func (d PostgresDialect) InsertAutoIncrToTarget(exec SqlExecutor, insertSql string, target interface{}, params ...interface{}) error { + rows, err := exec.Query(insertSql, params...) + if err != nil { + return err + } + defer rows.Close() + + if !rows.Next() { + return fmt.Errorf("No serial value returned for insert: %s Encountered error: %s", insertSql, rows.Err()) + } + if err := rows.Scan(target); err != nil { + return err + } + if rows.Next() { + return fmt.Errorf("more than two serial value returned for insert: %s", insertSql) + } + return rows.Err() +} + +func (d PostgresDialect) QuoteField(f string) string { + if d.LowercaseFields { + return `"` + strings.ToLower(f) + `"` + } + return `"` + f + `"` +} + +func (d PostgresDialect) QuotedTableForQuery(schema string, table string) string { + if strings.TrimSpace(schema) == "" { + return d.QuoteField(table) + } + + return schema + "." + d.QuoteField(table) +} + +func (d PostgresDialect) IfSchemaNotExists(command, schema string) string { + return fmt.Sprintf("%s if not exists", command) +} + +func (d PostgresDialect) IfTableExists(command, schema, table string) string { + return fmt.Sprintf("%s if exists", command) +} + +func (d PostgresDialect) IfTableNotExists(command, schema, table string) string { + return fmt.Sprintf("%s if not exists", command) +} diff --git a/vendor/github.com/go-gorp/gorp/dialect_sqlite.go b/vendor/github.com/go-gorp/gorp/dialect_sqlite.go new file mode 100644 index 000000000..7d9b29757 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/dialect_sqlite.go @@ -0,0 +1,119 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. 
It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp + +package gorp + +import ( + "fmt" + "reflect" +) + +type SqliteDialect struct { + suffix string +} + +func (d SqliteDialect) QuerySuffix() string { return ";" } + +func (d SqliteDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "integer" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return "integer" + case reflect.Float64, reflect.Float32: + return "real" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "blob" + } + } + + switch val.Name() { + case "NullInt64": + return "integer" + case "NullFloat64": + return "real" + case "NullBool": + return "integer" + case "Time": + return "datetime" + } + + if maxsize < 1 { + maxsize = 255 + } + return fmt.Sprintf("varchar(%d)", maxsize) +} + +// Returns autoincrement +func (d SqliteDialect) AutoIncrStr() string { + return "autoincrement" +} + +func (d SqliteDialect) AutoIncrBindValue() string { + return "null" +} + +func (d SqliteDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return "" +} + +// Returns suffix +func (d SqliteDialect) CreateTableSuffix() string { + return d.suffix +} + +func (d SqliteDialect) CreateIndexSuffix() string { + return "" +} + +func (d SqliteDialect) DropIndexSuffix() string { + return "" +} + +// With sqlite, there technically isn't a TRUNCATE statement, +// but a DELETE FROM uses a truncate optimization: +// http://www.sqlite.org/lang_delete.html +func (d SqliteDialect) TruncateClause() string { + return "delete from" +} + +// Returns "?" +func (d SqliteDialect) BindVar(i int) string { + return "?" 
+} + +func (d SqliteDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { + return standardInsertAutoIncr(exec, insertSql, params...) +} + +func (d SqliteDialect) QuoteField(f string) string { + return `"` + f + `"` +} + +// sqlite does not have schemas like PostgreSQL does, so just escape it like normal +func (d SqliteDialect) QuotedTableForQuery(schema string, table string) string { + return d.QuoteField(table) +} + +func (d SqliteDialect) IfSchemaNotExists(command, schema string) string { + return fmt.Sprintf("%s if not exists", command) +} + +func (d SqliteDialect) IfTableExists(command, schema, table string) string { + return fmt.Sprintf("%s if exists", command) +} + +func (d SqliteDialect) IfTableNotExists(command, schema, table string) string { + return fmt.Sprintf("%s if not exists", command) +} diff --git a/vendor/github.com/go-gorp/gorp/dialect_sqlserver.go b/vendor/github.com/go-gorp/gorp/dialect_sqlserver.go new file mode 100644 index 000000000..8808af598 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/dialect_sqlserver.go @@ -0,0 +1,152 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp + +package gorp + +import ( + "fmt" + "reflect" + "strings" +) + +// Implementation of Dialect for Microsoft SQL Server databases. +// Use gorp.SqlServerDialect{"2005"} for legacy datatypes. 
+// Tested with driver: github.com/denisenkom/go-mssqldb + +type SqlServerDialect struct { + + // If set to "2005" legacy datatypes will be used + Version string +} + +func (d SqlServerDialect) ToSqlType(val reflect.Type, maxsize int, isAutoIncr bool) string { + switch val.Kind() { + case reflect.Ptr: + return d.ToSqlType(val.Elem(), maxsize, isAutoIncr) + case reflect.Bool: + return "bit" + case reflect.Int8: + return "tinyint" + case reflect.Uint8: + return "smallint" + case reflect.Int16: + return "smallint" + case reflect.Uint16: + return "int" + case reflect.Int, reflect.Int32: + return "int" + case reflect.Uint, reflect.Uint32: + return "bigint" + case reflect.Int64: + return "bigint" + case reflect.Uint64: + return "numeric(20,0)" + case reflect.Float32: + return "float(24)" + case reflect.Float64: + return "float(53)" + case reflect.Slice: + if val.Elem().Kind() == reflect.Uint8 { + return "varbinary" + } + } + + switch val.Name() { + case "NullInt64": + return "bigint" + case "NullFloat64": + return "float(53)" + case "NullBool": + return "bit" + case "NullTime", "Time": + if d.Version == "2005" { + return "datetime" + } + return "datetime2" + } + + if maxsize < 1 { + if d.Version == "2005" { + maxsize = 255 + } else { + return fmt.Sprintf("nvarchar(max)") + } + } + return fmt.Sprintf("nvarchar(%d)", maxsize) +} + +// Returns auto_increment +func (d SqlServerDialect) AutoIncrStr() string { + return "identity(0,1)" +} + +// Empty string removes autoincrement columns from the INSERT statements. +func (d SqlServerDialect) AutoIncrBindValue() string { + return "" +} + +func (d SqlServerDialect) AutoIncrInsertSuffix(col *ColumnMap) string { + return "" +} + +func (d SqlServerDialect) CreateTableSuffix() string { return ";" } + +func (d SqlServerDialect) TruncateClause() string { + return "truncate table" +} + +// Returns "?" +func (d SqlServerDialect) BindVar(i int) string { + return "?" 
+} + +func (d SqlServerDialect) InsertAutoIncr(exec SqlExecutor, insertSql string, params ...interface{}) (int64, error) { + return standardInsertAutoIncr(exec, insertSql, params...) +} + +func (d SqlServerDialect) QuoteField(f string) string { + return "[" + strings.Replace(f, "]", "]]", -1) + "]" +} + +func (d SqlServerDialect) QuotedTableForQuery(schema string, table string) string { + if strings.TrimSpace(schema) == "" { + return d.QuoteField(table) + } + return d.QuoteField(schema) + "." + d.QuoteField(table) +} + +func (d SqlServerDialect) QuerySuffix() string { return ";" } + +func (d SqlServerDialect) IfSchemaNotExists(command, schema string) string { + s := fmt.Sprintf("if schema_id(N'%s') is null %s", schema, command) + return s +} + +func (d SqlServerDialect) IfTableExists(command, schema, table string) string { + var schema_clause string + if strings.TrimSpace(schema) != "" { + schema_clause = fmt.Sprintf("%s.", d.QuoteField(schema)) + } + s := fmt.Sprintf("if object_id('%s%s') is not null %s", schema_clause, d.QuoteField(table), command) + return s +} + +func (d SqlServerDialect) IfTableNotExists(command, schema, table string) string { + var schema_clause string + if strings.TrimSpace(schema) != "" { + schema_clause = fmt.Sprintf("%s.", schema) + } + s := fmt.Sprintf("if object_id('%s%s') is null %s", schema_clause, table, command) + return s +} + +func (d SqlServerDialect) CreateIndexSuffix() string { return "" } +func (d SqlServerDialect) DropIndexSuffix() string { return "" } diff --git a/vendor/github.com/go-gorp/gorp/errors.go b/vendor/github.com/go-gorp/gorp/errors.go new file mode 100644 index 000000000..d13f03fc3 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/errors.go @@ -0,0 +1,38 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. 
It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp + +package gorp + +import ( + "fmt" +) + +// A non-fatal error, when a select query returns columns that do not exist +// as fields in the struct it is being mapped to +// TODO: discuss wether this needs an error. encoding/json silently ignores missing fields +type NoFieldInTypeError struct { + TypeName string + MissingColNames []string +} + +func (err *NoFieldInTypeError) Error() string { + return fmt.Sprintf("gorp: no fields %+v in type %s", err.MissingColNames, err.TypeName) +} + +// returns true if the error is non-fatal (ie, we shouldn't immediately return) +func NonFatalError(err error) bool { + switch err.(type) { + case *NoFieldInTypeError: + return true + default: + return false + } +} diff --git a/vendor/github.com/go-gorp/gorp/gorp.go b/vendor/github.com/go-gorp/gorp/gorp.go new file mode 100644 index 000000000..40e601ca5 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/gorp.go @@ -0,0 +1,608 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp +// +package gorp + +import ( + "context" + "database/sql" + "database/sql/driver" + "fmt" + "reflect" + "regexp" + "strings" + "time" +) + +// OracleString (empty string is null) +// TODO: move to dialect/oracle?, rename to String? +type OracleString struct { + sql.NullString +} + +// Scan implements the Scanner interface. 
+func (os *OracleString) Scan(value interface{}) error { + if value == nil { + os.String, os.Valid = "", false + return nil + } + os.Valid = true + return os.NullString.Scan(value) +} + +// Value implements the driver Valuer interface. +func (os OracleString) Value() (driver.Value, error) { + if !os.Valid || os.String == "" { + return nil, nil + } + return os.String, nil +} + +// SqlTyper is a type that returns its database type. Most of the +// time, the type can just use "database/sql/driver".Valuer; but when +// it returns nil for its empty value, it needs to implement SqlTyper +// to have its column type detected properly during table creation. +type SqlTyper interface { + SqlType() driver.Value +} + +// legacySqlTyper prevents breaking clients who depended on the previous +// SqlTyper interface +type legacySqlTyper interface { + SqlType() driver.Valuer +} + +// for fields that exists in DB table, but not exists in struct +type dummyField struct{} + +// Scan implements the Scanner interface. +func (nt *dummyField) Scan(value interface{}) error { + return nil +} + +var zeroVal reflect.Value +var versFieldConst = "[gorp_ver_field]" + +// The TypeConverter interface provides a way to map a value of one +// type to another type when persisting to, or loading from, a database. +// +// Example use cases: Implement type converter to convert bool types to "y"/"n" strings, +// or serialize a struct member as a JSON blob. +type TypeConverter interface { + // ToDb converts val to another type. Called before INSERT/UPDATE operations + ToDb(val interface{}) (interface{}, error) + + // FromDb returns a CustomScanner appropriate for this type. This will be used + // to hold values returned from SELECT queries. + // + // In particular the CustomScanner returned should implement a Binder + // function appropriate for the Go type you wish to convert the db value to + // + // If bool==false, then no custom scanner will be used for this field. 
+ FromDb(target interface{}) (CustomScanner, bool) +} + +// SqlExecutor exposes gorp operations that can be run from Pre/Post +// hooks. This hides whether the current operation that triggered the +// hook is in a transaction. +// +// See the DbMap function docs for each of the functions below for more +// information. +type SqlExecutor interface { + WithContext(ctx context.Context) SqlExecutor + Get(i interface{}, keys ...interface{}) (interface{}, error) + Insert(list ...interface{}) error + Update(list ...interface{}) (int64, error) + Delete(list ...interface{}) (int64, error) + Exec(query string, args ...interface{}) (sql.Result, error) + Select(i interface{}, query string, args ...interface{}) ([]interface{}, error) + SelectInt(query string, args ...interface{}) (int64, error) + SelectNullInt(query string, args ...interface{}) (sql.NullInt64, error) + SelectFloat(query string, args ...interface{}) (float64, error) + SelectNullFloat(query string, args ...interface{}) (sql.NullFloat64, error) + SelectStr(query string, args ...interface{}) (string, error) + SelectNullStr(query string, args ...interface{}) (sql.NullString, error) + SelectOne(holder interface{}, query string, args ...interface{}) error + Query(query string, args ...interface{}) (*sql.Rows, error) + QueryRow(query string, args ...interface{}) *sql.Row +} + +// DynamicTable allows the users of gorp to dynamically +// use different database table names during runtime +// while sharing the same golang struct for in-memory data +type DynamicTable interface { + TableName() string + SetTableName(string) +} + +// Compile-time check that DbMap and Transaction implement the SqlExecutor +// interface. 
+var _, _ SqlExecutor = &DbMap{}, &Transaction{} + +func argsString(args ...interface{}) string { + var margs string + for i, a := range args { + var v interface{} = a + if x, ok := v.(driver.Valuer); ok { + y, err := x.Value() + if err == nil { + v = y + } + } + switch v.(type) { + case string: + v = fmt.Sprintf("%q", v) + default: + v = fmt.Sprintf("%v", v) + } + margs += fmt.Sprintf("%d:%s", i+1, v) + if i+1 < len(args) { + margs += " " + } + } + return margs +} + +// Calls the Exec function on the executor, but attempts to expand any eligible named +// query arguments first. +func maybeExpandNamedQueryAndExec(e SqlExecutor, query string, args ...interface{}) (sql.Result, error) { + dbMap := extractDbMap(e) + + if len(args) == 1 { + query, args = maybeExpandNamedQuery(dbMap, query, args) + } + + return exec(e, query, args...) +} + +func extractDbMap(e SqlExecutor) *DbMap { + switch m := e.(type) { + case *DbMap: + return m + case *Transaction: + return m.dbmap + } + return nil +} + +func extractExecutorAndContext(e SqlExecutor) (executor, context.Context) { + switch m := e.(type) { + case *DbMap: + return m.Db, m.ctx + case *Transaction: + return m.tx, m.ctx + } + return nil, nil +} + +// maybeExpandNamedQuery checks the given arg to see if it's eligible to be used +// as input to a named query. If so, it rewrites the query to use +// dialect-dependent bindvars and instantiates the corresponding slice of +// parameters by extracting data from the map / struct. +// If not, returns the input values unchanged. 
+func maybeExpandNamedQuery(m *DbMap, query string, args []interface{}) (string, []interface{}) { + var ( + arg = args[0] + argval = reflect.ValueOf(arg) + ) + if argval.Kind() == reflect.Ptr { + argval = argval.Elem() + } + + if argval.Kind() == reflect.Map && argval.Type().Key().Kind() == reflect.String { + return expandNamedQuery(m, query, func(key string) reflect.Value { + return argval.MapIndex(reflect.ValueOf(key)) + }) + } + if argval.Kind() != reflect.Struct { + return query, args + } + if _, ok := arg.(time.Time); ok { + // time.Time is driver.Value + return query, args + } + if _, ok := arg.(driver.Valuer); ok { + // driver.Valuer will be converted to driver.Value. + return query, args + } + + return expandNamedQuery(m, query, argval.FieldByName) +} + +var keyRegexp = regexp.MustCompile(`:[[:word:]]+`) + +// expandNamedQuery accepts a query with placeholders of the form ":key", and a +// single arg of Kind Struct or Map[string]. It returns the query with the +// dialect's placeholders, and a slice of args ready for positional insertion +// into the query. +func expandNamedQuery(m *DbMap, query string, keyGetter func(key string) reflect.Value) (string, []interface{}) { + var ( + n int + args []interface{} + ) + return keyRegexp.ReplaceAllStringFunc(query, func(key string) string { + val := keyGetter(key[1:]) + if !val.IsValid() { + return key + } + args = append(args, val.Interface()) + newVar := m.Dialect.BindVar(n) + n++ + return newVar + }), args +} + +func columnToFieldIndex(m *DbMap, t reflect.Type, name string, cols []string) ([][]int, error) { + colToFieldIndex := make([][]int, len(cols)) + + // check if type t is a mapped table - if so we'll + // check the table for column aliasing below + tableMapped := false + table := tableOrNil(m, t, name) + if table != nil { + tableMapped = true + } + + // Loop over column names and find field in i to bind to + // based on column name. 
all returned columns must match + // a field in the i struct + missingColNames := []string{} + for x := range cols { + colName := strings.ToLower(cols[x]) + field, found := t.FieldByNameFunc(func(fieldName string) bool { + field, _ := t.FieldByName(fieldName) + cArguments := strings.Split(field.Tag.Get("db"), ",") + fieldName = cArguments[0] + + if fieldName == "-" { + return false + } else if fieldName == "" { + fieldName = field.Name + } + if tableMapped { + colMap := colMapOrNil(table, fieldName) + if colMap != nil { + fieldName = colMap.ColumnName + } + } + return colName == strings.ToLower(fieldName) + }) + if found { + colToFieldIndex[x] = field.Index + } + if colToFieldIndex[x] == nil { + missingColNames = append(missingColNames, colName) + } + } + if len(missingColNames) > 0 { + return colToFieldIndex, &NoFieldInTypeError{ + TypeName: t.Name(), + MissingColNames: missingColNames, + } + } + return colToFieldIndex, nil +} + +func fieldByName(val reflect.Value, fieldName string) *reflect.Value { + // try to find field by exact match + f := val.FieldByName(fieldName) + + if f != zeroVal { + return &f + } + + // try to find by case insensitive match - only the Postgres driver + // seems to require this - in the case where columns are aliased in the sql + fieldNameL := strings.ToLower(fieldName) + fieldCount := val.NumField() + t := val.Type() + for i := 0; i < fieldCount; i++ { + sf := t.Field(i) + if strings.ToLower(sf.Name) == fieldNameL { + f := val.Field(i) + return &f + } + } + + return nil +} + +// toSliceType returns the element type of the given object, if the object is a +// "*[]*Element" or "*[]Element". If not, returns nil. +// err is returned if the user was trying to pass a pointer-to-slice but failed. 
+func toSliceType(i interface{}) (reflect.Type, error) {
	t := reflect.TypeOf(i)
	if t.Kind() != reflect.Ptr {
		// If it's a slice, return a more helpful error message
		if t.Kind() == reflect.Slice {
			return nil, fmt.Errorf("gorp: cannot SELECT into a non-pointer slice: %v", t)
		}
		return nil, nil
	}
	if t = t.Elem(); t.Kind() != reflect.Slice {
		return nil, nil
	}
	return t.Elem(), nil
}

func toType(i interface{}) (reflect.Type, error) {
	t := reflect.TypeOf(i)

	// If a Pointer to a type, follow
	for t.Kind() == reflect.Ptr {
		t = t.Elem()
	}

	if t.Kind() != reflect.Struct {
		return nil, fmt.Errorf("gorp: cannot SELECT into this type: %v", reflect.TypeOf(i))
	}
	return t, nil
}

type foundTable struct {
	table   *TableMap
	dynName *string
}

func tableFor(m *DbMap, t reflect.Type, i interface{}) (*foundTable, error) {
	if dyn, isDynamic := i.(DynamicTable); isDynamic {
		tableName := dyn.TableName()
		table, err := m.DynamicTableFor(tableName, true)
		if err != nil {
			return nil, err
		}
		return &foundTable{
			table:   table,
			dynName: &tableName,
		}, nil
	}
	table, err := m.TableFor(t, true)
	if err != nil {
		return nil, err
	}
	return &foundTable{table: table}, nil
}

func get(m *DbMap, exec SqlExecutor, i interface{},
	keys ...interface{}) (interface{}, error) {

	t, err := toType(i)
	if err != nil {
		return nil, err
	}

	foundTable, err := tableFor(m, t, i)
	if err != nil {
		return nil, err
	}
	table := foundTable.table

	plan := table.bindGet()

	v := reflect.New(t)
	if foundTable.dynName != nil {
		retDyn := v.Interface().(DynamicTable)
		retDyn.SetTableName(*foundTable.dynName)
	}

	dest := make([]interface{}, len(plan.argFields))

	conv := m.TypeConverter
	custScan := make([]CustomScanner, 0)

	for x, fieldName := range plan.argFields {
		f := v.Elem().FieldByName(fieldName)
		target := f.Addr().Interface()
		if conv != nil {
			scanner, ok := conv.FromDb(target)
			if ok {
				target = scanner.Holder
				custScan = append(custScan, scanner)
			}
		}
		dest[x] = target
	}

	row := exec.QueryRow(plan.query, keys...)
	err = row.Scan(dest...)
	if err != nil {
		if err == sql.ErrNoRows {
			err = nil
		}
		return nil, err
	}

	for _, c := range custScan {
		err = c.Bind()
		if err != nil {
			return nil, err
		}
	}

	if v, ok := v.Interface().(HasPostGet); ok {
		err := v.PostGet(exec)
		if err != nil {
			return nil, err
		}
	}

	return v.Interface(), nil
}

func delete(m *DbMap, exec SqlExecutor, list ...interface{}) (int64, error) {
	count := int64(0)
	for _, ptr := range list {
		table, elem, err := m.tableForPointer(ptr, true)
		if err != nil {
			return -1, err
		}

		eval := elem.Addr().Interface()
		if v, ok := eval.(HasPreDelete); ok {
			err = v.PreDelete(exec)
			if err != nil {
				return -1, err
			}
		}

		bi, err := table.bindDelete(elem)
		if err != nil {
			return -1, err
		}

		res, err := exec.Exec(bi.query, bi.args...)
		if err != nil {
			return -1, err
		}
		rows, err := res.RowsAffected()
		if err != nil {
			return -1, err
		}

		if rows == 0 && bi.existingVersion > 0 {
			return lockError(m, exec, table.TableName,
				bi.existingVersion, elem, bi.keys...)
		}

		count += rows

		if v, ok := eval.(HasPostDelete); ok {
			err := v.PostDelete(exec)
			if err != nil {
				return -1, err
			}
		}
	}

	return count, nil
}

func update(m *DbMap, exec SqlExecutor, colFilter ColumnFilter, list ...interface{}) (int64, error) {
	count := int64(0)
	for _, ptr := range list {
		table, elem, err := m.tableForPointer(ptr, true)
		if err != nil {
			return -1, err
		}

		eval := elem.Addr().Interface()
		if v, ok := eval.(HasPreUpdate); ok {
			err = v.PreUpdate(exec)
			if err != nil {
				return -1, err
			}
		}

		bi, err := table.bindUpdate(elem, colFilter)
		if err != nil {
			return -1, err
		}

		res, err := exec.Exec(bi.query, bi.args...)
		if err != nil {
			return -1, err
		}

		rows, err := res.RowsAffected()
		if err != nil {
			return -1, err
		}

		if rows == 0 && bi.existingVersion > 0 {
			return lockError(m, exec, table.TableName,
				bi.existingVersion, elem, bi.keys...)
		}

		if bi.versField != "" {
			elem.FieldByName(bi.versField).SetInt(bi.existingVersion + 1)
		}

		count += rows

		if v, ok := eval.(HasPostUpdate); ok {
			err = v.PostUpdate(exec)
			if err != nil {
				return -1, err
			}
		}
	}
	return count, nil
}

func insert(m *DbMap, exec SqlExecutor, list ...interface{}) error {
	for _, ptr := range list {
		table, elem, err := m.tableForPointer(ptr, false)
		if err != nil {
			return err
		}

		eval := elem.Addr().Interface()
		if v, ok := eval.(HasPreInsert); ok {
			err := v.PreInsert(exec)
			if err != nil {
				return err
			}
		}

		bi, err := table.bindInsert(elem)
		if err != nil {
			return err
		}

		if bi.autoIncrIdx > -1 {
			f := elem.FieldByName(bi.autoIncrFieldName)
			switch inserter := m.Dialect.(type) {
			case IntegerAutoIncrInserter:
				id, err := inserter.InsertAutoIncr(exec, bi.query, bi.args...)
				if err != nil {
					return err
				}
				k := f.Kind()
				if (k == reflect.Int) || (k == reflect.Int16) || (k == reflect.Int32) || (k == reflect.Int64) {
					f.SetInt(id)
				} else if (k == reflect.Uint) || (k == reflect.Uint16) || (k == reflect.Uint32) || (k == reflect.Uint64) {
					f.SetUint(uint64(id))
				} else {
					return fmt.Errorf("gorp: cannot set autoincrement value on non-Int field. SQL=%s autoIncrIdx=%d autoIncrFieldName=%s", bi.query, bi.autoIncrIdx, bi.autoIncrFieldName)
				}
			case TargetedAutoIncrInserter:
				err := inserter.InsertAutoIncrToTarget(exec, bi.query, f.Addr().Interface(), bi.args...)
				if err != nil {
					return err
				}
			case TargetQueryInserter:
				var idQuery = table.ColMap(bi.autoIncrFieldName).GeneratedIdQuery
				if idQuery == "" {
					return fmt.Errorf("gorp: cannot set %s value if its ColumnMap.GeneratedIdQuery is empty", bi.autoIncrFieldName)
				}
				err := inserter.InsertQueryToTarget(exec, bi.query, idQuery, f.Addr().Interface(), bi.args...)
				if err != nil {
					return err
				}
			default:
				return fmt.Errorf("gorp: cannot use autoincrement fields on dialects that do not implement an autoincrementing interface")
			}
		} else {
			_, err := exec.Exec(bi.query, bi.args...)
			if err != nil {
				return err
			}
		}

		if v, ok := eval.(HasPostInsert); ok {
			err := v.PostInsert(exec)
			if err != nil {
				return err
			}
		}
	}
	return nil
}
diff --git a/vendor/github.com/go-gorp/gorp/gorp_go17.go b/vendor/github.com/go-gorp/gorp/gorp_go17.go
new file mode 100644
index 000000000..95cc989d1
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/gorp_go17.go
@@ -0,0 +1,54 @@
+// Copyright 2012 James Cooper. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.

+// Package gorp provides a simple way to marshal Go structs to and from
+// SQL databases. It uses the database/sql package, and should work with any
+// compliant database/sql driver.
+//
+// Source code and project home:
+// https://github.com/go-gorp/gorp
+//

+// +build !go1.8

+package gorp

+import "database/sql"

+// executor exposes the sql.DB and sql.Tx functions so that it can be used
+// on internal functions that need to be agnostic to the underlying object.
+type executor interface {
	Exec(query string, args ...interface{}) (sql.Result, error)
	Prepare(query string) (*sql.Stmt, error)
	QueryRow(query string, args ...interface{}) *sql.Row
	Query(query string, args ...interface{}) (*sql.Rows, error)
}

func exec(e SqlExecutor, query string, args ...interface{}) (sql.Result, error) {
	executor, _ := extractExecutorAndContext(e)

	return executor.Exec(query, args...)
}

func prepare(e SqlExecutor, query string) (*sql.Stmt, error) {
	executor, _ := extractExecutorAndContext(e)

	return executor.Prepare(query)
}

func queryRow(e SqlExecutor, query string, args ...interface{}) *sql.Row {
	executor, _ := extractExecutorAndContext(e)

	return executor.QueryRow(query, args...)
}

func query(e SqlExecutor, query string, args ...interface{}) (*sql.Rows, error) {
	executor, _ := extractExecutorAndContext(e)

	return executor.Query(query, args...)
}

func begin(m *DbMap) (*sql.Tx, error) {
	return m.Db.Begin()
}
diff --git a/vendor/github.com/go-gorp/gorp/gorp_go18.go b/vendor/github.com/go-gorp/gorp/gorp_go18.go
new file mode 100644
index 000000000..ecebd473f
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/gorp_go18.go
@@ -0,0 +1,81 @@
+// Copyright 2012 James Cooper. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.

+// Package gorp provides a simple way to marshal Go structs to and from
+// SQL databases. It uses the database/sql package, and should work with any
+// compliant database/sql driver.
+//
+// Source code and project home:
+// https://github.com/go-gorp/gorp
+//

+// +build go1.8

+package gorp

+import (
+	"context"
+	"database/sql"
+)

+// executor exposes the query methods shared by sql.DB and sql.Tx, and their
+// Context variants, so internal helpers can stay agnostic to the underlying object.
+type executor interface {
	Exec(query string, args ...interface{}) (sql.Result, error)
	Prepare(query string) (*sql.Stmt, error)
	QueryRow(query string, args ...interface{}) *sql.Row
	Query(query string, args ...interface{}) (*sql.Rows, error)
	ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
	PrepareContext(ctx context.Context, query string) (*sql.Stmt, error)
	QueryRowContext(ctx context.Context, query string, args ...interface{}) *sql.Row
	QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
}

func exec(e SqlExecutor, query string, args ...interface{}) (sql.Result, error) {
	executor, ctx := extractExecutorAndContext(e)

	if ctx != nil {
		return executor.ExecContext(ctx, query, args...)
	}

	return executor.Exec(query, args...)
}

func prepare(e SqlExecutor, query string) (*sql.Stmt, error) {
	executor, ctx := extractExecutorAndContext(e)

	if ctx != nil {
		return executor.PrepareContext(ctx, query)
	}

	return executor.Prepare(query)
}

func queryRow(e SqlExecutor, query string, args ...interface{}) *sql.Row {
	executor, ctx := extractExecutorAndContext(e)

	if ctx != nil {
		return executor.QueryRowContext(ctx, query, args...)
	}

	return executor.QueryRow(query, args...)
}

func query(e SqlExecutor, query string, args ...interface{}) (*sql.Rows, error) {
	executor, ctx := extractExecutorAndContext(e)

	if ctx != nil {
		return executor.QueryContext(ctx, query, args...)
	}

	return executor.Query(query, args...)
}

func begin(m *DbMap) (*sql.Tx, error) {
	if m.ctx != nil {
		return m.Db.BeginTx(m.ctx, nil)
	}

	return m.Db.Begin()
}
diff --git a/vendor/github.com/go-gorp/gorp/hooks.go b/vendor/github.com/go-gorp/gorp/hooks.go
new file mode 100644
index 000000000..192b51f00
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/hooks.go
@@ -0,0 +1,49 @@
+// Copyright 2012 James Cooper. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.

+// Package gorp provides a simple way to marshal Go structs to and from
+// SQL databases. It uses the database/sql package, and should work with any
+// compliant database/sql driver.
+//
+// Source code and project home:
+// https://github.com/go-gorp/gorp

+package gorp

+//++ TODO v2-phase3: HasPostGet => PostGetter, HasPostDelete => PostDeleter, etc.

+// PostGet() will be executed after the GET statement.
+type HasPostGet interface {
	PostGet(SqlExecutor) error
}

// PostDelete() will be executed after the DELETE statement
type HasPostDelete interface {
	PostDelete(SqlExecutor) error
}

// PostUpdate() will be executed after the UPDATE statement
type HasPostUpdate interface {
	PostUpdate(SqlExecutor) error
}

// PostInsert() will be executed after the INSERT statement
type HasPostInsert interface {
	PostInsert(SqlExecutor) error
}

// PreDelete() will be executed before the DELETE statement.
type HasPreDelete interface {
	PreDelete(SqlExecutor) error
}

// PreUpdate() will be executed before UPDATE statement.
type HasPreUpdate interface {
	PreUpdate(SqlExecutor) error
}

// PreInsert() will be executed before INSERT statement.
type HasPreInsert interface {
	PreInsert(SqlExecutor) error
}
diff --git a/vendor/github.com/go-gorp/gorp/index.go b/vendor/github.com/go-gorp/gorp/index.go
new file mode 100644
index 000000000..01ecd9eca
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/index.go
@@ -0,0 +1,56 @@
+// Copyright 2012 James Cooper. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.

+// Package gorp provides a simple way to marshal Go structs to and from
+// SQL databases. It uses the database/sql package, and should work with any
+// compliant database/sql driver.
+//
+// Source code and project home:
+// https://github.com/go-gorp/gorp

+package gorp

+// IndexMap represents a mapping between a Go struct field and a single
+// index in a table.
+// Unique and MaxSize only inform the
+// CreateTables() function and are not used by Insert/Update/Delete/Get.
+type IndexMap struct {
	// Index name in db table
	IndexName string

	// If true, " unique" is added to create index statements.
	// Not used elsewhere
	Unique bool

	// Index type supported by Dialect
	// Postgres: B-tree, Hash, GiST and GIN.
	// Mysql: Btree, Hash.
	// Sqlite: nil.
	IndexType string

	// Column names for single- and multi-column indexes
	columns []string
}

// Rename allows you to specify the index name in the table
//
// Example: table.IndMap("customer_test_idx").Rename("customer_idx")
//
func (idx *IndexMap) Rename(indname string) *IndexMap {
	idx.IndexName = indname
	return idx
}

// SetUnique adds "unique" to the create index statements for this
// index, if b is true.
func (idx *IndexMap) SetUnique(b bool) *IndexMap {
	idx.Unique = b
	return idx
}

// SetIndexType specifies the index type supported by the chosen SQL Dialect
func (idx *IndexMap) SetIndexType(indtype string) *IndexMap {
	idx.IndexType = indtype
	return idx
}
diff --git a/vendor/github.com/go-gorp/gorp/lockerror.go b/vendor/github.com/go-gorp/gorp/lockerror.go
new file mode 100644
index 000000000..07b3047ae
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/lockerror.go
@@ -0,0 +1,63 @@
+// Copyright 2012 James Cooper. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.

+// Package gorp provides a simple way to marshal Go structs to and from
+// SQL databases. It uses the database/sql package, and should work with any
+// compliant database/sql driver.
+//
+// Source code and project home:
+// https://github.com/go-gorp/gorp

+package gorp

+import (
+	"fmt"
+	"reflect"
+)

+// OptimisticLockError is returned by Update() or Delete() if the
+// struct being modified has a Version field and the value is not equal to
+// the current value in the database
+type OptimisticLockError struct {
	// Table name where the lock error occurred
	TableName string

	// Primary key values of the row being updated/deleted
	Keys []interface{}

	// true if a row was found with those keys, indicating the
	// LocalVersion is stale. false if no value was found with those
	// keys, suggesting the row has been deleted since loaded, or
	// was never inserted to begin with
	RowExists bool

	// Version value on the struct passed to Update/Delete. This value is
	// out of sync with the database.
	LocalVersion int64
}

// Error implements the error interface, describing the optimistic lock failure.
func (e OptimisticLockError) Error() string {
	if e.RowExists {
		return fmt.Sprintf("gorp: OptimisticLockError table=%s keys=%v out of date version=%d", e.TableName, e.Keys, e.LocalVersion)
	}

	return fmt.Sprintf("gorp: OptimisticLockError no row found for table=%s keys=%v", e.TableName, e.Keys)
}

func lockError(m *DbMap, exec SqlExecutor, tableName string,
	existingVer int64, elem reflect.Value,
	keys ...interface{}) (int64, error) {

	existing, err := get(m, exec, elem.Interface(), keys...)
	if err != nil {
		return -1, err
	}

	ole := OptimisticLockError{tableName, keys, true, existingVer}
	if existing == nil {
		ole.RowExists = false
	}
	return -1, ole
}
diff --git a/vendor/github.com/go-gorp/gorp/logging.go b/vendor/github.com/go-gorp/gorp/logging.go
new file mode 100644
index 000000000..89d6c0e79
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/logging.go
@@ -0,0 +1,44 @@
+// Copyright 2012 James Cooper. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.

+// Package gorp provides a simple way to marshal Go structs to and from
+// SQL databases. It uses the database/sql package, and should work with any
+// compliant database/sql driver.
+//
+// Source code and project home:
+// https://github.com/go-gorp/gorp

+package gorp

+import "fmt"

+type GorpLogger interface {
	Printf(format string, v ...interface{})
}

// TraceOn turns on SQL statement logging for this DbMap. After this is
// called, all SQL statements will be sent to the logger. If prefix is
// a non-empty string, it will be written to the front of all logged
// strings, which can aid in filtering log lines.
//
// Use TraceOn if you want to spy on the SQL statements that gorp
// generates.
//
// Note that the base log.Logger type satisfies GorpLogger, but adapters can
// easily be written for other logging packages (e.g., the golang-sanctioned
// glog framework).
func (m *DbMap) TraceOn(prefix string, logger GorpLogger) {
	m.logger = logger
	if prefix == "" {
		m.logPrefix = prefix
	} else {
		m.logPrefix = fmt.Sprintf("%s ", prefix)
	}
}

// TraceOff turns off SQL statement tracing. It is safe to call repeatedly.
func (m *DbMap) TraceOff() {
	m.logger = nil
	m.logPrefix = ""
}
diff --git a/vendor/github.com/go-gorp/gorp/nulltypes.go b/vendor/github.com/go-gorp/gorp/nulltypes.go
new file mode 100644
index 000000000..870770372
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/nulltypes.go
@@ -0,0 +1,58 @@
+// Copyright 2012 James Cooper. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.

+// Package gorp provides a simple way to marshal Go structs to and from
+// SQL databases. It uses the database/sql package, and should work with any
+// compliant database/sql driver.
+//
+// Source code and project home:
+// https://github.com/go-gorp/gorp

+package gorp

+import (
+	"database/sql/driver"
+	"time"
+)

+// NullTime represents a time.Time value that may be NULL in the database.
+type NullTime struct {
	Time  time.Time
	Valid bool // Valid is true if Time is not NULL
}

// Scan implements the sql.Scanner interface; accepts time.Time or []byte input.
func (nt *NullTime) Scan(value interface{}) error {
	switch t := value.(type) {
	case time.Time:
		nt.Time, nt.Valid = t, true
	case []byte:
		nt.Valid = false
		for _, dtfmt := range []string{
			"2006-01-02 15:04:05.999999999",
			"2006-01-02T15:04:05.999999999",
			"2006-01-02 15:04:05",
			"2006-01-02T15:04:05",
			"2006-01-02 15:04",
			"2006-01-02T15:04",
			"2006-01-02",
			"2006-01-02 15:04:05-07:00",
		} {
			var err error
			if nt.Time, err = time.Parse(dtfmt, string(t)); err == nil {
				nt.Valid = true
				break
			}
		}
	}
	return nil
}

// Value implements the driver.Valuer interface; NULL is returned when Valid is false.
func (nt NullTime) Value() (driver.Value, error) {
	if !nt.Valid {
		return nil, nil
	}
	return nt.Time, nil
}
diff --git a/vendor/github.com/go-gorp/gorp/select.go b/vendor/github.com/go-gorp/gorp/select.go
new file mode 100644
index 000000000..fa9cae8da
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/select.go
@@ -0,0 +1,366 @@
+// Copyright 2012 James Cooper. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.

+// Package gorp provides a simple way to marshal Go structs to and from
+// SQL databases. It uses the database/sql package, and should work with any
+// compliant database/sql driver.
+//
+// Source code and project home:
+// https://github.com/go-gorp/gorp

+package gorp

+import (
+	"database/sql"
+	"fmt"
+	"reflect"
+)

+// SelectInt executes the given query, which should be a SELECT statement for a single
+// integer column, and returns the value of the first row returned. If no rows are
+// found, zero is returned.
+func SelectInt(e SqlExecutor, query string, args ...interface{}) (int64, error) {
	var h int64
	err := selectVal(e, &h, query, args...)
	if err != nil && err != sql.ErrNoRows {
		return 0, err
	}
	return h, nil
}

// SelectNullInt executes the given query, which should be a SELECT statement for a single
// integer column, and returns the value of the first row returned. If no rows are
// found, the empty sql.NullInt64 value is returned.
func SelectNullInt(e SqlExecutor, query string, args ...interface{}) (sql.NullInt64, error) {
	var h sql.NullInt64
	err := selectVal(e, &h, query, args...)
	if err != nil && err != sql.ErrNoRows {
		return h, err
	}
	return h, nil
}

// SelectFloat executes the given query, which should be a SELECT statement for a single
// float column, and returns the value of the first row returned. If no rows are
// found, zero is returned.
func SelectFloat(e SqlExecutor, query string, args ...interface{}) (float64, error) {
	var h float64
	err := selectVal(e, &h, query, args...)
	if err != nil && err != sql.ErrNoRows {
		return 0, err
	}
	return h, nil
}

// SelectNullFloat executes the given query, which should be a SELECT statement for a single
// float column, and returns the value of the first row returned. If no rows are
// found, the empty sql.NullFloat64 value is returned.
func SelectNullFloat(e SqlExecutor, query string, args ...interface{}) (sql.NullFloat64, error) {
	var h sql.NullFloat64
	err := selectVal(e, &h, query, args...)
	if err != nil && err != sql.ErrNoRows {
		return h, err
	}
	return h, nil
}

// SelectStr executes the given query, which should be a SELECT statement for a single
// char/varchar column, and returns the value of the first row returned. If no rows are
// found, an empty string is returned.
func SelectStr(e SqlExecutor, query string, args ...interface{}) (string, error) {
	var h string
	err := selectVal(e, &h, query, args...)
	if err != nil && err != sql.ErrNoRows {
		return "", err
	}
	return h, nil
}

// SelectNullStr executes the given query, which should be a SELECT
// statement for a single char/varchar column, and returns the value
// of the first row returned. If no rows are found, the empty
// sql.NullString is returned.
func SelectNullStr(e SqlExecutor, query string, args ...interface{}) (sql.NullString, error) {
	var h sql.NullString
	err := selectVal(e, &h, query, args...)
	if err != nil && err != sql.ErrNoRows {
		return h, err
	}
	return h, nil
}

// SelectOne executes the given query (which should be a SELECT statement)
// and binds the result to holder, which must be a pointer.
//
// If no row is found, an error (sql.ErrNoRows specifically) will be returned
//
// If more than one row is found, an error will be returned.
//
func SelectOne(m *DbMap, e SqlExecutor, holder interface{}, query string, args ...interface{}) error {
	t := reflect.TypeOf(holder)
	if t.Kind() == reflect.Ptr {
		t = t.Elem()
	} else {
		// %T (not %t) so the message reports the holder's Go type.
		return fmt.Errorf("gorp: SelectOne holder must be a pointer, but got: %T", holder)
	}

	// Handle pointer to pointer
	isptr := false
	if t.Kind() == reflect.Ptr {
		isptr = true
		t = t.Elem()
	}

	if t.Kind() == reflect.Struct {
		var nonFatalErr error

		list, err := hookedselect(m, e, holder, query, args...)
		if err != nil {
			if !NonFatalError(err) { // FIXME: double negative, rename NonFatalError to FatalError
				return err
			}
			nonFatalErr = err
		}

		dest := reflect.ValueOf(holder)
		if isptr {
			dest = dest.Elem()
		}

		if len(list) > 0 { // len() of a nil slice is 0, so no nil check needed
			// check for multiple rows
			if len(list) > 1 {
				return fmt.Errorf("gorp: multiple rows returned for: %s - %v", query, args)
			}

			// Initialize if nil
			if dest.IsNil() {
				dest.Set(reflect.New(t))
			}

			// only one row found
			src := reflect.ValueOf(list[0])
			dest.Elem().Set(src.Elem())
		} else {
			// No rows found, return a proper error.
			return sql.ErrNoRows
		}

		return nonFatalErr
	}

	return selectVal(e, holder, query, args...)
}

// selectVal scans the first column of the first returned row into holder;
// sql.ErrNoRows is returned when the query yields no rows.
func selectVal(e SqlExecutor, holder interface{}, query string, args ...interface{}) error {
	if len(args) == 1 {
		switch m := e.(type) {
		case *DbMap:
			query, args = maybeExpandNamedQuery(m, query, args)
		case *Transaction:
			query, args = maybeExpandNamedQuery(m.dbmap, query, args)
		}
	}
	rows, err := e.Query(query, args...)
	if err != nil {
		return err
	}
	defer rows.Close()

	if !rows.Next() {
		if err := rows.Err(); err != nil {
			return err
		}
		return sql.ErrNoRows
	}

	return rows.Scan(holder)
}

// hookedselect wraps rawselect and fires the HasPostGet hook on each result.
func hookedselect(m *DbMap, exec SqlExecutor, i interface{}, query string,
	args ...interface{}) ([]interface{}, error) {

	var nonFatalErr error

	list, err := rawselect(m, exec, i, query, args...)
	if err != nil {
		if !NonFatalError(err) {
			return nil, err
		}
		nonFatalErr = err
	}

	// Determine where the results are: written to i, or returned in list
	if t, _ := toSliceType(i); t == nil {
		for _, v := range list {
			if v, ok := v.(HasPostGet); ok {
				err := v.PostGet(exec)
				if err != nil {
					return nil, err
				}
			}
		}
	} else {
		resultsValue := reflect.Indirect(reflect.ValueOf(i))
		for i := 0; i < resultsValue.Len(); i++ {
			if v, ok := resultsValue.Index(i).Interface().(HasPostGet); ok {
				err := v.PostGet(exec)
				if err != nil {
					return nil, err
				}
			}
		}
	}
	return list, nonFatalErr
}

// rawselect runs the query and maps result rows onto i (struct, slice of
// structs, or slice of scalars), returning the rows it did not write to i.
func rawselect(m *DbMap, exec SqlExecutor, i interface{}, query string,
	args ...interface{}) ([]interface{}, error) {
	var (
		appendToSlice   = false // Write results to i directly?
		intoStruct      = true  // Selecting into a struct?
		pointerElements = true  // Are the slice elements pointers (vs values)?
	)

	var nonFatalErr error

	tableName := ""
	var dynObj DynamicTable
	isDynamic := false
	if dynObj, isDynamic = i.(DynamicTable); isDynamic {
		tableName = dynObj.TableName()
	}

	// get type for i, verifying it's a supported destination
	t, err := toType(i)
	if err != nil {
		var err2 error
		if t, err2 = toSliceType(i); t == nil {
			if err2 != nil {
				return nil, err2
			}
			return nil, err
		}
		pointerElements = t.Kind() == reflect.Ptr
		if pointerElements {
			t = t.Elem()
		}
		appendToSlice = true
		intoStruct = t.Kind() == reflect.Struct
	}

	// If the caller supplied a single struct/map argument, assume a "named
	// parameter" query. Extract the named arguments from the struct/map, create
	// the flat arg slice, and rewrite the query to use the dialect's placeholder.
	if len(args) == 1 {
		query, args = maybeExpandNamedQuery(m, query, args)
	}

	// Run the query
	rows, err := exec.Query(query, args...)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	// Fetch the column names as returned from db
	cols, err := rows.Columns()
	if err != nil {
		return nil, err
	}

	if !intoStruct && len(cols) > 1 {
		return nil, fmt.Errorf("gorp: select into non-struct slice requires 1 column, got %d", len(cols))
	}

	var colToFieldIndex [][]int
	if intoStruct {
		colToFieldIndex, err = columnToFieldIndex(m, t, tableName, cols)
		if err != nil {
			if !NonFatalError(err) {
				return nil, err
			}
			nonFatalErr = err
		}
	}

	conv := m.TypeConverter

	// Add results to one of these two slices.
	var (
		list       = make([]interface{}, 0)
		sliceValue = reflect.Indirect(reflect.ValueOf(i))
	)

	for {
		if !rows.Next() {
			// if an error occurred during iteration, return it
			if rows.Err() != nil {
				return nil, rows.Err()
			}
			// time to exit from outer "for" loop
			break
		}
		v := reflect.New(t)

		if isDynamic {
			v.Interface().(DynamicTable).SetTableName(tableName)
		}

		dest := make([]interface{}, len(cols))

		custScan := make([]CustomScanner, 0)

		for x := range cols {
			f := v.Elem()
			if intoStruct {
				index := colToFieldIndex[x]
				if index == nil {
					// this field is not present in the struct, so create a dummy
					// value for rows.Scan to scan into
					var dummy dummyField
					dest[x] = &dummy
					continue
				}
				f = f.FieldByIndex(index)
			}
			target := f.Addr().Interface()
			if conv != nil {
				scanner, ok := conv.FromDb(target)
				if ok {
					target = scanner.Holder
					custScan = append(custScan, scanner)
				}
			}
			dest[x] = target
		}

		err = rows.Scan(dest...)
		if err != nil {
			return nil, err
		}

		for _, c := range custScan {
			err = c.Bind()
			if err != nil {
				return nil, err
			}
		}

		if appendToSlice {
			if !pointerElements {
				v = v.Elem()
			}
			sliceValue.Set(reflect.Append(sliceValue, v))
		} else {
			list = append(list, v.Interface())
		}
	}

	if appendToSlice && sliceValue.IsNil() {
		sliceValue.Set(reflect.MakeSlice(sliceValue.Type(), 0, 0))
	}

	return list, nonFatalErr
}
diff --git a/vendor/github.com/go-gorp/gorp/table.go b/vendor/github.com/go-gorp/gorp/table.go
new file mode 100644
index 000000000..5c513909a
--- /dev/null
+++ b/vendor/github.com/go-gorp/gorp/table.go
@@ -0,0 +1,247 @@
+// Copyright 2012 James Cooper. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.

+// Package gorp provides a simple way to marshal Go structs to and from
+// SQL databases. It uses the database/sql package, and should work with any
+// compliant database/sql driver.
+//
+// Source code and project home:
+// https://github.com/go-gorp/gorp

+package gorp

+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strings"
+)

+// TableMap represents a mapping between a Go struct and a database table
+// Use dbmap.AddTable() or dbmap.AddTableWithName() to create these
+type TableMap struct {
	// Name of database table.
	TableName      string
	SchemaName     string
	gotype         reflect.Type
	Columns        []*ColumnMap
	keys           []*ColumnMap
	indexes        []*IndexMap
	uniqueTogether [][]string
	version        *ColumnMap
	insertPlan     bindPlan
	updatePlan     bindPlan
	deletePlan     bindPlan
	getPlan        bindPlan
	dbmap          *DbMap
}

// ResetSql removes cached insert/update/select/delete SQL strings
// associated with this TableMap. Call this if you've modified
// any column names or the table name itself.
+func (t *TableMap) ResetSql() {
	t.insertPlan = bindPlan{}
	t.updatePlan = bindPlan{}
	t.deletePlan = bindPlan{}
	t.getPlan = bindPlan{}
}

// SetKeys lets you specify the fields on a struct that map to primary
// key columns on the table. If isAutoIncr is set, result.LastInsertId()
// will be used after INSERT to bind the generated id to the Go struct.
//
// Automatically calls ResetSql() to ensure SQL statements are regenerated.
//
// Panics if isAutoIncr is true, and fieldNames length != 1
//
func (t *TableMap) SetKeys(isAutoIncr bool, fieldNames ...string) *TableMap {
	if isAutoIncr && len(fieldNames) != 1 {
		panic(fmt.Sprintf(
			"gorp: SetKeys: fieldNames length must be 1 if key is auto-increment. (Saw %v fieldNames)",
			len(fieldNames)))
	}
	t.keys = make([]*ColumnMap, 0)
	for _, name := range fieldNames {
		colmap := t.ColMap(name)
		colmap.isPK = true
		colmap.isAutoIncr = isAutoIncr
		t.keys = append(t.keys, colmap)
	}
	t.ResetSql()

	return t
}

// SetUniqueTogether lets you specify uniqueness constraints across multiple
// columns on the table. Each call adds an additional constraint for the
// specified columns.
//
// Automatically calls ResetSql() to ensure SQL statements are regenerated.
//
// Panics if fieldNames length < 2.
//
func (t *TableMap) SetUniqueTogether(fieldNames ...string) *TableMap {
	if len(fieldNames) < 2 {
		// direct string: fmt.Sprintf had no formatting directives (staticcheck S1039)
		panic("gorp: SetUniqueTogether: must provide at least two fieldNames to set uniqueness constraint.")
	}

	// Copy fieldNames so later mutation of the caller's slice cannot
	// affect the registered constraint.
	columns := make([]string, 0, len(fieldNames))
	columns = append(columns, fieldNames...)
	t.uniqueTogether = append(t.uniqueTogether, columns)
	t.ResetSql()

	return t
}

// ColMap returns the ColumnMap pointer matching the given struct field
// name. It panics if the struct does not contain a field matching this
// name.
func (t *TableMap) ColMap(field string) *ColumnMap {
	col := colMapOrNil(t, field)
	if col == nil {
		e := fmt.Sprintf("No ColumnMap in table %s type %s with field %s",
			t.TableName, t.gotype.Name(), field)

		panic(e)
	}
	return col
}

// colMapOrNil is ColMap without the panic: nil is returned when no column
// matches field by Go field name or by database column name.
func colMapOrNil(t *TableMap, field string) *ColumnMap {
	for _, col := range t.Columns {
		if col.fieldName == field || col.ColumnName == field {
			return col
		}
	}
	return nil
}

// IdxMap returns the IndexMap pointer matching the given index name.
func (t *TableMap) IdxMap(field string) *IndexMap {
	for _, idx := range t.indexes {
		if idx.IndexName == field {
			return idx
		}
	}
	return nil
}

// AddIndex registers the index with gorp for specified table with given parameters.
// This operation is idempotent. If index is already mapped, the
// existing *IndexMap is returned
// Function will panic if one of the given index columns does not exist
//
// Automatically calls ResetSql() to ensure SQL statements are regenerated.
//
func (t *TableMap) AddIndex(name string, idxtype string, columns []string) *IndexMap {
	// check if we have a index with this name already
	for _, idx := range t.indexes {
		if idx.IndexName == name {
			return idx
		}
	}
	for _, icol := range columns {
		if res := t.ColMap(icol); res == nil { // NOTE: ColMap panics on a missing field; this branch is defensive only
			e := fmt.Sprintf("No ColumnName in table %s to create index on", t.TableName)
			panic(e)
		}
	}

	idx := &IndexMap{IndexName: name, Unique: false, IndexType: idxtype, columns: columns}
	t.indexes = append(t.indexes, idx)
	t.ResetSql()
	return idx
}

// SetVersionCol sets the column to use as the Version field. By default
// the "Version" field is used. Returns the column found, or panics
// if the struct does not contain a field matching this name.
//
// Automatically calls ResetSql() to ensure SQL statements are regenerated.
+func (t *TableMap) SetVersionCol(field string) *ColumnMap { + c := t.ColMap(field) + t.version = c + t.ResetSql() + return c +} + +// SqlForCreateTable gets a sequence of SQL commands that will create +// the specified table and any associated schema +func (t *TableMap) SqlForCreate(ifNotExists bool) string { + s := bytes.Buffer{} + dialect := t.dbmap.Dialect + + if strings.TrimSpace(t.SchemaName) != "" { + schemaCreate := "create schema" + if ifNotExists { + s.WriteString(dialect.IfSchemaNotExists(schemaCreate, t.SchemaName)) + } else { + s.WriteString(schemaCreate) + } + s.WriteString(fmt.Sprintf(" %s;", t.SchemaName)) + } + + tableCreate := "create table" + if ifNotExists { + s.WriteString(dialect.IfTableNotExists(tableCreate, t.SchemaName, t.TableName)) + } else { + s.WriteString(tableCreate) + } + s.WriteString(fmt.Sprintf(" %s (", dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) + + x := 0 + for _, col := range t.Columns { + if !col.Transient { + if x > 0 { + s.WriteString(", ") + } + stype := dialect.ToSqlType(col.gotype, col.MaxSize, col.isAutoIncr) + s.WriteString(fmt.Sprintf("%s %s", dialect.QuoteField(col.ColumnName), stype)) + + if col.isPK || col.isNotNull { + s.WriteString(" not null") + } + if col.isPK && len(t.keys) == 1 { + s.WriteString(" primary key") + } + if col.Unique { + s.WriteString(" unique") + } + if col.isAutoIncr { + s.WriteString(fmt.Sprintf(" %s", dialect.AutoIncrStr())) + } + + x++ + } + } + if len(t.keys) > 1 { + s.WriteString(", primary key (") + for x := range t.keys { + if x > 0 { + s.WriteString(", ") + } + s.WriteString(dialect.QuoteField(t.keys[x].ColumnName)) + } + s.WriteString(")") + } + if len(t.uniqueTogether) > 0 { + for _, columns := range t.uniqueTogether { + s.WriteString(", unique (") + for i, column := range columns { + if i > 0 { + s.WriteString(", ") + } + s.WriteString(dialect.QuoteField(column)) + } + s.WriteString(")") + } + } + s.WriteString(") ") + s.WriteString(dialect.CreateTableSuffix()) + 
s.WriteString(dialect.QuerySuffix()) + return s.String() +} diff --git a/vendor/github.com/go-gorp/gorp/table_bindings.go b/vendor/github.com/go-gorp/gorp/table_bindings.go new file mode 100644 index 000000000..5b049a360 --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/table_bindings.go @@ -0,0 +1,312 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp + +package gorp + +import ( + "bytes" + "fmt" + "reflect" + "sync" +) + +// CustomScanner binds a database column value to a Go type +type CustomScanner struct { + // After a row is scanned, Holder will contain the value from the database column. + // Initialize the CustomScanner with the concrete Go type you wish the database + // driver to scan the raw column into. + Holder interface{} + // Target typically holds a pointer to the target struct field to bind the Holder + // value to. + Target interface{} + // Binder is a custom function that converts the holder value to the target type + // and sets target accordingly. This function should return error if a problem + // occurs converting the holder to the target. 
+ Binder func(holder interface{}, target interface{}) error +} + +// Used to filter columns when selectively updating +type ColumnFilter func(*ColumnMap) bool + +func acceptAllFilter(col *ColumnMap) bool { + return true +} + +// Bind is called automatically by gorp after Scan() +func (me CustomScanner) Bind() error { + return me.Binder(me.Holder, me.Target) +} + +type bindPlan struct { + query string + argFields []string + keyFields []string + versField string + autoIncrIdx int + autoIncrFieldName string + once sync.Once +} + +func (plan *bindPlan) createBindInstance(elem reflect.Value, conv TypeConverter) (bindInstance, error) { + bi := bindInstance{query: plan.query, autoIncrIdx: plan.autoIncrIdx, autoIncrFieldName: plan.autoIncrFieldName, versField: plan.versField} + if plan.versField != "" { + bi.existingVersion = elem.FieldByName(plan.versField).Int() + } + + var err error + + for i := 0; i < len(plan.argFields); i++ { + k := plan.argFields[i] + if k == versFieldConst { + newVer := bi.existingVersion + 1 + bi.args = append(bi.args, newVer) + if bi.existingVersion == 0 { + elem.FieldByName(plan.versField).SetInt(int64(newVer)) + } + } else { + val := elem.FieldByName(k).Interface() + if conv != nil { + val, err = conv.ToDb(val) + if err != nil { + return bindInstance{}, err + } + } + bi.args = append(bi.args, val) + } + } + + for i := 0; i < len(plan.keyFields); i++ { + k := plan.keyFields[i] + val := elem.FieldByName(k).Interface() + if conv != nil { + val, err = conv.ToDb(val) + if err != nil { + return bindInstance{}, err + } + } + bi.keys = append(bi.keys, val) + } + + return bi, nil +} + +type bindInstance struct { + query string + args []interface{} + keys []interface{} + existingVersion int64 + versField string + autoIncrIdx int + autoIncrFieldName string +} + +func (t *TableMap) bindInsert(elem reflect.Value) (bindInstance, error) { + plan := &t.insertPlan + plan.once.Do(func() { + plan.autoIncrIdx = -1 + + s := bytes.Buffer{} + s2 := bytes.Buffer{} + 
s.WriteString(fmt.Sprintf("insert into %s (", t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) + + x := 0 + first := true + for y := range t.Columns { + col := t.Columns[y] + if !(col.isAutoIncr && t.dbmap.Dialect.AutoIncrBindValue() == "") { + if !col.Transient { + if !first { + s.WriteString(",") + s2.WriteString(",") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + + if col.isAutoIncr { + s2.WriteString(t.dbmap.Dialect.AutoIncrBindValue()) + plan.autoIncrIdx = y + plan.autoIncrFieldName = col.fieldName + } else { + if col.DefaultValue == "" { + s2.WriteString(t.dbmap.Dialect.BindVar(x)) + if col == t.version { + plan.versField = col.fieldName + plan.argFields = append(plan.argFields, versFieldConst) + } else { + plan.argFields = append(plan.argFields, col.fieldName) + } + x++ + } else { + s2.WriteString(col.DefaultValue) + } + } + first = false + } + } else { + plan.autoIncrIdx = y + plan.autoIncrFieldName = col.fieldName + } + } + s.WriteString(") values (") + s.WriteString(s2.String()) + s.WriteString(")") + if plan.autoIncrIdx > -1 { + s.WriteString(t.dbmap.Dialect.AutoIncrInsertSuffix(t.Columns[plan.autoIncrIdx])) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + plan.query = s.String() + }) + + return plan.createBindInstance(elem, t.dbmap.TypeConverter) +} + +func (t *TableMap) bindUpdate(elem reflect.Value, colFilter ColumnFilter) (bindInstance, error) { + if colFilter == nil { + colFilter = acceptAllFilter + } + + plan := &t.updatePlan + plan.once.Do(func() { + s := bytes.Buffer{} + s.WriteString(fmt.Sprintf("update %s set ", t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) + x := 0 + + for y := range t.Columns { + col := t.Columns[y] + if !col.isAutoIncr && !col.Transient && colFilter(col) { + if x > 0 { + s.WriteString(", ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + if col == t.version { + plan.versField = 
col.fieldName + plan.argFields = append(plan.argFields, versFieldConst) + } else { + plan.argFields = append(plan.argFields, col.fieldName) + } + x++ + } + } + + s.WriteString(" where ") + for y := range t.keys { + col := t.keys[y] + if y > 0 { + s.WriteString(" and ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + plan.argFields = append(plan.argFields, col.fieldName) + plan.keyFields = append(plan.keyFields, col.fieldName) + x++ + } + if plan.versField != "" { + s.WriteString(" and ") + s.WriteString(t.dbmap.Dialect.QuoteField(t.version.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + plan.argFields = append(plan.argFields, plan.versField) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + plan.query = s.String() + }) + + return plan.createBindInstance(elem, t.dbmap.TypeConverter) +} + +func (t *TableMap) bindDelete(elem reflect.Value) (bindInstance, error) { + plan := &t.deletePlan + plan.once.Do(func() { + s := bytes.Buffer{} + s.WriteString(fmt.Sprintf("delete from %s", t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName))) + + for y := range t.Columns { + col := t.Columns[y] + if !col.Transient { + if col == t.version { + plan.versField = col.fieldName + } + } + } + + s.WriteString(" where ") + for x := range t.keys { + k := t.keys[x] + if x > 0 { + s.WriteString(" and ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(k.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + plan.keyFields = append(plan.keyFields, k.fieldName) + plan.argFields = append(plan.argFields, k.fieldName) + } + if plan.versField != "" { + s.WriteString(" and ") + s.WriteString(t.dbmap.Dialect.QuoteField(t.version.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(len(plan.argFields))) + + plan.argFields = append(plan.argFields, plan.versField) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + 
plan.query = s.String() + }) + + return plan.createBindInstance(elem, t.dbmap.TypeConverter) +} + +func (t *TableMap) bindGet() *bindPlan { + plan := &t.getPlan + plan.once.Do(func() { + s := bytes.Buffer{} + s.WriteString("select ") + + x := 0 + for _, col := range t.Columns { + if !col.Transient { + if x > 0 { + s.WriteString(",") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + plan.argFields = append(plan.argFields, col.fieldName) + x++ + } + } + s.WriteString(" from ") + s.WriteString(t.dbmap.Dialect.QuotedTableForQuery(t.SchemaName, t.TableName)) + s.WriteString(" where ") + for x := range t.keys { + col := t.keys[x] + if x > 0 { + s.WriteString(" and ") + } + s.WriteString(t.dbmap.Dialect.QuoteField(col.ColumnName)) + s.WriteString("=") + s.WriteString(t.dbmap.Dialect.BindVar(x)) + + plan.keyFields = append(plan.keyFields, col.fieldName) + } + s.WriteString(t.dbmap.Dialect.QuerySuffix()) + + plan.query = s.String() + }) + + return plan +} diff --git a/vendor/github.com/go-gorp/gorp/test_all.sh b/vendor/github.com/go-gorp/gorp/test_all.sh new file mode 100755 index 000000000..4c99584ef --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/test_all.sh @@ -0,0 +1,41 @@ +#!/bin/bash -e + +# on macs, you may need to: +# export GOBUILDFLAG=-ldflags -linkmode=external + +coveralls_testflags="-v -covermode=count -coverprofile=coverage.out" + +echo "Running unit tests" +ginkgo -r -race -randomizeAllSpecs -keepGoing -- -test.run TestGorp + +echo "Testing against mysql" +export GORP_TEST_DSN=gorptest/gorptest/gorptest +export GORP_TEST_DIALECT=mysql +go test $coveralls_testflags $GOBUILDFLAG $@ . + +echo "Testing against gomysql" +export GORP_TEST_DSN=gorptest:gorptest@/gorptest +export GORP_TEST_DIALECT=gomysql +go test $coveralls_testflags $GOBUILDFLAG $@ . 
+ +echo "Testing against postgres" +export GORP_TEST_DSN="user=gorptest password=gorptest dbname=gorptest sslmode=disable" +export GORP_TEST_DIALECT=postgres +go test $coveralls_testflags $GOBUILDFLAG $@ . + +echo "Testing against sqlite" +export GORP_TEST_DSN=/tmp/gorptest.bin +export GORP_TEST_DIALECT=sqlite +go test $coveralls_testflags $GOBUILDFLAG $@ . +rm -f /tmp/gorptest.bin + +case $(go version) in + *go1.4*) + if [ "$(type -p goveralls)" != "" ]; then + goveralls -covermode=count -coverprofile=coverage.out -service=travis-ci + elif [ -x $HOME/gopath/bin/goveralls ]; then + $HOME/gopath/bin/goveralls -covermode=count -coverprofile=coverage.out -service=travis-ci + fi + ;; + *) ;; +esac diff --git a/vendor/github.com/go-gorp/gorp/transaction.go b/vendor/github.com/go-gorp/gorp/transaction.go new file mode 100644 index 000000000..4a4486fcd --- /dev/null +++ b/vendor/github.com/go-gorp/gorp/transaction.go @@ -0,0 +1,202 @@ +// Copyright 2012 James Cooper. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// Package gorp provides a simple way to marshal Go structs to and from +// SQL databases. It uses the database/sql package, and should work with any +// compliant database/sql driver. +// +// Source code and project home: +// https://github.com/go-gorp/gorp + +package gorp + +import ( + "context" + "database/sql" + "time" +) + +// Transaction represents a database transaction. +// Insert/Update/Delete/Get/Exec operations will be run in the context +// of that transaction. Transactions should be terminated with +// a call to Commit() or Rollback() +type Transaction struct { + ctx context.Context + dbmap *DbMap + tx *sql.Tx + closed bool +} + +func (t *Transaction) WithContext(ctx context.Context) SqlExecutor { + copy := &Transaction{} + *copy = *t + copy.ctx = ctx + return copy +} + +// Insert has the same behavior as DbMap.Insert(), but runs in a transaction. 
+func (t *Transaction) Insert(list ...interface{}) error {
+	return insert(t.dbmap, t, list...)
+}
+
+// Update has the same behavior as DbMap.Update(), but runs in a transaction.
+func (t *Transaction) Update(list ...interface{}) (int64, error) {
+	return update(t.dbmap, t, nil, list...)
+}
+
+// UpdateColumns has the same behavior as DbMap.UpdateColumns(), but runs in a transaction.
+func (t *Transaction) UpdateColumns(filter ColumnFilter, list ...interface{}) (int64, error) {
+	return update(t.dbmap, t, filter, list...)
+}
+
+// Delete has the same behavior as DbMap.Delete(), but runs in a transaction.
+func (t *Transaction) Delete(list ...interface{}) (int64, error) {
+	return delete(t.dbmap, t, list...)
+}
+
+// Get has the same behavior as DbMap.Get(), but runs in a transaction.
+func (t *Transaction) Get(i interface{}, keys ...interface{}) (interface{}, error) {
+	return get(t.dbmap, t, i, keys...)
+}
+
+// Select has the same behavior as DbMap.Select(), but runs in a transaction.
+func (t *Transaction) Select(i interface{}, query string, args ...interface{}) ([]interface{}, error) {
+	return hookedselect(t.dbmap, t, i, query, args...)
+}
+
+// Exec has the same behavior as DbMap.Exec(), but runs in a transaction.
+func (t *Transaction) Exec(query string, args ...interface{}) (sql.Result, error) {
+	if t.dbmap.logger != nil {
+		now := time.Now()
+		defer t.dbmap.trace(now, query, args...)
+	}
+	return exec(t, query, args...)
+}
+
+// SelectInt is a convenience wrapper around the gorp.SelectInt function.
+func (t *Transaction) SelectInt(query string, args ...interface{}) (int64, error) {
+	return SelectInt(t, query, args...)
+}
+
+// SelectNullInt is a convenience wrapper around the gorp.SelectNullInt function.
+func (t *Transaction) SelectNullInt(query string, args ...interface{}) (sql.NullInt64, error) {
+	return SelectNullInt(t, query, args...)
+}
+
+// SelectFloat is a convenience wrapper around the gorp.SelectFloat function.
+func (t *Transaction) SelectFloat(query string, args ...interface{}) (float64, error) { + return SelectFloat(t, query, args...) +} + +// SelectNullFloat is a convenience wrapper around the gorp.SelectNullFloat function. +func (t *Transaction) SelectNullFloat(query string, args ...interface{}) (sql.NullFloat64, error) { + return SelectNullFloat(t, query, args...) +} + +// SelectStr is a convenience wrapper around the gorp.SelectStr function. +func (t *Transaction) SelectStr(query string, args ...interface{}) (string, error) { + return SelectStr(t, query, args...) +} + +// SelectNullStr is a convenience wrapper around the gorp.SelectNullStr function. +func (t *Transaction) SelectNullStr(query string, args ...interface{}) (sql.NullString, error) { + return SelectNullStr(t, query, args...) +} + +// SelectOne is a convenience wrapper around the gorp.SelectOne function. +func (t *Transaction) SelectOne(holder interface{}, query string, args ...interface{}) error { + return SelectOne(t.dbmap, t, holder, query, args...) +} + +// Commit commits the underlying database transaction. +func (t *Transaction) Commit() error { + if !t.closed { + t.closed = true + if t.dbmap.logger != nil { + now := time.Now() + defer t.dbmap.trace(now, "commit;") + } + return t.tx.Commit() + } + + return sql.ErrTxDone +} + +// Rollback rolls back the underlying database transaction. +func (t *Transaction) Rollback() error { + if !t.closed { + t.closed = true + if t.dbmap.logger != nil { + now := time.Now() + defer t.dbmap.trace(now, "rollback;") + } + return t.tx.Rollback() + } + + return sql.ErrTxDone +} + +// Savepoint creates a savepoint with the given name. The name is interpolated +// directly into the SQL SAVEPOINT statement, so you must sanitize it if it is +// derived from user input. 
+func (t *Transaction) Savepoint(name string) error {
+	query := "savepoint " + t.dbmap.Dialect.QuoteField(name)
+	if t.dbmap.logger != nil {
+		now := time.Now()
+		defer t.dbmap.trace(now, query, nil)
+	}
+	_, err := exec(t, query)
+	return err
+}
+
+// RollbackToSavepoint rolls back to the savepoint with the given name. The
+// name is interpolated directly into the SQL SAVEPOINT statement, so you must
+// sanitize it if it is derived from user input.
+func (t *Transaction) RollbackToSavepoint(savepoint string) error {
+	query := "rollback to savepoint " + t.dbmap.Dialect.QuoteField(savepoint)
+	if t.dbmap.logger != nil {
+		now := time.Now()
+		defer t.dbmap.trace(now, query, nil)
+	}
+	_, err := exec(t, query)
+	return err
+}
+
+// ReleaseSavepoint releases the savepoint with the given name. The name is
+// interpolated directly into the SQL SAVEPOINT statement, so you must sanitize
+// it if it is derived from user input.
+func (t *Transaction) ReleaseSavepoint(savepoint string) error {
+	query := "release savepoint " + t.dbmap.Dialect.QuoteField(savepoint)
+	if t.dbmap.logger != nil {
+		now := time.Now()
+		defer t.dbmap.trace(now, query, nil)
+	}
+	_, err := exec(t, query)
+	return err
+}
+
+// Prepare has the same behavior as DbMap.Prepare(), but runs in a transaction.
+func (t *Transaction) Prepare(query string) (*sql.Stmt, error) {
+	if t.dbmap.logger != nil {
+		now := time.Now()
+		defer t.dbmap.trace(now, query, nil)
+	}
+	return prepare(t, query)
+}
+
+func (t *Transaction) QueryRow(query string, args ...interface{}) *sql.Row {
+	if t.dbmap.logger != nil {
+		now := time.Now()
+		defer t.dbmap.trace(now, query, args...)
+	}
+	return queryRow(t, query, args...)
+}
+
+func (t *Transaction) Query(q string, args ...interface{}) (*sql.Rows, error) {
+	if t.dbmap.logger != nil {
+		now := time.Now()
+		defer t.dbmap.trace(now, q, args...)
+	}
+	return query(t, q, args...)
+} From cb254bddf95e9b648e94c10fa102463583a92867 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Sat, 29 Dec 2018 10:27:51 +0800 Subject: [PATCH 011/302] Post processing data retrieved from db --- api/models/blocks.go | 8 ++++++++ api/models/transactions.go | 9 +++++++++ 2 files changed, 17 insertions(+) diff --git a/api/models/blocks.go b/api/models/blocks.go index 3ad5162cc..52f543ab9 100644 --- a/api/models/blocks.go +++ b/api/models/blocks.go @@ -3,6 +3,8 @@ package models import ( "database/sql" "time" + + "github.com/go-gorp/gorp" ) // BlocksModel groups operations on Blocks. @@ -21,6 +23,12 @@ type Block struct { TxCount int `db:"tx_count" json:"tx_count"` } +// PostGet is the hook after SELECT query. +func (b *Block) PostGet(s gorp.SqlExecutor) error { + b.TimestampHuman = time.Unix(0, b.Timestamp) + return nil +} + // GetBlockList get a list of blocks with height in [from, to). func (m *BlocksModel) GetBlockList(from, to int) (blocks []*Block, err error) { query := `SELECT height, hash, timestamp, version, producer, merkle_root, parent, tx_count diff --git a/api/models/transactions.go b/api/models/transactions.go index 071e39cf5..0c1e0ba2f 100644 --- a/api/models/transactions.go +++ b/api/models/transactions.go @@ -2,8 +2,11 @@ package models import ( "database/sql" + "encoding/json" "fmt" "time" + + "github.com/go-gorp/gorp" ) // TransactionsModel groups operations on Transactions. @@ -25,6 +28,12 @@ type Transaction struct { Tx interface{} `db:"-" json:"tx"` } +// PostGet is the hook after SELECT query. +func (tx *Transaction) PostGet(s gorp.SqlExecutor) error { + tx.TimestampHuman = time.Unix(0, tx.Timestamp) + return json.Unmarshal([]byte(tx.Raw), &tx.Tx) +} + // GetTransactionByHash get a transaction by its hash. 
func (m *TransactionsModel) GetTransactionByHash(hash string) (tx *Transaction, err error) { tx = &Transaction{} From 94ff6ecdc7bdeb7c8b36817d523552d750b96da7 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Thu, 3 Jan 2019 17:17:06 +0800 Subject: [PATCH 012/302] Index blocks and transactions for API node --- blockproducer/chain.go | 8 +++ blockproducer/config.go | 2 + blockproducer/storage.go | 138 +++++++++++++++++++++++++++++++-------- cmd/cqld/bootstrap.go | 1 + cmd/cqld/main.go | 3 + 5 files changed, 123 insertions(+), 29 deletions(-) diff --git a/blockproducer/chain.go b/blockproducer/chain.go index fca7d2510..597c068e8 100644 --- a/blockproducer/chain.go +++ b/blockproducer/chain.go @@ -77,6 +77,7 @@ type Chain struct { headBranch *branch branches []*branch txPool map[hash.Hash]pi.Transaction + mode string } // NewChain creates a new blockchain. @@ -252,6 +253,7 @@ func NewChainWithContext(ctx context.Context, cfg *Config) (c *Chain, err error) headBranch: head, branches: branches, txPool: txPool, + mode: cfg.Mode, } log.WithFields(log.Fields{ "index": c.locSvIndex, @@ -713,6 +715,12 @@ func (c *Chain) replaceAndSwitchToBranch( // Prepare storage procedures to update immutable database sps = append(sps, addBlock(height, newBlock)) + + // Index blocks and transactions if running as API node + if c.mode == "api" { + sps = append(sps, buildBlockIndex(height, newBlock)) + } + for k, v := range c.immutable.dirty.accounts { if v != nil { sps = append(sps, updateAccount(v)) diff --git a/blockproducer/config.go b/blockproducer/config.go index ff56602e9..d9d4c3ae1 100644 --- a/blockproducer/config.go +++ b/blockproducer/config.go @@ -30,6 +30,7 @@ const ( // Config is the main chain configuration. 
type Config struct { + Mode string Genesis *types.BPBlock DataFile string @@ -49,6 +50,7 @@ func NewConfig(genesis *types.BPBlock, dataFile string, server *rpc.Server, peers *proto.Peers, nodeID proto.NodeID, period time.Duration, tick time.Duration) *Config { config := Config{ + Mode: "normal", Genesis: genesis, DataFile: dataFile, Server: server, diff --git a/blockproducer/storage.go b/blockproducer/storage.go index 8f2e9242c..aa1d7a9d5 100644 --- a/blockproducer/storage.go +++ b/blockproducer/storage.go @@ -19,6 +19,7 @@ package blockproducer import ( "bytes" "database/sql" + "encoding/json" pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/crypto/hash" @@ -35,40 +36,77 @@ var ( ddls = [...]string{ // Chain state tables `CREATE TABLE IF NOT EXISTS "blocks" ( - "height" INT, - "hash" TEXT, - "parent" TEXT, - "encoded" BLOB, - UNIQUE ("hash") -)`, + "height" INT, + "hash" TEXT, + "parent" TEXT, + "encoded" BLOB, + UNIQUE ("hash") + );`, + `CREATE TABLE IF NOT EXISTS "txPool" ( - "type" INT, - "hash" TEXT, - "encoded" BLOB, - UNIQUE ("hash") -)`, + "type" INT, + "hash" TEXT, + "encoded" BLOB, + UNIQUE ("hash") + );`, + `CREATE TABLE IF NOT EXISTS "irreversible" ( - "id" INT, - "hash" TEXT, - UNIQUE ("id") -)`, + "id" INT, + "hash" TEXT, + UNIQUE ("id") + );`, + // Meta state tables `CREATE TABLE IF NOT EXISTS "accounts" ( - "address" TEXT, - "encoded" BLOB, - UNIQUE ("address") -)`, + "address" TEXT, + "encoded" BLOB, + UNIQUE ("address") + );`, + `CREATE TABLE IF NOT EXISTS "shardChain" ( - "address" TEXT, - "id" TEXT, - "encoded" BLOB, - UNIQUE ("address", "id") -)`, + "address" TEXT, + "id" TEXT, + "encoded" BLOB, + UNIQUE ("address", "id") + );`, + `CREATE TABLE IF NOT EXISTS "provider" ( - "address" TEXT, - "encoded" BLOB, - UNIQUE ("address") -)`, + "address" TEXT, + "encoded" BLOB, + UNIQUE ("address") + );`, + + `CREATE TABLE IF NOT EXISTS "indexed_blocks" ( + "height" INTEGER PRIMARY KEY, + "hash" TEXT, + 
"timestamp" INTEGER, + "version" INTEGER, + "producer" TEXT, + "merkle_root" TEXT, + "parent" TEXT, + "tx_count" INTEGER + );`, + + `CREATE INDEX IF NOT EXISTS "idx__indexed_blocks__hash" ON "indexed_blocks" ("hash");`, + `CREATE INDEX IF NOT EXISTS "idx__indexed_blocks__timestamp" ON "indexed_blocks" ("timestamp" DESC);`, + + `CREATE TABLE IF NOT EXISTS "indexed_transactions" ( + "block_height" INTEGER, + "tx_index" INTEGER, + "hash" TEXT, + "block_hash" TEXT, + "timestamp" INTEGER, + "tx_type" INTEGER, + "address" TEXT, + "raw" TEXT, + PRIMARY KEY ("block_height", "tx_index") + );`, + + `CREATE INDEX IF NOT EXISTS "idx__indexed_transactions__hash" ON "indexed_transactions" ("hash");`, + `CREATE INDEX IF NOT EXISTS "idx__indexed_transactions__block_hash" ON "indexed_transactions" ("block_hash");`, + `CREATE INDEX IF NOT EXISTS "idx__indexed_transactions__timestamp" ON "indexed_transactions" ("timestamp" DESC);`, + `CREATE INDEX IF NOT EXISTS "idx__indexed_transactions__tx_type__timestamp" ON "indexed_transactions" ("tx_type", "timestamp" DESC);`, + `CREATE INDEX IF NOT EXISTS "idx__indexed_transactions__address__timestamp" ON "indexed_transactions" ("address", "timestamp" DESC);`, } ) @@ -153,7 +191,49 @@ func addTx(t pi.Transaction) storageProcedure { uint32(t.GetTransactionType()), t.Hash().String(), enc.Bytes()) - return + return err + } +} + +func buildBlockIndex(height uint32, b *types.BPBlock) storageProcedure { + return func(tx *sql.Tx) (err error) { + var p = b.Producer() + if _, err = tx.Exec(`INSERT OR REPLACE INTO "indexed_blocks" + ("height", "hash", "timestamp", "version", "producer", + "merkle_root", "parent", "tx_count") VALUES (?,?,?,?,?,?,?,?)`, + height, + b.BlockHash().String(), + b.Timestamp().UnixNano(), + b.SignedHeader.Version, + p.String(), + b.SignedHeader.MerkleRoot.String(), + b.ParentHash().String(), + len(b.Transactions), + ); err != nil { + return err + } + + for txIndex, t := range b.Transactions { + var ( + addr = 
t.GetAccountAddress() + raw, _ = json.Marshal(t) + ) + if _, err := tx.Exec(`INSERT OR REPLACE INTO "indexed_transactions" + ("block_height", "tx_index", "hash", "block_hash", "timestamp", + "tx_type", "address", "raw") VALUES (?,?,?,?,?,?,?,?)`, + height, + txIndex, + t.Hash().String(), + b.BlockHash().String(), + 0, // FIXME: use Transaction.GetTimestamp() + t.GetTransactionType(), + addr.String(), + string(raw), + ); err != nil { + return err + } + } + return nil } } diff --git a/cmd/cqld/bootstrap.go b/cmd/cqld/bootstrap.go index 8a6157827..6eec252c3 100644 --- a/cmd/cqld/bootstrap.go +++ b/cmd/cqld/bootstrap.go @@ -135,6 +135,7 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { conf.GConf.BPPeriod, conf.GConf.BPTick, ) + chainConfig.Mode = mode chain, err := bp.NewChain(chainConfig) if err != nil { log.WithError(err).Error("init chain failed") diff --git a/cmd/cqld/main.go b/cmd/cqld/main.go index 5164735d7..e52bd3463 100644 --- a/cmd/cqld/main.go +++ b/cmd/cqld/main.go @@ -58,6 +58,8 @@ var ( clientMode bool clientOperation string + + mode string // "normal", "api" ) const name = `cqld` @@ -75,6 +77,7 @@ func init() { flag.BoolVar(&clientMode, "client", false, "run as client") flag.StringVar(&clientOperation, "operation", "FindNeighbor", "client operation") + flag.StringVar(&mode, "mode", "normal", "run mode, e.g. 
normal, api") flag.Usage = func() { fmt.Fprintf(os.Stderr, "\n%s\n\n", desc) From f61913dc0e838c075d0f934430842b135e632e77 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Thu, 3 Jan 2019 17:40:59 +0800 Subject: [PATCH 013/302] Limit transactions count per block --- blockproducer/branch.go | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/blockproducer/branch.go b/blockproducer/branch.go index 8c511ccef..7bdb56b84 100644 --- a/blockproducer/branch.go +++ b/blockproducer/branch.go @@ -29,6 +29,8 @@ import ( "github.com/pkg/errors" ) +const transactionsLimit = 1000 + type branch struct { head *blockNode preview *metaState @@ -172,20 +174,28 @@ func (b *branch) produceBlock( br *branch, bl *types.BPBlock, err error, ) { var ( - cpy = b.makeArena() - txs = cpy.sortUnpackedTxs() - out = make([]pi.Transaction, 0, len(txs)) - ierr error + cpy = b.makeArena() + txs = cpy.sortUnpackedTxs() + ierr error + packCount = transactionsLimit ) - for _, v := range txs { + + if len(txs) < packCount { + packCount = len(txs) + } + + out := make([]pi.Transaction, len(txs)) + for i := 0; i < packCount; i++ { + v := txs[i] var k = v.Hash() if ierr = cpy.preview.apply(v); ierr != nil { continue } delete(cpy.unpacked, k) cpy.packed[k] = v - out = append(out, v) + out[i] = v } + // Create new block and update head var block = &types.BPBlock{ SignedHeader: types.BPSignedHeader{ From 6e89406df4860b6c2d79b0c649f2ecfd66a5ad78 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Thu, 3 Jan 2019 18:05:03 +0800 Subject: [PATCH 014/302] Fix pack count when producing blocks --- blockproducer/branch.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/blockproducer/branch.go b/blockproducer/branch.go index 7bdb56b84..03209443a 100644 --- a/blockproducer/branch.go +++ b/blockproducer/branch.go @@ -29,7 +29,7 @@ import ( "github.com/pkg/errors" ) -const transactionsLimit = 1000 +const transactionsLimit = 10000 type branch struct { head *blockNode @@ -184,7 
+184,7 @@ func (b *branch) produceBlock( packCount = len(txs) } - out := make([]pi.Transaction, len(txs)) + out := make([]pi.Transaction, packCount) for i := 0; i < packCount; i++ { v := txs[i] var k = v.Hash() From 2994eb7f720f9d248e12a7ccbf4a6210807fd5fd Mon Sep 17 00:00:00 2001 From: Ggicci Date: Thu, 3 Jan 2019 18:31:56 +0800 Subject: [PATCH 015/302] Check error: ErrTooManyTransactionsInBlock --- blockproducer/branch.go | 13 +++++++++++-- blockproducer/errors.go | 3 ++- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/blockproducer/branch.go b/blockproducer/branch.go index 03209443a..5cf60da2b 100644 --- a/blockproducer/branch.go +++ b/blockproducer/branch.go @@ -58,6 +58,10 @@ func newBranch( } // Apply new blocks to view and pool for _, bn := range list { + if len(bn.block.Transactions) > transactionsLimit { + return nil, ErrTooManyTransactionsInBlock + } + for _, v := range bn.block.Transactions { var k = v.Hash() // Check in tx pool @@ -128,6 +132,11 @@ func (b *branch) applyBlock(n *blockNode) (br *branch, err error) { return } var cpy = b.makeArena() + + if len(n.block.Transactions) > transactionsLimit { + return nil, ErrTooManyTransactionsInBlock + } + for _, v := range n.block.Transactions { var k = v.Hash() // Check in tx pool @@ -184,7 +193,7 @@ func (b *branch) produceBlock( packCount = len(txs) } - out := make([]pi.Transaction, packCount) + out := make([]pi.Transaction, 0, packCount) for i := 0; i < packCount; i++ { v := txs[i] var k = v.Hash() @@ -193,7 +202,7 @@ func (b *branch) produceBlock( } delete(cpy.unpacked, k) cpy.packed[k] = v - out[i] = v + out = append(out, v) } // Create new block and update head diff --git a/blockproducer/errors.go b/blockproducer/errors.go index 1357a2a29..5c9c24af0 100644 --- a/blockproducer/errors.go +++ b/blockproducer/errors.go @@ -31,7 +31,8 @@ var ( ErrInvalidMerkleTreeRoot = errors.New("Block merkle tree root does not match the tx hashes") // ErrParentNotMatch defines invalid parent hash. 
ErrParentNotMatch = errors.New("Block's parent hash cannot match best block") - + // ErrTooManyTransactionsInBlock defines error of too many transactions in a block. + ErrTooManyTransactionsInBlock = errors.New("too many transactions in block") // ErrBalanceOverflow indicates that there will be an overflow after balance manipulation. ErrBalanceOverflow = errors.New("balance overflow") // ErrInsufficientBalance indicates that an account has insufficient balance for spending. From e6d9524688799a549341495ae087155fb458a5f7 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Thu, 3 Jan 2019 20:26:57 +0800 Subject: [PATCH 016/302] Count valid transactions while packing --- blockproducer/branch.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/blockproducer/branch.go b/blockproducer/branch.go index 5cf60da2b..97bd07140 100644 --- a/blockproducer/branch.go +++ b/blockproducer/branch.go @@ -194,8 +194,7 @@ func (b *branch) produceBlock( } out := make([]pi.Transaction, 0, packCount) - for i := 0; i < packCount; i++ { - v := txs[i] + for _, v := range txs { var k = v.Hash() if ierr = cpy.preview.apply(v); ierr != nil { continue @@ -203,6 +202,9 @@ func (b *branch) produceBlock( delete(cpy.unpacked, k) cpy.packed[k] = v out = append(out, v) + if len(out) == packCount { + break + } } // Create new block and update head From 1ffcf2d65b12b2654e6d6c9bebc3cbca6782eca7 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Thu, 3 Jan 2019 20:57:23 +0800 Subject: [PATCH 017/302] Suppress producing blocks for API node --- blockproducer/chain.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/blockproducer/chain.go b/blockproducer/chain.go index 597c068e8..f5d9ed2ce 100644 --- a/blockproducer/chain.go +++ b/blockproducer/chain.go @@ -392,7 +392,7 @@ func (c *Chain) advanceNextHeight(now time.Time, d time.Duration) { defer c.increaseNextHeight() // Skip if it's not my turn - if !c.isMyTurn() { + if c.mode == "api" || !c.isMyTurn() { return } // Normally, a block 
producing should start right after the new period, but more time may also From 2518fdff8c1e0bed2c3e690ceb0863d5053d1437 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Fri, 4 Jan 2019 11:18:29 +0800 Subject: [PATCH 018/302] Don't start JSON-RPC server if wsapiAddr is empty --- cmd/cqld/bootstrap.go | 12 +++++++----- cmd/cqld/main.go | 2 +- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/cmd/cqld/bootstrap.go b/cmd/cqld/bootstrap.go index 4475ab6dc..2e712137a 100644 --- a/cmd/cqld/bootstrap.go +++ b/cmd/cqld/bootstrap.go @@ -158,12 +158,14 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { }() // start json-rpc server - jsonrpcServer := &api.Service{ - WebsocketAddr: wsapiAddr, - ReadTimeout: 60 * time.Second, - WriteTimeout: 60 * time.Second, + if wsapiAddr != "" { + jsonrpcServer := &api.Service{ + WebsocketAddr: wsapiAddr, + ReadTimeout: 60 * time.Second, + WriteTimeout: 60 * time.Second, + } + jsonrpcServer.StartServers() } - jsonrpcServer.StartServers() signalCh := make(chan os.Signal, 1) signal.Notify( diff --git a/cmd/cqld/main.go b/cmd/cqld/main.go index 981f830fc..a0428236b 100644 --- a/cmd/cqld/main.go +++ b/cmd/cqld/main.go @@ -78,7 +78,7 @@ func init() { flag.BoolVar(&clientMode, "client", false, "run as client") flag.StringVar(&clientOperation, "operation", "FindNeighbor", "client operation") - flag.StringVar(&wsapiAddr, "wsapi", ":8546", "Address of the websocket JSON-RPC API") + flag.StringVar(&wsapiAddr, "wsapi", "", "Address of the websocket JSON-RPC API") flag.Usage = func() { fmt.Fprintf(os.Stderr, "\n%s\n\n", desc) From e747057e1e797336097f90d614dc28ebb1f5a0fc Mon Sep 17 00:00:00 2001 From: Ggicci Date: Fri, 4 Jan 2019 11:57:07 +0800 Subject: [PATCH 019/302] Remove cqld client mode code --- cmd/cqld/client.go | 145 --------------------------------------------- cmd/cqld/main.go | 15 ----- 2 files changed, 160 deletions(-) delete mode 100644 cmd/cqld/client.go diff --git a/cmd/cqld/client.go b/cmd/cqld/client.go deleted 
file mode 100644 index 39a18c47c..000000000 --- a/cmd/cqld/client.go +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package main - -import ( - "flag" - "fmt" - "net" - "path/filepath" - "strings" - "syscall" - "time" - - "github.com/CovenantSQL/CovenantSQL/conf" - "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/rpc" - "github.com/CovenantSQL/CovenantSQL/utils/log" - "golang.org/x/crypto/ssh/terminal" -) - -func runClient(nodeID proto.NodeID) (err error) { - var idx int - for i, n := range conf.GConf.KnownNodes { - if n.ID == nodeID { - idx = i - break - } - } - - rootPath := conf.GConf.WorkingRoot - pubKeyStorePath := filepath.Join(rootPath, conf.GConf.PubKeyStoreFile) - privateKeyPath := filepath.Join(rootPath, conf.GConf.PrivateKeyFile) - - // read master key - var masterKey []byte - if !conf.GConf.IsTestMode { - fmt.Print("Type in Master key to continue: ") - masterKey, err = terminal.ReadPassword(syscall.Stdin) - if err != nil { - fmt.Printf("Failed to read Master Key: %v", err) - } - fmt.Println("") - } - - err = kms.InitLocalKeyPair(privateKeyPath, masterKey) - if err != nil { - log.WithError(err).Error("init local key pair failed") - return - } - - conf.GConf.KnownNodes[idx].PublicKey, err = kms.GetLocalPublicKey() - if err != nil { - log.WithError(err).Error("get local public key 
failed") - return - } - //nodeInfo := asymmetric.GetPubKeyNonce(AllNodes[idx].PublicKey, 20, 500*time.Millisecond, nil) - //log.Debugf("client pubkey:\n%x", AllNodes[idx].PublicKey.Serialize()) - //log.Debugf("client nonce:\n%v", nodeInfo) - - // init nodes - log.Info("init peers") - _, _, _, err = initNodePeers(nodeID, pubKeyStorePath) - if err != nil { - return - } - - // do client request - if err = clientRequest(clientOperation, flag.Arg(0)); err != nil { - return - } - - return -} - -func clientRequest(reqType string, sql string) (err error) { - log.SetLevel(log.DebugLevel) - leaderNodeID := kms.BP.NodeID - var conn net.Conn - var client *rpc.Client - - if len(reqType) > 0 && strings.Title(reqType[:1]) == "P" { - if conn, err = rpc.DialToNode(leaderNodeID, rpc.GetSessionPoolInstance(), false); err != nil { - return - } - if client, err = rpc.InitClientConn(conn); err != nil { - return - } - reqType = "Ping" - node1 := proto.NewNode() - node1.InitNodeCryptoInfo(100 * time.Millisecond) - - reqA := &proto.PingReq{ - Node: *node1, - } - - respA := new(proto.PingResp) - log.Debugf("req %#v: %#v", reqType, reqA) - err = client.Call("DHT."+reqType, reqA, respA) - if err != nil { - log.Fatal(err) - } - log.Debugf("resp %#v: %#v", reqType, respA) - } else { - for _, bp := range conf.GConf.KnownNodes { - if bp.Role == proto.Leader || bp.Role == proto.Follower { - if conn, err = rpc.DialToNode(bp.ID, rpc.GetSessionPoolInstance(), false); err != nil { - return - } - if client, err = rpc.InitClientConn(conn); err != nil { - return - } - log.WithField("bp", bp.ID).Debug("calling BP") - reqType = "FindNeighbor" - req := &proto.FindNeighborReq{ - ID: proto.NodeID(flag.Arg(0)), - Count: 10, - } - resp := new(proto.FindNeighborResp) - log.Debugf("req %#v: %#v", reqType, req) - err = client.Call("DHT."+reqType, req, resp) - if err != nil { - log.Fatal(err) - } - log.Debugf("resp %#v: %#v", reqType, resp) - } - } - } - - return -} diff --git a/cmd/cqld/main.go b/cmd/cqld/main.go 
index a0428236b..4f536e039 100644 --- a/cmd/cqld/main.go +++ b/cmd/cqld/main.go @@ -56,9 +56,6 @@ var ( showVersion bool configFile string - clientMode bool - clientOperation string - wsapiAddr string ) @@ -75,9 +72,6 @@ func init() { flag.StringVar(&cpuProfile, "cpu-profile", "", "Path to file for CPU profiling information") flag.StringVar(&memProfile, "mem-profile", "", "Path to file for memory profiling information") - flag.BoolVar(&clientMode, "client", false, "run as client") - flag.StringVar(&clientOperation, "operation", "FindNeighbor", "client operation") - flag.StringVar(&wsapiAddr, "wsapi", "", "Address of the websocket JSON-RPC API") flag.Usage = func() { @@ -131,15 +125,6 @@ func main() { utils.StartProfile(cpuProfile, memProfile) defer utils.StopProfile() - if clientMode { - if err := runClient(conf.GConf.ThisNodeID); err != nil { - log.WithError(err).Fatal("run client failed") - } else { - log.Info("run client success") - } - return - } - if err := runNode(conf.GConf.ThisNodeID, conf.GConf.ListenAddr); err != nil { log.WithError(err).Fatal("run kayak failed") } From 25174a2e1d24d1e8f4572cbebc52361f49f11c00 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Fri, 4 Jan 2019 14:16:14 +0800 Subject: [PATCH 020/302] Remove Signee and Signature fields of models.Transaction --- api/models/transactions.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/api/models/transactions.go b/api/models/transactions.go index 0c1e0ba2f..e736a71f2 100644 --- a/api/models/transactions.go +++ b/api/models/transactions.go @@ -21,9 +21,7 @@ type Transaction struct { Timestamp int64 `db:"timestamp" json:"-"` TimestampHuman time.Time `db:"-" json:"timestamp"` TxType int `db:"tx_type" json:"type"` - Signee string `db:"signee" json:"signee"` Address string `db:"address" json:"address"` - Signature string `db:"signature" json:"signature"` Raw string `db:"raw" json:"raw"` Tx interface{} `db:"-" json:"tx"` } @@ -38,7 +36,7 @@ func (tx *Transaction) PostGet(s 
gorp.SqlExecutor) error { func (m *TransactionsModel) GetTransactionByHash(hash string) (tx *Transaction, err error) { tx = &Transaction{} query := `SELECT block_height, tx_index, hash, block_hash, timestamp, tx_type, - signee, address, signature, raw + address, raw FROM indexed_transactions WHERE hash = ?` err = chaindb.SelectOne(tx, query, hash) if err == sql.ErrNoRows { @@ -64,7 +62,7 @@ func (m *TransactionsModel) GetTransactionList(since, direction string, limit in } query := fmt.Sprintf(`SELECT block_height, tx_index, hash, block_hash, - timestamp, tx_type, signee, address, signature, raw + timestamp, tx_type, address, raw FROM indexed_transactions WHERE block_height %s ? and tx_index %s ? ORDER BY block_height %s, tx_index %s From 0803124173c53c8f84716ab49a28e8ce5b7f93bc Mon Sep 17 00:00:00 2001 From: Ggicci Date: Fri, 4 Jan 2019 15:00:49 +0800 Subject: [PATCH 021/302] Don't use GConf in api package --- api/models/models.go | 9 ++++----- api/service.go | 3 ++- cmd/cqld/bootstrap.go | 1 + 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/api/models/models.go b/api/models/models.go index 9e8809ba6..f918bc2ea 100644 --- a/api/models/models.go +++ b/api/models/models.go @@ -4,7 +4,6 @@ import ( "database/sql" "fmt" - "github.com/CovenantSQL/CovenantSQL/conf" _ "github.com/CovenantSQL/go-sqlite3-encrypt" // sqlite3 driver "github.com/go-gorp/gorp" "github.com/pkg/errors" @@ -15,12 +14,12 @@ var ( ) // InitModels setup the models package. 
-func InitModels() error { - return initChainDBConnection() +func InitModels(dbFile string) error { + return initChainDBConnection(dbFile) } -func initChainDBConnection() error { - dsn := fmt.Sprintf("%s?_journal=WAL&mode=ro", conf.GConf.BP.ChainFileName) +func initChainDBConnection(dbFile string) error { + dsn := fmt.Sprintf("%s?_journal=WAL&mode=ro", dbFile) underdb, err := sql.Open("sqlite3", dsn) if err != nil { return errors.WithMessage(err, "unable to open chain.db") diff --git a/api/service.go b/api/service.go index 18e1240bd..ec7dfcc2f 100644 --- a/api/service.go +++ b/api/service.go @@ -21,6 +21,7 @@ import ( // Service configs the API service. type Service struct { + DBFile string // the path to the database in which stored indexed data WebsocketAddr string // start a websocket server ReadTimeout time.Duration WriteTimeout time.Duration @@ -41,7 +42,7 @@ func (s *Service) StopServers() { // RunServers start API servers in a blocking way, fatal on errors. func (s *Service) RunServers() { // setup database - if err := models.InitModels(); err != nil { + if err := models.InitModels(s.DBFile); err != nil { log.WithError(err).Fatal("api: init models failed") return } diff --git a/cmd/cqld/bootstrap.go b/cmd/cqld/bootstrap.go index 2e712137a..bd4ca11b0 100644 --- a/cmd/cqld/bootstrap.go +++ b/cmd/cqld/bootstrap.go @@ -160,6 +160,7 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { // start json-rpc server if wsapiAddr != "" { jsonrpcServer := &api.Service{ + DBFile: conf.GConf.BP.ChainFileName, WebsocketAddr: wsapiAddr, ReadTimeout: 60 * time.Second, WriteTimeout: 60 * time.Second, From 316041380eb3613c60bc20c345068a12da435437 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Fri, 4 Jan 2019 16:00:12 +0800 Subject: [PATCH 022/302] Add tx TTL, refactor chain codes --- blockproducer/bpinfo.go | 68 +++++ blockproducer/chain.go | 283 +++++++----------- blockproducer/chain_gossip.go | 136 +++++++++ .../{chain_service.go => chain_io.go} | 0 
blockproducer/chain_test.go | 8 +- blockproducer/limits/limits.go | 24 ++ blockproducer/rpc.go | 13 +- client/driver.go | 1 + cmd/cql-faucet/verifier.go | 2 +- cmd/cql-minerd/provide_service.go | 1 + cmd/cql-utils/rpc.go | 1 + sqlchain/chain.go | 2 +- types/bprpc.go | 4 +- 13 files changed, 351 insertions(+), 192 deletions(-) create mode 100644 blockproducer/bpinfo.go create mode 100644 blockproducer/chain_gossip.go rename blockproducer/{chain_service.go => chain_io.go} (100%) create mode 100644 blockproducer/limits/limits.go diff --git a/blockproducer/bpinfo.go b/blockproducer/bpinfo.go new file mode 100644 index 000000000..cb925448b --- /dev/null +++ b/blockproducer/bpinfo.go @@ -0,0 +1,68 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package blockproducer + +import ( + "fmt" + + "github.com/CovenantSQL/CovenantSQL/proto" +) + +type blockProducerInfo struct { + rank uint32 + total uint32 + role string + nodeID proto.NodeID +} + +// String implements fmt.Stringer. 
+func (i *blockProducerInfo) String() string { + return fmt.Sprintf("[%d/%d] (%s) %s", i.rank+1, i.total, i.role, i.nodeID) +} + +func newBlockProduerInfos( + localNodeID proto.NodeID, peers *proto.Peers, +) ( + localBPInfo *blockProducerInfo, bpInfos []*blockProducerInfo, err error, +) { + var ( + total = len(peers.PeersHeader.Servers) + index int32 + found bool + ) + + if index, found = peers.Find(localNodeID); !found { + err = ErrLocalNodeNotFound + return + } + + bpInfos = make([]*blockProducerInfo, total) + for i, v := range peers.PeersHeader.Servers { + var role = "F" + if v == peers.Leader { + role = "L" + } + bpInfos[i] = &blockProducerInfo{ + rank: uint32(i), + total: uint32(total), + role: role, + nodeID: v, + } + } + localBPInfo = bpInfos[index] + return +} diff --git a/blockproducer/chain.go b/blockproducer/chain.go index cea195eda..1420ea5cc 100644 --- a/blockproducer/chain.go +++ b/blockproducer/chain.go @@ -22,10 +22,10 @@ import ( "math" "os" "sync" - "sync/atomic" "time" pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" + pl "github.com/CovenantSQL/CovenantSQL/blockproducer/limits" "github.com/CovenantSQL/CovenantSQL/chainbus" "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto" @@ -55,8 +55,8 @@ type Chain struct { st xi.Storage bs chainbus.Bus // Channels for incoming blocks and transactions - pendingBlocks chan *types.BPBlock - pendingTxs chan pi.Transaction + pendingBlocks chan *types.BPBlock + pendingAddTxReqs chan *types.AddTxReq // The following fields are read-only in runtime address proto.AccountAddress genesisTime time.Time @@ -64,11 +64,10 @@ type Chain struct { tick time.Duration sync.RWMutex // protects following fields - peers *proto.Peers - nodeID proto.NodeID + bpInfos []*blockProducerInfo + localBPInfo *blockProducerInfo + localNodeID proto.NodeID confirms uint32 - serversNum uint32 - locSvIndex uint32 nextHeight uint32 offset time.Duration lastIrre *blockNode @@ -88,7 +87,6 @@ func 
NewChain(cfg *Config) (c *Chain, err error) { func NewChainWithContext(ctx context.Context, cfg *Config) (c *Chain, err error) { var ( existed bool - ok bool ierr error cld context.Context @@ -107,9 +105,10 @@ func NewChainWithContext(ctx context.Context, cfg *Config) (c *Chain, err error) br, head *branch headIndex int - pubKey *asymmetric.PublicKey - addr proto.AccountAddress - locSvIndex int32 + pubKey *asymmetric.PublicKey + addr proto.AccountAddress + bpInfos []*blockProducerInfo + localBPInfo *blockProducerInfo bus = chainbus.New() ) @@ -206,8 +205,7 @@ func NewChainWithContext(ctx context.Context, cfg *Config) (c *Chain, err error) } // Setup peer list - if locSvIndex, ok = cfg.Peers.Find(cfg.NodeID); !ok { - err = ErrLocalNodeNotFound + if localBPInfo, bpInfos, err = newBlockProduerInfos(cfg.NodeID, cfg.Peers); err != nil { return } if t = cfg.ConfirmThreshold; t <= 0.0 { @@ -230,21 +228,20 @@ func NewChainWithContext(ctx context.Context, cfg *Config) (c *Chain, err error) st: st, bs: bus, - pendingBlocks: make(chan *types.BPBlock), - pendingTxs: make(chan pi.Transaction), + pendingBlocks: make(chan *types.BPBlock), + pendingAddTxReqs: make(chan *types.AddTxReq), address: addr, genesisTime: cfg.Genesis.SignedHeader.Timestamp, period: cfg.Period, tick: cfg.Tick, - peers: cfg.Peers, - nodeID: cfg.NodeID, - confirms: m, - serversNum: l, - locSvIndex: uint32(locSvIndex), - nextHeight: head.head.height + 1, - offset: time.Duration(0), // TODO(leventeliu): initialize offset + bpInfos: bpInfos, + localBPInfo: localBPInfo, + localNodeID: cfg.NodeID, + confirms: m, + nextHeight: head.head.height + 1, + offset: time.Duration(0), // TODO(leventeliu): initialize offset lastIrre: irre, immutable: immutable, @@ -254,11 +251,10 @@ func NewChainWithContext(ctx context.Context, cfg *Config) (c *Chain, err error) txPool: txPool, } log.WithFields(log.Fields{ - "index": c.locSvIndex, - "bp_number": c.serversNum, - "period": c.period.String(), - "tick": c.tick.String(), - 
"height": c.head().height, + "local": c.getLocalBPInfo().String(), + "period": c.period.String(), + "tick": c.tick.String(), + "height": c.head().height, }).Debug("current chain state") return } @@ -283,11 +279,14 @@ func (c *Chain) Start() { // Stop stops the main process of the sql-chain. func (c *Chain) Stop() (err error) { // Stop main process - log.WithFields(log.Fields{"peer": c.peerInfo()}).Debug("stopping chain") + var le = log.WithFields(log.Fields{ + "local": c.getLocalBPInfo().String(), + }) + le.Debug("stopping chain") c.stop() - log.WithFields(log.Fields{"peer": c.peerInfo()}).Debug("chain service stopped") + le.Debug("chain service stopped") c.st.Close() - log.WithFields(log.Fields{"peer": c.peerInfo()}).Debug("chain database closed") + le.Debug("chain database closed") // FIXME(leventeliu): RPC server should provide an `unregister` method to detach chain service // instance. Add it to Chain.stop(), then working channels can be closed safely. @@ -345,35 +344,16 @@ func (c *Chain) produceBlock(now time.Time) (err error) { if b, err = c.produceAndStoreBlock(now, priv); err != nil { return } - log.WithField("block", b).Debug("produced new block") - - for _, s := range c.getPeers().Servers { - if !s.IsEqual(&c.nodeID) { - func(id proto.NodeID) { - c.goFuncWithTimeout(func(ctx context.Context) { - var ( - req = &types.AdviseNewBlockReq{ - Envelope: proto.Envelope{ - // TODO(lambda): Add fields. 
- }, - Block: b, - } - resp = &types.AdviseNewBlockResp{} - err = c.cl.CallNodeWithContext( - ctx, id, route.MCCAdviseNewBlock.String(), req, resp) - ) - log.WithFields(log.Fields{ - "local": c.peerInfo(), - "remote": id, - "block_time": b.Timestamp(), - "block_hash": b.BlockHash().Short(4), - "parent_hash": b.ParentHash().Short(4), - }).WithError(err).Debug("broadcasting new block to other peers") - }, c.period) - }(s) - } - } - return err + + log.WithFields(log.Fields{ + "block_time": b.Timestamp(), + "block_hash": b.BlockHash().Short(4), + "parent_hash": b.ParentHash().Short(4), + }).Debug("produced new block") + + // Broadcast to other block producers + c.nonblockingBroadcastBlock(b) + return } // advanceNextHeight does the check and runs block producing if its my turn. @@ -381,8 +361,7 @@ func (c *Chain) advanceNextHeight(now time.Time, d time.Duration) { var elapsed = -d log.WithFields(log.Fields{ - "bp_number": c.serversNum, - "node_index": c.locSvIndex, + "local": c.getLocalBPInfo().String(), "enclosing_height": c.getNextHeight() - 1, "using_timestamp": now.Format(time.RFC3339Nano), "elapsed_seconds": elapsed.Seconds(), @@ -450,66 +429,68 @@ func (c *Chain) processBlocks(ctx context.Context) { } } -func (c *Chain) addTx(tx pi.Transaction) { +func (c *Chain) addTx(req *types.AddTxReq) { select { - case c.pendingTxs <- tx: + case c.pendingAddTxReqs <- req: case <-c.ctx.Done(): log.WithError(c.ctx.Err()).Warn("add transaction aborted") } } -func (c *Chain) processTx(tx pi.Transaction) { - if err := tx.Verify(); err != nil { - log.WithError(err).Errorf("failed to verify transaction with hash: %s, address: %s, tx type: %s", - tx.Hash(), tx.GetAccountAddress(), tx.GetTransactionType().String()) +func (c *Chain) processAddTxReq(addTxReq *types.AddTxReq) { + // Nil check + if addTxReq == nil || addTxReq.Tx == nil { + log.Warn("empty add tx request") return } + + var ( + ttl = addTxReq.TTL + tx = addTxReq.Tx + le = log.WithFields(log.Fields{ + "hash": 
tx.Hash().Short(4), + "address": tx.GetAccountAddress(), + "type": tx.GetTransactionType().String(), + }) + err error + ) + + // Existense check if ok := func() (ok bool) { c.RLock() defer c.RUnlock() _, ok = c.txPool[tx.Hash()] return }(); ok { - log.WithFields(log.Fields{ - "tx_hash": tx.Hash().Short(4), - }).Debug("tx already exists, abort processing") + le.Debug("tx already exists, abort processing") return } - for _, s := range c.getPeers().Servers { - if !s.IsEqual(&c.nodeID) { - func(id proto.NodeID) { - c.goFuncWithTimeout(func(ctx context.Context) { - var ( - req = &types.AddTxReq{ - Envelope: proto.Envelope{ - // TODO(lambda): Add fields. - }, - Tx: tx, - } - resp = &types.AddTxResp{} - err = c.cl.CallNodeWithContext( - ctx, id, route.MCCAddTx.String(), req, resp) - ) - log.WithFields(log.Fields{ - "local": c.peerInfo(), - "remote": id, - "tx_hash": tx.Hash().Short(4), - "tx_type": tx.GetTransactionType(), - }).WithError(err).Debug("broadcasting transaction to other peers") - }, c.tick) - }(s) - } + + // Verify transaction + if err = tx.Verify(); err != nil { + le.WithError(err).Warn("failed to verify transaction") + return } - if err := c.storeTx(tx); err != nil { - log.WithError(err).Error("failed to add transaction") + + // Broadcast to other block producers + if ttl > pl.MaxTxBroadcastTTL { + ttl = pl.MaxTxBroadcastTTL + } + if ttl > 0 { + c.nonblockingBroadcastTx(ttl-1, tx) + } + + // Add to tx pool + if err = c.storeTx(tx); err != nil { + le.WithError(err).Error("failed to add transaction") } } func (c *Chain) processTxs(ctx context.Context) { for { select { - case tx := <-c.pendingTxs: - c.processTx(tx) + case addTxReq := <-c.pendingAddTxReqs: + c.processAddTxReq(addTxReq) case <-ctx.Done(): log.WithError(c.ctx.Err()).Info("abort transaction processing") return @@ -540,7 +521,7 @@ func (c *Chain) mainCycle(ctx context.Context) { c.advanceNextHeight(t, d) } else { log.WithFields(log.Fields{ - "peer": c.peerInfo(), + "peer": 
c.getLocalBPInfo().String(), "next_height": c.getNextHeight(), "head_height": c.head().height, "head_block": c.head().hash.Short(4), @@ -588,76 +569,26 @@ func (c *Chain) syncCurrentHead(ctx context.Context) (ok bool) { ok = true return } + // Initiate blocking gossip calls to fetch block of the current height, // with timeout of one tick. var ( - wg = &sync.WaitGroup{} - cld, ccl = context.WithTimeout(ctx, c.tick) - unreachable uint32 - ) - defer func() { - wg.Wait() - ccl() - var needConfirms, serversNum = func() (cf, sn uint32) { + unreachable = c.blockingFetchBlock(ctx, h) + + needConfirms, serversNum = func() (cf, sn uint32) { c.RLock() defer c.RUnlock() - cf, sn = c.confirms, c.serversNum + cf, sn = c.confirms, c.localBPInfo.total return }() - if unreachable+needConfirms > serversNum { - log.WithFields(log.Fields{ - "peer": c.peerInfo(), - "sync_head_height": h, - "unreachable_count": unreachable, - }).Warn("one or more block producers are currently unreachable") - ok = false - } else { - ok = true - } - }() - for _, s := range c.getPeers().Servers { - if !s.IsEqual(&c.nodeID) { - wg.Add(1) - go func(id proto.NodeID) { - defer wg.Done() - var ( - err error - req = &types.FetchBlockReq{ - Envelope: proto.Envelope{ - // TODO(lambda): Add fields. 
- }, - Height: h, - } - resp = &types.FetchBlockResp{} - ) - var le = log.WithFields(log.Fields{ - "local": c.peerInfo(), - "remote": id, - "height": h, - }) - if err = c.cl.CallNodeWithContext( - cld, id, route.MCCFetchBlock.String(), req, resp, - ); err != nil { - le.WithError(err).Warn("failed to fetch block") - atomic.AddUint32(&unreachable, 1) - return - } - if resp.Block == nil { - le.Debug("fetch block request reply: no such block") - return - } - // Push new block from other peers - le.WithFields(log.Fields{ - "parent": resp.Block.ParentHash().Short(4), - "hash": resp.Block.BlockHash().Short(4), - }).Debug("fetch block request reply: found block") - select { - case c.pendingBlocks <- resp.Block: - case <-cld.Done(): - log.WithError(cld.Err()).Warn("add pending block aborted") - } - }(s) - } + ) + + if ok = unreachable+needConfirms <= serversNum; !ok { + log.WithFields(log.Fields{ + "peer": c.getLocalBPInfo().String(), + "sync_head_height": h, + "unreachable_count": unreachable, + }).Warn("one or more block producers are currently unreachable") } return } @@ -938,7 +869,7 @@ func (c *Chain) nextTick() (t time.Time, d time.Duration) { func (c *Chain) isMyTurn() bool { c.RLock() defer c.RUnlock() - return c.nextHeight%c.serversNum == c.locSvIndex + return c.nextHeight%c.localBPInfo.total == c.localBPInfo.rank } // increaseNextHeight prepares the chain state for the next turn. @@ -948,15 +879,6 @@ func (c *Chain) increaseNextHeight() { c.nextHeight++ } -func (c *Chain) peerInfo() string { - var index, bpNum, nodeID = func() (uint32, uint32, proto.NodeID) { - c.RLock() - defer c.RUnlock() - return c.locSvIndex, c.serversNum, c.nodeID - }() - return fmt.Sprintf("[%d/%d] %s", index, bpNum, nodeID) -} - // heightOfTime calculates the heightOfTime with this sql-chain config of a given time reading. 
func (c *Chain) heightOfTime(t time.Time) uint32 { return uint32(t.Sub(c.genesisTime) / c.period) @@ -968,11 +890,22 @@ func (c *Chain) getNextHeight() uint32 { return c.nextHeight } -func (c *Chain) getPeers() *proto.Peers { +func (c *Chain) getLocalBPInfo() *blockProducerInfo { c.RLock() defer c.RUnlock() - var peers = c.peers.Clone() - return &peers + return c.localBPInfo +} + +func (c *Chain) getRemoteBPInfos() (remoteBPInfos []*blockProducerInfo) { + var localBPInfo, bpInfos = func() (*blockProducerInfo, []*blockProducerInfo) { + c.RLock() + defer c.RUnlock() + return c.localBPInfo, c.bpInfos + }() + remoteBPInfos = make([]*blockProducerInfo, 0, localBPInfo.total-1) + remoteBPInfos = append(remoteBPInfos, bpInfos[0:localBPInfo.rank]...) + remoteBPInfos = append(remoteBPInfos, bpInfos[localBPInfo.rank+1:]...) + return } func (c *Chain) lastIrreversibleBlock() *blockNode { diff --git a/blockproducer/chain_gossip.go b/blockproducer/chain_gossip.go new file mode 100644 index 000000000..9632d3303 --- /dev/null +++ b/blockproducer/chain_gossip.go @@ -0,0 +1,136 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package blockproducer + +import ( + "context" + "sync" + "sync/atomic" + + pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/route" + "github.com/CovenantSQL/CovenantSQL/types" + "github.com/CovenantSQL/CovenantSQL/utils/log" +) + +func (c *Chain) nonblockingBroadcastBlock(block *types.BPBlock) { + for _, info := range c.getRemoteBPInfos() { + func(remote *blockProducerInfo) { + c.goFuncWithTimeout(func(ctx context.Context) { + var ( + req = &types.AdviseNewBlockReq{ + Envelope: proto.Envelope{ + // TODO(lambda): Add fields. + }, + Block: block, + } + err = c.cl.CallNodeWithContext( + ctx, remote.nodeID, route.MCCAdviseNewBlock.String(), req, nil) + ) + log.WithFields(log.Fields{ + "local": c.getLocalBPInfo().String(), + "remote": remote.String(), + "block_time": block.Timestamp(), + "block_hash": block.BlockHash().Short(4), + "parent_hash": block.ParentHash().Short(4), + }).WithError(err).Debug("broadcast new block to other peers") + }, c.period) + }(info) + } +} + +func (c *Chain) nonblockingBroadcastTx(ttl uint32, tx pi.Transaction) { + for _, info := range c.getRemoteBPInfos() { + func(remote *blockProducerInfo) { + c.goFuncWithTimeout(func(ctx context.Context) { + var ( + req = &types.AddTxReq{ + Envelope: proto.Envelope{ + // TODO(lambda): Add fields. 
+ }, + TTL: ttl, + Tx: tx, + } + err = c.cl.CallNodeWithContext( + ctx, remote.nodeID, route.MCCAddTx.String(), req, nil) + ) + log.WithFields(log.Fields{ + "local": c.getLocalBPInfo().String(), + "remote": remote.String(), + "hash": tx.Hash().Short(4), + "address": tx.GetAccountAddress(), + "type": tx.GetTransactionType().String(), + }).WithError(err).Debug("broadcast transaction to other peers") + }, c.tick) + }(info) + } +} + +func (c *Chain) blockingFetchBlock(ctx context.Context, h uint32) (unreachable uint32) { + var ( + cld, ccl = context.WithTimeout(ctx, c.tick) + wg = &sync.WaitGroup{} + ) + defer func() { + wg.Wait() + ccl() + }() + for _, info := range c.getRemoteBPInfos() { + wg.Add(1) + go func(remote *blockProducerInfo) { + defer wg.Done() + var ( + err error + req = &types.FetchBlockReq{ + Envelope: proto.Envelope{ + // TODO(lambda): Add fields. + }, + Height: h, + } + resp = &types.FetchBlockResp{} + ) + var le = log.WithFields(log.Fields{ + "local": c.getLocalBPInfo().String(), + "remote": remote.String(), + "height": h, + }) + if err = c.cl.CallNodeWithContext( + cld, remote.nodeID, route.MCCFetchBlock.String(), req, resp, + ); err != nil { + le.WithError(err).Warn("failed to fetch block") + atomic.AddUint32(&unreachable, 1) + return + } + if resp.Block == nil { + le.Debug("fetch block request reply: no such block") + return + } + // Push new block from other peers + le.WithFields(log.Fields{ + "parent": resp.Block.ParentHash().Short(4), + "hash": resp.Block.BlockHash().Short(4), + }).Debug("fetch block request reply: found block") + select { + case c.pendingBlocks <- resp.Block: + case <-cld.Done(): + log.WithError(cld.Err()).Warn("add pending block aborted") + } + }(info) + } + return +} diff --git a/blockproducer/chain_service.go b/blockproducer/chain_io.go similarity index 100% rename from blockproducer/chain_service.go rename to blockproducer/chain_io.go diff --git a/blockproducer/chain_test.go b/blockproducer/chain_test.go index 
1e4afe108..61e41668b 100644 --- a/blockproducer/chain_test.go +++ b/blockproducer/chain_test.go @@ -348,10 +348,10 @@ func TestChain(t *testing.T) { chain.Stop() chain = nil }() - chain.addTx(t1) - chain.addTx(t2) - chain.addTx(t3) - chain.addTx(t4) + chain.addTx(&types.AddTxReq{TTL: 1, Tx: t1}) + chain.addTx(&types.AddTxReq{TTL: 1, Tx: t2}) + chain.addTx(&types.AddTxReq{TTL: 1, Tx: t3}) + chain.addTx(&types.AddTxReq{TTL: 1, Tx: t4}) time.Sleep(15 * chain.period) }) }) diff --git a/blockproducer/limits/limits.go b/blockproducer/limits/limits.go new file mode 100644 index 000000000..782b69e99 --- /dev/null +++ b/blockproducer/limits/limits.go @@ -0,0 +1,24 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Package limits defines limits of the CovenantSQL system. +package limits + +const ( + // MaxTxBroadcastTTL defines the TTL limit of a AddTx request broadcasting within the + // block producers. 
+ MaxTxBroadcastTTL = 1 +) diff --git a/blockproducer/rpc.go b/blockproducer/rpc.go index 0225b1ab2..0b3c5cdee 100644 --- a/blockproducer/rpc.go +++ b/blockproducer/rpc.go @@ -29,7 +29,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/types" - "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/pkg/errors" ) @@ -99,13 +98,8 @@ func (s *ChainRPCService) NextAccountNonce( } // AddTx is the RPC method to add a transaction. -func (s *ChainRPCService) AddTx(req *types.AddTxReq, resp *types.AddTxResp) (err error) { - if req.Tx == nil { - return ErrUnknownTransactionType - } - log.Infof("transaction type: %s, hash: %s, address: %s", - req.Tx.GetTransactionType().String(), req.Tx.Hash(), req.Tx.GetAccountAddress()) - s.chain.addTx(req.Tx) +func (s *ChainRPCService) AddTx(req *types.AddTxReq, _ *types.AddTxResp) (err error) { + s.chain.addTx(req) return } @@ -195,14 +189,13 @@ func WaitBPChainService(ctx context.Context, period time.Duration) (err error) { req = &types.FetchBlockReq{ Height: 0, // Genesis block } - resp = &types.FetchTxBillingResp{} ) defer ticker.Stop() for { select { case <-ticker.C: if err = rpc.RequestBP( - route.MCCFetchBlock.String(), req, resp, + route.MCCFetchBlock.String(), req, nil, ); err == nil || !strings.Contains(err.Error(), "can't find service") { return } diff --git a/client/driver.go b/client/driver.go index 7bdf10a81..59317df6d 100644 --- a/client/driver.go +++ b/client/driver.go @@ -162,6 +162,7 @@ func Create(meta ResourceMeta) (dsn string, err error) { meta.AdvancePayment = DefaultAdvancePayment } + req.TTL = 1 req.Tx = types.NewCreateDatabase(&types.CreateDatabaseHeader{ Owner: clientAddr, ResourceMeta: meta.ResourceMeta, diff --git a/cmd/cql-faucet/verifier.go b/cmd/cql-faucet/verifier.go index b404a6f1c..b1f144c18 100644 --- a/cmd/cql-faucet/verifier.go +++ b/cmd/cql-faucet/verifier.go @@ -235,7 +235,7 @@ func (v *Verifier) dispenseOne(r 
*applicationRecord) (err error) { // decode target account address var targetAddress proto.AccountAddress - req := &pt.AddTxReq{} + req := &pt.AddTxReq{TTL: 1} resp := &pt.AddTxResp{} req.Tx = pt.NewTransfer( &pt.TransferHeader{ diff --git a/cmd/cql-minerd/provide_service.go b/cmd/cql-minerd/provide_service.go index a2e0f78b5..0537143d7 100644 --- a/cmd/cql-minerd/provide_service.go +++ b/cmd/cql-minerd/provide_service.go @@ -158,6 +158,7 @@ func sendProvideService(reg *prometheus.Registry) { return } + req.TTL = 1 req.Tx = tx if err = rpc.RequestBP(route.MCCAddTx.String(), req, resp); err != nil { diff --git a/cmd/cql-utils/rpc.go b/cmd/cql-utils/rpc.go index 775af1b97..1f868e964 100644 --- a/cmd/cql-utils/rpc.go +++ b/cmd/cql-utils/rpc.go @@ -83,6 +83,7 @@ func runRPC() { if rpcName == route.MCCAddTx.String() { // special type of query if addTxReqType, ok := req.(*types.AddTxReq); ok { + addTxReqType.TTL = 1 addTxReqType.Tx = &pi.TransactionWrapper{} } } diff --git a/sqlchain/chain.go b/sqlchain/chain.go index cd314ef99..ec76d0abe 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -914,7 +914,7 @@ func (c *Chain) processBlocks(ctx context.Context) { log.WithError(err).Warning("sign tx failed") } - addTxReq := &types.AddTxReq{} + addTxReq := &types.AddTxReq{TTL: 1} addTxResp := &types.AddTxResp{} addTxReq.Tx = ub log.Debugf("nonce in processBlocks: %d, addr: %s", diff --git a/types/bprpc.go b/types/bprpc.go index 786996d60..329b1734e 100644 --- a/types/bprpc.go +++ b/types/bprpc.go @@ -104,7 +104,9 @@ type NextAccountNonceResp struct { // AddTxReq defines a request of the AddTx RPC method. type AddTxReq struct { proto.Envelope - Tx interfaces.Transaction + + TTL uint32 // defines the broadcast TTL on BP network. + Tx interfaces.Transaction } // AddTxResp defines a response of the AddTx RPC method. 
From e6eea292c6097d0e18c9c28cf3c10a4151d49dbc Mon Sep 17 00:00:00 2001 From: auxten Date: Fri, 4 Jan 2019 16:15:58 +0800 Subject: [PATCH 023/302] Add accept log, mute useless logs --- cmd/cql-utils/confgen.go | 2 +- cmd/cql-utils/keygen.go | 2 +- crypto/kms/pubkeystore.go | 18 +++++++++--------- rpc/client.go | 6 +++--- rpc/pool.go | 4 ++-- rpc/rpcutil.go | 2 +- rpc/server.go | 1 + 7 files changed, 18 insertions(+), 17 deletions(-) diff --git a/cmd/cql-utils/confgen.go b/cmd/cql-utils/confgen.go index 32583ea72..cc7aa8ee6 100644 --- a/cmd/cql-utils/confgen.go +++ b/cmd/cql-utils/confgen.go @@ -52,7 +52,7 @@ func runConfgen() { if _, err := os.Stat(workingRoot); err == nil { reader := bufio.NewReader(os.Stdin) fmt.Printf("The directory \"%s\" already exists. \nDo you want to delete it? (y or n, press Enter for default n):\n", - workingRoot) + workingRoot) t, err := reader.ReadString('\n') t = strings.Trim(t, "\n") if err != nil { diff --git a/cmd/cql-utils/keygen.go b/cmd/cql-utils/keygen.go index 30bea4811..e1541541f 100644 --- a/cmd/cql-utils/keygen.go +++ b/cmd/cql-utils/keygen.go @@ -32,7 +32,7 @@ func runKeygen() *asymmetric.PublicKey { if _, err := os.Stat(privateKeyFile); err == nil { reader := bufio.NewReader(os.Stdin) fmt.Printf("Private key file \"%s\" already exists. \nDo you want to delete it? 
(y or n, press Enter for default n):\n", - privateKeyFile) + privateKeyFile) t, err := reader.ReadString('\n') t = strings.Trim(t, "\n") if err != nil { diff --git a/crypto/kms/pubkeystore.go b/crypto/kms/pubkeystore.go index 2719a707c..10a2a41a5 100644 --- a/crypto/kms/pubkeystore.go +++ b/crypto/kms/pubkeystore.go @@ -17,7 +17,6 @@ package kms import ( - "errors" "os" "path/filepath" "runtime" @@ -32,6 +31,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" bolt "github.com/coreos/bbolt" + "github.com/pkg/errors" ) // PublicKeyStore holds db and bucket name @@ -145,7 +145,7 @@ func InitPublicKeyStore(dbPath string, initNodes []proto.Node) (err error) { for _, n := range initNodes { err = setNode(&n) if err != nil { - log.WithError(err).Error("set init nodes failed") + err = errors.Wrap(err, "set init nodes failed") return } } @@ -186,7 +186,7 @@ func GetNodeInfo(id proto.NodeID) (nodeInfo *proto.Node, err error) { return err // return from View func }) if err != nil { - log.WithError(err).Error("get node info failed") + err = errors.Wrap(err, "get node info failed") } return } @@ -210,7 +210,7 @@ func GetAllNodeID() (nodeIDs []proto.NodeID, err error) { return err // return from View func }) if err != nil { - log.WithError(err).Error("get all node id failed") + err = errors.Wrap(err, "get all node id failed") } return @@ -260,7 +260,7 @@ func setNode(nodeInfo *proto.Node) (err error) { nodeBuf, err := utils.EncodeMsgPack(nodeInfo) if err != nil { - log.WithError(err).Error("marshal node info failed") + err = errors.Wrap(err, "marshal node info failed") return } log.Debugf("set node: %#v", nodeInfo) @@ -273,7 +273,7 @@ func setNode(nodeInfo *proto.Node) (err error) { return bucket.Put([]byte(nodeInfo.ID), nodeBuf.Bytes()) }) if err != nil { - log.WithError(err).Error("get node info failed") + err = errors.Wrap(err, "get node info failed") } return @@ -295,7 +295,7 @@ func DelNode(id proto.NodeID) (err error) { 
return bucket.Delete([]byte(id)) }) if err != nil { - log.WithError(err).Error("del node failed") + err = errors.Wrap(err, "del node failed") } return } @@ -309,7 +309,7 @@ func removeBucket() (err error) { return tx.DeleteBucket([]byte(kmsBucketName)) }) if err != nil { - log.WithError(err).Error("remove bucket failed") + err = errors.Wrap(err, "remove bucket failed") return } // ks.bucket == nil means bucket not exist @@ -332,7 +332,7 @@ func ResetBucket() error { }) pks.bucket = bucketName if err != nil { - log.WithError(err).Error("reset bucket failed") + err = errors.Wrap(err, "reset bucket failed") } return err diff --git a/rpc/client.go b/rpc/client.go index b3bc19165..cfdf15593 100644 --- a/rpc/client.go +++ b/rpc/client.go @@ -123,7 +123,7 @@ func DialToNode(nodeID proto.NodeID, pool *SessionPool, isAnonymous bool) (conn } return } - log.WithField("poolSize", pool.Len()).Debug("session pool size") + //log.WithField("poolSize", pool.Len()).Debug("session pool size") conn, err = pool.Get(nodeID) return } @@ -218,6 +218,6 @@ func InitClientConn(conn net.Conn) (client *Client, err error) { // Close the client RPC connection func (c *Client) Close() { - log.WithField("addr", c.RemoteAddr).Debug("closing client") - c.Client.Close() + //log.WithField("addr", c.RemoteAddr).Debug("closing client") + _ = c.Client.Close() } diff --git a/rpc/pool.go b/rpc/pool.go index c27f1d2e5..ad3f3c90f 100644 --- a/rpc/pool.go +++ b/rpc/pool.go @@ -129,14 +129,14 @@ func (p *SessionPool) Get(id proto.NodeID) (conn net.Conn, err error) { if ok { conn, err = cachedConn.Sess.OpenStream() if err == nil { - log.WithField("node", id).Debug("reusing session") + //log.WithField("node", id).Debug("reusing session") return } log.WithField("target", id).WithError(err).Error("open session failed") p.Remove(id) } - log.WithField("target", id).Debug("dialing new session") + //log.WithField("target", id).Debug("dialing new session") // Can't find existing Session, try to dial one newConn, err := 
p.nodeDialer(id) if err != nil { diff --git a/rpc/rpcutil.go b/rpc/rpcutil.go index 96e2aca11..c5ee4329e 100644 --- a/rpc/rpcutil.go +++ b/rpc/rpcutil.go @@ -198,7 +198,7 @@ func (c *Caller) CallNodeWithContext( func GetNodeAddr(id *proto.RawNodeID) (addr string, err error) { addr, err = route.GetNodeAddrCache(id) if err != nil { - log.WithField("target", id.String()).WithError(err).Info("get node addr from cache failed") + //log.WithField("target", id.String()).WithError(err).Debug("get node addr from cache failed") if err == route.ErrUnknownNodeID { BPs := route.GetBPs() if len(BPs) == 0 { diff --git a/rpc/server.go b/rpc/server.go index 30d1291e7..be84a6091 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -108,6 +108,7 @@ serverLoop: break serverLoop default: conn, err := s.Listener.Accept() + log.WithField("remote", conn.RemoteAddr().String()).Infof("accept") if err != nil { continue } From e434295555619131688522c3bd2689a5b553debd Mon Sep 17 00:00:00 2001 From: auxten Date: Fri, 4 Jan 2019 16:48:01 +0800 Subject: [PATCH 024/302] Fix typo --- rpc/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rpc/server.go b/rpc/server.go index be84a6091..693f3771a 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -108,10 +108,10 @@ serverLoop: break serverLoop default: conn, err := s.Listener.Accept() - log.WithField("remote", conn.RemoteAddr().String()).Infof("accept") if err != nil { continue } + log.WithField("remote", conn.RemoteAddr().String()).Infof("accept") go s.handleConn(conn) } } From d895572eacc924036d024df2fc736452a61aac51 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Fri, 4 Jan 2019 16:54:51 +0800 Subject: [PATCH 025/302] Add RPC method for tx state query --- blockproducer/bpinfo.go | 6 +++--- blockproducer/branch.go | 12 ++++++++++++ blockproducer/chain_io.go | 12 ++++++++++++ blockproducer/interfaces/transaction.go | 21 ++++++++++++++++++++- blockproducer/rpc.go | 13 +++++++++++++ types/bprpc.go | 13 +++++++++++++ 6 files changed, 
73 insertions(+), 4 deletions(-) diff --git a/blockproducer/bpinfo.go b/blockproducer/bpinfo.go index cb925448b..710ee452e 100644 --- a/blockproducer/bpinfo.go +++ b/blockproducer/bpinfo.go @@ -31,7 +31,7 @@ type blockProducerInfo struct { // String implements fmt.Stringer. func (i *blockProducerInfo) String() string { - return fmt.Sprintf("[%d/%d] (%s) %s", i.rank+1, i.total, i.role, i.nodeID) + return fmt.Sprintf("[%d/%d|%s] %s", i.rank+1, i.total, i.role, i.nodeID) } func newBlockProduerInfos( @@ -52,9 +52,9 @@ func newBlockProduerInfos( bpInfos = make([]*blockProducerInfo, total) for i, v := range peers.PeersHeader.Servers { - var role = "F" + var role = "Follower" if v == peers.Leader { - role = "L" + role = "Leader" } bpInfos[i] = &blockProducerInfo{ rank: uint32(i), diff --git a/blockproducer/branch.go b/blockproducer/branch.go index 8c511ccef..4a66f4923 100644 --- a/blockproducer/branch.go +++ b/blockproducer/branch.go @@ -214,6 +214,18 @@ func (b *branch) clearPackedTxs(txs []pi.Transaction) { } } +func (b *branch) queryTx(hash hash.Hash) (state pi.TransactionState, ok bool) { + if _, ok = b.unpacked[hash]; ok { + state = pi.TransactionStatePending + return + } + if _, ok = b.packed[hash]; ok { + state = pi.TransactionStatePacked + return + } + return +} + func (b *branch) sprint(from uint32) (buff string) { var nodes = b.head.fetchNodeList(from) for i, v := range nodes { diff --git a/blockproducer/chain_io.go b/blockproducer/chain_io.go index 44dfcb237..f009f8f6a 100644 --- a/blockproducer/chain_io.go +++ b/blockproducer/chain_io.go @@ -140,3 +140,15 @@ func (c *Chain) loadSQLChainProfiles(addr proto.AccountAddress) []*types.SQLChai defer c.RUnlock() return c.immutable.loadROSQLChains(addr) } + +func (c *Chain) queryTxState(hash hash.Hash) (state pi.TransactionState, err error) { + c.RLock() + defer c.RUnlock() + var ok bool + state = pi.TransactionStateNotFound + if state, ok = c.headBranch.queryTx(hash); ok { + return + } + // TODO(leventeliu): get 
confirmed state from tx history. + return +} diff --git a/blockproducer/interfaces/transaction.go b/blockproducer/interfaces/transaction.go index bd936c414..18e0bd1a9 100644 --- a/blockproducer/interfaces/transaction.go +++ b/blockproducer/interfaces/transaction.go @@ -29,7 +29,7 @@ import ( // AccountNonce defines the an account nonce. type AccountNonce uint32 -// TransactionType defines an transaction type. +// TransactionType defines a transaction type. type TransactionType uint32 // Bytes encodes a TransactionType to a byte slice. @@ -108,6 +108,25 @@ func (t TransactionType) String() string { } } +// TransactionState defines a transaction state. +type TransactionState uint32 + +// Transaction state transition: +// [o] ---[ Add ]--> Pending ---[ Produce Block ]--> Packed ---[ Irreversible ]--> Confirmed +// | | x +// | x +------[ Prune ]--> Not Found +// x | +// | +------------------------------------[ Expire ]--> Expired +// | +// +----------------------------------------------------------------------> Not Found +const ( + TransactionStatePending TransactionState = iota + TransactionStatePacked + TransactionStateConfirmed + TransactionStateExpired + TransactionStateNotFound +) + // Transaction is the interface implemented by an object that can be verified and processed by // block producers. 
type Transaction interface { diff --git a/blockproducer/rpc.go b/blockproducer/rpc.go index 0b3c5cdee..b503ed67a 100644 --- a/blockproducer/rpc.go +++ b/blockproducer/rpc.go @@ -23,6 +23,7 @@ import ( "strings" "time" + pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/crypto" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/proto" @@ -133,6 +134,18 @@ func (s *ChainRPCService) QuerySQLChainProfile(req *types.QuerySQLChainProfileRe return } +func (s *ChainRPCService) QueryTxState( + req *types.QueryTxStateReq, resp *types.QueryTxStateResp) (err error, +) { + var state pi.TransactionState + if state, err = s.chain.queryTxState(req.Hash); err != nil { + return + } + resp.Hash = req.Hash + resp.State = state + return +} + // Sub is the RPC method to subscribe some event. func (s *ChainRPCService) Sub(req *types.SubReq, resp *types.SubResp) (err error) { return s.chain.bs.Subscribe(req.Topic, func(request interface{}, response interface{}) { diff --git a/types/bprpc.go b/types/bprpc.go index 329b1734e..66a6a2067 100644 --- a/types/bprpc.go +++ b/types/bprpc.go @@ -18,6 +18,8 @@ package types import ( "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" + pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/proto" ) @@ -183,3 +185,14 @@ type QuerySQLChainProfileResp struct { proto.Envelope Profile SQLChainProfile } + +type QueryTxStateReq struct { + proto.Envelope + Hash hash.Hash +} + +type QueryTxStateResp struct { + proto.Envelope + Hash hash.Hash + State pi.TransactionState +} From 034d0d05b5476ebb77849da67b45efc470d63380 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Fri, 4 Jan 2019 17:16:03 +0800 Subject: [PATCH 026/302] Add comments for GoDoc --- blockproducer/rpc.go | 1 + types/bprpc.go | 2 ++ 2 files changed, 3 insertions(+) diff --git a/blockproducer/rpc.go 
b/blockproducer/rpc.go index 79294b0ed..5ac51a0a4 100644 --- a/blockproducer/rpc.go +++ b/blockproducer/rpc.go @@ -134,6 +134,7 @@ func (s *ChainRPCService) QuerySQLChainProfile(req *types.QuerySQLChainProfileRe return } +// QueryTxState is the RPC method to query a transaction state. func (s *ChainRPCService) QueryTxState( req *types.QueryTxStateReq, resp *types.QueryTxStateResp) (err error, ) { diff --git a/types/bprpc.go b/types/bprpc.go index 66a6a2067..70a7d529f 100644 --- a/types/bprpc.go +++ b/types/bprpc.go @@ -186,11 +186,13 @@ type QuerySQLChainProfileResp struct { Profile SQLChainProfile } +// QueryTxStateReq defines a request of the QueryTxState RPC method. type QueryTxStateReq struct { proto.Envelope Hash hash.Hash } +// QueryTxStateResp defines a response of the QueryTxState RPC method. type QueryTxStateResp struct { proto.Envelope Hash hash.Hash From a8ace27da59180ff32123a9f5f65d4ddf5f10f6e Mon Sep 17 00:00:00 2001 From: leventeliu Date: Fri, 4 Jan 2019 18:00:13 +0800 Subject: [PATCH 027/302] Add transaction expiration --- blockproducer/branch.go | 6 ++++++ blockproducer/chain.go | 40 ++++++++++++++++++++++++++++++++++------ 2 files changed, 40 insertions(+), 6 deletions(-) diff --git a/blockproducer/branch.go b/blockproducer/branch.go index 4a66f4923..149197d58 100644 --- a/blockproducer/branch.go +++ b/blockproducer/branch.go @@ -214,6 +214,12 @@ func (b *branch) clearPackedTxs(txs []pi.Transaction) { } } +func (b *branch) clearUnpackedTxs(txs []pi.Transaction) { + for _, v := range txs { + delete(b.unpacked, v.Hash()) + } +} + func (b *branch) queryTx(hash hash.Hash) (state pi.TransactionState, ok bool) { if _, ok = b.unpacked[hash]; ok { state = pi.TransactionStatePending diff --git a/blockproducer/chain.go b/blockproducer/chain.go index 1420ea5cc..0414506f3 100644 --- a/blockproducer/chain.go +++ b/blockproducer/chain.go @@ -619,6 +619,9 @@ func (c *Chain) replaceAndSwitchToBranch( sps []storageProcedure up storageCallback height = 
c.heightOfTime(newBlock.Timestamp()) + + resultTxPool = make(map[hash.Hash]pi.Transaction) + expiredTxs []pi.Transaction ) // Find new irreversible blocks @@ -630,13 +633,33 @@ func (c *Chain) replaceAndSwitchToBranch( newIrres = lastIrre.fetchNodeList(c.lastIrre.count) // Apply irreversible blocks to create dirty map on immutable cache - // - // TODO(leventeliu): use old metaState for now, better use separated dirty cache. + for k, v := range c.txPool { + resultTxPool[k] = v + } for _, b := range newIrres { for _, tx := range b.block.Transactions { if err := c.immutable.apply(tx); err != nil { log.WithError(err).Fatal("failed to apply block to immutable database") } + delete(resultTxPool, tx.Hash()) // Remove confirmed transaction + } + } + + // Check tx expiration + for k, v := range resultTxPool { + if base, err := c.immutable.nextNonce( + v.GetAccountAddress(), + ); err != nil || v.GetAccountNonce() < base { + log.WithFields(log.Fields{ + "hash": k.Short(4), + "type": v.GetTransactionType().String(), + "account": v.GetAccountAddress(), + "nonce": v.GetAccountNonce(), + + "immutable_base_nonce": base, + }).Debug("transaction expired") + expiredTxs = append(expiredTxs, v) + delete(resultTxPool, k) // Remove expired transaction } } @@ -666,6 +689,9 @@ func (c *Chain) replaceAndSwitchToBranch( for _, n := range newIrres { sps = append(sps, deleteTxs(n.block.Transactions)) } + if len(expiredTxs) > 0 { + sps = append(sps, deleteTxs(expiredTxs)) + } sps = append(sps, updateIrreversible(lastIrre.hash)) // Prepare callback to update cache @@ -704,15 +730,17 @@ func (c *Chain) replaceAndSwitchToBranch( c.headBranch = newBranch c.headIndex = idx c.branches = brs - // Clear packed transactions + // Clear transactions in each branch for _, b := range newIrres { for _, br := range c.branches { br.clearPackedTxs(b.block.Transactions) } - for _, tx := range b.block.Transactions { - delete(c.txPool, tx.Hash()) - } } + for _, br := range c.branches { + 
br.clearUnpackedTxs(expiredTxs) + } + // Update txPool to result txPool (packed and expired transactions cleared!) + c.txPool = resultTxPool } // Write to immutable database and update cache From cb4545107a91e8d495d350642c0a41df5b0c00a7 Mon Sep 17 00:00:00 2001 From: auxten Date: Fri, 4 Jan 2019 18:08:57 +0800 Subject: [PATCH 028/302] Fix tx.GetAccountAddress() in log --- blockproducer/chain.go | 2 +- blockproducer/rpc.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/blockproducer/chain.go b/blockproducer/chain.go index cea195eda..0518ddf8f 100644 --- a/blockproducer/chain.go +++ b/blockproducer/chain.go @@ -461,7 +461,7 @@ func (c *Chain) addTx(tx pi.Transaction) { func (c *Chain) processTx(tx pi.Transaction) { if err := tx.Verify(); err != nil { log.WithError(err).Errorf("failed to verify transaction with hash: %s, address: %s, tx type: %s", - tx.Hash(), tx.GetAccountAddress(), tx.GetTransactionType().String()) + tx.Hash(), tx.GetAccountAddress().String(), tx.GetTransactionType().String()) return } if ok := func() (ok bool) { diff --git a/blockproducer/rpc.go b/blockproducer/rpc.go index ac1b2e47f..01fd2d081 100644 --- a/blockproducer/rpc.go +++ b/blockproducer/rpc.go @@ -104,7 +104,7 @@ func (s *ChainRPCService) AddTx(req *types.AddTxReq, resp *types.AddTxResp) (err return ErrUnknownTransactionType } log.Infof("transaction type: %s, hash: %s, address: %s", - req.Tx.GetTransactionType().String(), req.Tx.Hash(), req.Tx.GetAccountAddress()) + req.Tx.GetTransactionType().String(), req.Tx.Hash(), req.Tx.GetAccountAddress().String()) s.chain.addTx(req.Tx) return } From c3fc326af6ef46baf5eb9e6760d68f0fb6198429 Mon Sep 17 00:00:00 2001 From: auxten Date: Fri, 4 Jan 2019 18:18:47 +0800 Subject: [PATCH 029/302] Fix func (z *AccountAddress) String() string to func (z AccountAddress) String() string --- proto/nodeinfo.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/proto/nodeinfo.go b/proto/nodeinfo.go index 
32b315d7d..9d19fa359 100644 --- a/proto/nodeinfo.go +++ b/proto/nodeinfo.go @@ -83,8 +83,8 @@ func (z *AccountAddress) Msgsize() (s int) { } // String is a string variable. -func (z *AccountAddress) String() string { - return (*hash.Hash)(z).String() +func (z AccountAddress) String() string { + return (hash.Hash)(z).String() } // Less return true if k is less than y. From 9e8fc2dcebaf04d96a89b82131e04490326b4e7d Mon Sep 17 00:00:00 2001 From: auxten Date: Fri, 4 Jan 2019 18:21:14 +0800 Subject: [PATCH 030/302] Fix unit test --- crypto/kms/pubkeystore_test.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/crypto/kms/pubkeystore_test.go b/crypto/kms/pubkeystore_test.go index 707f839db..911ba6a77 100644 --- a/crypto/kms/pubkeystore_test.go +++ b/crypto/kms/pubkeystore_test.go @@ -26,6 +26,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/pkg/errors" . 
"github.com/smartystreets/goconvey/convey" yaml "gopkg.in/yaml.v2" ) @@ -71,7 +72,7 @@ func TestDB(t *testing.T) { pubk, err = GetPublicKey(proto.NodeID("99999999")) So(pubk, ShouldBeNil) - So(err, ShouldEqual, ErrKeyNotFound) + So(errors.Cause(err), ShouldEqual, ErrKeyNotFound) err = SetNode(nil) So(err, ShouldEqual, ErrNilNode) @@ -116,31 +117,31 @@ func TestDB(t *testing.T) { pubk, err = GetPublicKey(proto.NodeID("2222")) So(pubk, ShouldBeNil) - So(err, ShouldEqual, ErrKeyNotFound) + So(errors.Cause(err), ShouldEqual, ErrKeyNotFound) err = removeBucket() So(err, ShouldBeNil) pubk, err = GetPublicKey(proto.NodeID("not exist")) So(pubk, ShouldBeNil) - So(err, ShouldEqual, ErrBucketNotInitialized) + So(errors.Cause(err), ShouldEqual, ErrBucketNotInitialized) err = setNode(node1) - So(err, ShouldEqual, ErrBucketNotInitialized) + So(errors.Cause(err), ShouldEqual, ErrBucketNotInitialized) err = DelNode(proto.NodeID("2222")) - So(err, ShouldEqual, ErrBucketNotInitialized) + So(errors.Cause(err), ShouldEqual, ErrBucketNotInitialized) IDs, err = GetAllNodeID() So(IDs, ShouldBeNil) - So(err, ShouldEqual, ErrBucketNotInitialized) + So(errors.Cause(err), ShouldEqual, ErrBucketNotInitialized) err = ResetBucket() So(err, ShouldBeNil) pubk, err = GetPublicKey(proto.NodeID("2222")) So(pubk, ShouldBeNil) - So(err, ShouldEqual, ErrKeyNotFound) + So(errors.Cause(err), ShouldEqual, ErrKeyNotFound) IDs, err = GetAllNodeID() So(IDs, ShouldBeNil) From 4f254d7f15cc9c884ee35e44f2772cb3b1b56bb9 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Fri, 4 Jan 2019 18:22:25 +0800 Subject: [PATCH 031/302] Add nonce limit --- blockproducer/chain.go | 30 +++++++++++++++++++++++++----- blockproducer/chain_io.go | 6 ++++++ blockproducer/limits/limits.go | 2 ++ 3 files changed, 33 insertions(+), 5 deletions(-) diff --git a/blockproducer/chain.go b/blockproducer/chain.go index 0414506f3..9dabde41d 100644 --- a/blockproducer/chain.go +++ b/blockproducer/chain.go @@ -447,19 +447,27 @@ func (c *Chain) 
processAddTxReq(addTxReq *types.AddTxReq) { var ( ttl = addTxReq.TTL tx = addTxReq.Tx - le = log.WithFields(log.Fields{ - "hash": tx.Hash().Short(4), - "address": tx.GetAccountAddress(), + + txhash = tx.Hash() + addr = tx.GetAccountAddress() + nonce = tx.GetAccountNonce() + + le = log.WithFields(log.Fields{ + "hash": txhash.Short(4), + "address": addr, + "nonce": nonce, "type": tx.GetTransactionType().String(), }) - err error + + base pi.AccountNonce + err error ) // Existense check if ok := func() (ok bool) { c.RLock() defer c.RUnlock() - _, ok = c.txPool[tx.Hash()] + _, ok = c.txPool[txhash] return }(); ok { le.Debug("tx already exists, abort processing") @@ -471,6 +479,18 @@ func (c *Chain) processAddTxReq(addTxReq *types.AddTxReq) { le.WithError(err).Warn("failed to verify transaction") return } + if base, err = c.immutableNextNonce(addr); err != nil { + le.WithError(err).Warn("failed to load base nonce of transaction account") + return + } + if nonce < base || nonce >= nonce+pl.MaxPendingTxsPerAccount { + // TODO(leventeliu): should persist to some where for tx query? + le.WithFields(log.Fields{ + "base_nonce": base, + "pending_limit": pl.MaxPendingTxsPerAccount, + }).Warn("invalid transaction nonce") + return + } // Broadcast to other block producers if ttl > pl.MaxTxBroadcastTTL { diff --git a/blockproducer/chain_io.go b/blockproducer/chain_io.go index f009f8f6a..57162b966 100644 --- a/blockproducer/chain_io.go +++ b/blockproducer/chain_io.go @@ -152,3 +152,9 @@ func (c *Chain) queryTxState(hash hash.Hash) (state pi.TransactionState, err err // TODO(leventeliu): get confirmed state from tx history. 
return } + +func (c *Chain) immutableNextNonce(addr proto.AccountAddress) (n pi.AccountNonce, err error) { + c.RLock() + defer c.RUnlock() + return c.immutable.nextNonce(addr) +} diff --git a/blockproducer/limits/limits.go b/blockproducer/limits/limits.go index 782b69e99..fab06d93d 100644 --- a/blockproducer/limits/limits.go +++ b/blockproducer/limits/limits.go @@ -21,4 +21,6 @@ const ( // MaxTxBroadcastTTL defines the TTL limit of a AddTx request broadcasting within the // block producers. MaxTxBroadcastTTL = 1 + // MaxPendingTxsPerAccount defines the limit of pending transactions of one account. + MaxPendingTxsPerAccount = 1000 ) From 9b4b6255f1e22cc1ac4c17ca4673fedcce9524f7 Mon Sep 17 00:00:00 2001 From: leveteliu Date: Fri, 4 Jan 2019 21:53:41 +0800 Subject: [PATCH 032/302] Minor fix --- blockproducer/chain.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/blockproducer/chain.go b/blockproducer/chain.go index 9dabde41d..10fb606bb 100644 --- a/blockproducer/chain.go +++ b/blockproducer/chain.go @@ -483,8 +483,8 @@ func (c *Chain) processAddTxReq(addTxReq *types.AddTxReq) { le.WithError(err).Warn("failed to load base nonce of transaction account") return } - if nonce < base || nonce >= nonce+pl.MaxPendingTxsPerAccount { - // TODO(leventeliu): should persist to some where for tx query? + if nonce < base || nonce >= base+pl.MaxPendingTxsPerAccount { + // TODO(leventeliu): should persist to somewhere for tx query? 
le.WithFields(log.Fields{ "base_nonce": base, "pending_limit": pl.MaxPendingTxsPerAccount, From b2a227dd34e6bde596c014fbc9416f0427ca9013 Mon Sep 17 00:00:00 2001 From: leveteliu Date: Sat, 5 Jan 2019 12:49:18 +0800 Subject: [PATCH 033/302] Shorten role field in BP info string --- blockproducer/bpinfo.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/blockproducer/bpinfo.go b/blockproducer/bpinfo.go index 710ee452e..f63c959d9 100644 --- a/blockproducer/bpinfo.go +++ b/blockproducer/bpinfo.go @@ -52,9 +52,9 @@ func newBlockProduerInfos( bpInfos = make([]*blockProducerInfo, total) for i, v := range peers.PeersHeader.Servers { - var role = "Follower" + var role = "F" if v == peers.Leader { - role = "Leader" + role = "L" } bpInfos[i] = &blockProducerInfo{ rank: uint32(i), From 0b454c0ed3f21b98df818b0baeaddf7bf6d51b9c Mon Sep 17 00:00:00 2001 From: leveteliu Date: Sat, 5 Jan 2019 13:46:32 +0800 Subject: [PATCH 034/302] Remove unnecessary String calls in log fields --- blockproducer/chain.go | 18 +++++++++--------- blockproducer/chain_gossip.go | 14 +++++++------- blockproducer/metastate.go | 10 +++++----- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/blockproducer/chain.go b/blockproducer/chain.go index 10fb606bb..cc42e4c6f 100644 --- a/blockproducer/chain.go +++ b/blockproducer/chain.go @@ -251,9 +251,9 @@ func NewChainWithContext(ctx context.Context, cfg *Config) (c *Chain, err error) txPool: txPool, } log.WithFields(log.Fields{ - "local": c.getLocalBPInfo().String(), - "period": c.period.String(), - "tick": c.tick.String(), + "local": c.getLocalBPInfo(), + "period": c.period, + "tick": c.tick, "height": c.head().height, }).Debug("current chain state") return @@ -280,7 +280,7 @@ func (c *Chain) Start() { func (c *Chain) Stop() (err error) { // Stop main process var le = log.WithFields(log.Fields{ - "local": c.getLocalBPInfo().String(), + "local": c.getLocalBPInfo(), }) le.Debug("stopping chain") c.stop() @@ -361,7 +361,7 @@ func 
(c *Chain) advanceNextHeight(now time.Time, d time.Duration) { var elapsed = -d log.WithFields(log.Fields{ - "local": c.getLocalBPInfo().String(), + "local": c.getLocalBPInfo(), "enclosing_height": c.getNextHeight() - 1, "using_timestamp": now.Format(time.RFC3339Nano), "elapsed_seconds": elapsed.Seconds(), @@ -456,7 +456,7 @@ func (c *Chain) processAddTxReq(addTxReq *types.AddTxReq) { "hash": txhash.Short(4), "address": addr, "nonce": nonce, - "type": tx.GetTransactionType().String(), + "type": tx.GetTransactionType(), }) base pi.AccountNonce @@ -541,7 +541,7 @@ func (c *Chain) mainCycle(ctx context.Context) { c.advanceNextHeight(t, d) } else { log.WithFields(log.Fields{ - "peer": c.getLocalBPInfo().String(), + "peer": c.getLocalBPInfo(), "next_height": c.getNextHeight(), "head_height": c.head().height, "head_block": c.head().hash.Short(4), @@ -605,7 +605,7 @@ func (c *Chain) syncCurrentHead(ctx context.Context) (ok bool) { if ok = unreachable+needConfirms <= serversNum; !ok { log.WithFields(log.Fields{ - "peer": c.getLocalBPInfo().String(), + "peer": c.getLocalBPInfo(), "sync_head_height": h, "unreachable_count": unreachable, }).Warn("one or more block producers are currently unreachable") @@ -672,7 +672,7 @@ func (c *Chain) replaceAndSwitchToBranch( ); err != nil || v.GetAccountNonce() < base { log.WithFields(log.Fields{ "hash": k.Short(4), - "type": v.GetTransactionType().String(), + "type": v.GetTransactionType(), "account": v.GetAccountAddress(), "nonce": v.GetAccountNonce(), diff --git a/blockproducer/chain_gossip.go b/blockproducer/chain_gossip.go index 9632d3303..ae45182ea 100644 --- a/blockproducer/chain_gossip.go +++ b/blockproducer/chain_gossip.go @@ -43,8 +43,8 @@ func (c *Chain) nonblockingBroadcastBlock(block *types.BPBlock) { ctx, remote.nodeID, route.MCCAdviseNewBlock.String(), req, nil) ) log.WithFields(log.Fields{ - "local": c.getLocalBPInfo().String(), - "remote": remote.String(), + "local": c.getLocalBPInfo(), + "remote": remote, "block_time": 
block.Timestamp(), "block_hash": block.BlockHash().Short(4), "parent_hash": block.ParentHash().Short(4), @@ -70,11 +70,11 @@ func (c *Chain) nonblockingBroadcastTx(ttl uint32, tx pi.Transaction) { ctx, remote.nodeID, route.MCCAddTx.String(), req, nil) ) log.WithFields(log.Fields{ - "local": c.getLocalBPInfo().String(), - "remote": remote.String(), + "local": c.getLocalBPInfo(), + "remote": remote, "hash": tx.Hash().Short(4), "address": tx.GetAccountAddress(), - "type": tx.GetTransactionType().String(), + "type": tx.GetTransactionType(), }).WithError(err).Debug("broadcast transaction to other peers") }, c.tick) }(info) @@ -105,8 +105,8 @@ func (c *Chain) blockingFetchBlock(ctx context.Context, h uint32) (unreachable u resp = &types.FetchBlockResp{} ) var le = log.WithFields(log.Fields{ - "local": c.getLocalBPInfo().String(), - "remote": remote.String(), + "local": c.getLocalBPInfo(), + "remote": remote, "height": h, }) if err = c.cl.CallNodeWithContext( diff --git a/blockproducer/metastate.go b/blockproducer/metastate.go index 1dfaf3891..512dad249 100644 --- a/blockproducer/metastate.go +++ b/blockproducer/metastate.go @@ -106,7 +106,7 @@ func (s *metaState) loadAccountTokenBalance(addr proto.AccountAddress, log.WithFields(log.Fields{ "account": addr.String(), "balance": b, - "tokenType": tokenType.String(), + "tokenType": tokenType, "loaded": loaded, }).Debug("queried token account") }() @@ -856,7 +856,7 @@ func isProviderReqMatch(po *types.ProviderProfile, req *types.CreateDatabase) (m if po.TokenType != req.TokenType { err = errors.New("token type mismatch") log.WithError(err).Debugf("miner's token type: %s, user's token type: %s", - po.TokenType.String(), req.TokenType.String()) + po.TokenType, req.TokenType) return } @@ -867,7 +867,7 @@ func (s *metaState) updatePermission(tx *types.UpdatePermission) (err error) { sender, err := crypto.PubKeyHash(tx.Signee) if err != nil { log.WithFields(log.Fields{ - "tx": tx.Hash().String(), + "tx": tx.Hash(), 
}).WithError(err).Error("unexpected err") return } @@ -1116,7 +1116,7 @@ func (s *metaState) transferSQLChainTokenBalance(transfer *types.Transfer) (err "addr": account.Address.String(), "amount": account.TokenBalance[transfer.TokenType], "transfer_amount": transfer.Amount, - "token_type": transfer.TokenType.String(), + "token_type": transfer.TokenType, }).WithError(err).Warning("in transferSQLChainTokenBalance") return } @@ -1245,7 +1245,7 @@ func (s *metaState) generateGenesisBlock(dbID proto.DatabaseID, resourceMeta typ } func (s *metaState) apply(t pi.Transaction) (err error) { - log.Infof("get tx: %s", t.GetTransactionType().String()) + log.Infof("get tx: %s", t.GetTransactionType()) // NOTE(leventeliu): bypass pool in this method. var ( addr = t.GetAccountAddress() From 6d5d33ac3fefb6c944217d8626eb2a2801c96132 Mon Sep 17 00:00:00 2001 From: leveteliu Date: Sat, 5 Jan 2019 14:18:09 +0800 Subject: [PATCH 035/302] Add compile changes method for metaState --- blockproducer/chain.go | 48 +++----------------------------------- blockproducer/metastate.go | 29 +++++++++++++++++++++++ 2 files changed, 32 insertions(+), 45 deletions(-) diff --git a/blockproducer/chain.go b/blockproducer/chain.go index cc42e4c6f..6e09bd1a6 100644 --- a/blockproducer/chain.go +++ b/blockproducer/chain.go @@ -123,9 +123,8 @@ func NewChainWithContext(ctx context.Context, cfg *Config) (c *Chain, err error) return } - // Storage genesis + // Create initial state from genesis block and store if !existed { - // TODO(leventeliu): reuse chain.replaceAndSwitchToBranch to construct initial state. 
var init = newMetaState() for _, v := range cfg.Genesis.Transactions { if ierr = init.apply(v); ierr != nil { @@ -133,29 +132,8 @@ func NewChainWithContext(ctx context.Context, cfg *Config) (c *Chain, err error) return } } - var sps []storageProcedure + var sps = init.compileChanges(nil) sps = append(sps, addBlock(0, cfg.Genesis)) - for k, v := range init.dirty.accounts { - if v != nil { - sps = append(sps, updateAccount(v)) - } else { - sps = append(sps, deleteAccount(k)) - } - } - for k, v := range init.dirty.databases { - if v != nil { - sps = append(sps, updateShardChain(v)) - } else { - sps = append(sps, deleteShardChain(k)) - } - } - for k, v := range init.dirty.provider { - if v != nil { - sps = append(sps, updateProvider(v)) - } else { - sps = append(sps, deleteProvider(k)) - } - } sps = append(sps, updateIrreversible(cfg.Genesis.SignedHeader.BlockHash)) if ierr = store(st, sps, nil); ierr != nil { err = errors.Wrap(ierr, "failed to initialize storage") @@ -684,28 +662,8 @@ func (c *Chain) replaceAndSwitchToBranch( } // Prepare storage procedures to update immutable database + sps = c.immutable.compileChanges(sps) sps = append(sps, addBlock(height, newBlock)) - for k, v := range c.immutable.dirty.accounts { - if v != nil { - sps = append(sps, updateAccount(v)) - } else { - sps = append(sps, deleteAccount(k)) - } - } - for k, v := range c.immutable.dirty.databases { - if v != nil { - sps = append(sps, updateShardChain(v)) - } else { - sps = append(sps, deleteShardChain(k)) - } - } - for k, v := range c.immutable.dirty.provider { - if v != nil { - sps = append(sps, updateProvider(v)) - } else { - sps = append(sps, deleteProvider(k)) - } - } for _, n := range newIrres { sps = append(sps, deleteTxs(n.block.Transactions)) } diff --git a/blockproducer/metastate.go b/blockproducer/metastate.go index 512dad249..eeeb87dd9 100644 --- a/blockproducer/metastate.go +++ b/blockproducer/metastate.go @@ -1286,6 +1286,35 @@ func (s *metaState) makeCopy() *metaState { } } 
+// compileChanges compiles storage procedures for changes in dirty map. +func (s *metaState) compileChanges( + dst []storageProcedure) (results []storageProcedure, +) { + results = dst + for k, v := range s.dirty.accounts { + if v != nil { + results = append(results, updateAccount(v)) + } else { + results = append(results, deleteAccount(k)) + } + } + for k, v := range s.dirty.databases { + if v != nil { + results = append(results, updateShardChain(v)) + } else { + results = append(results, deleteShardChain(k)) + } + } + for k, v := range s.dirty.provider { + if v != nil { + results = append(results, updateProvider(v)) + } else { + results = append(results, deleteProvider(k)) + } + } + return +} + func minDeposit(gasPrice uint64, minerNumber uint64) uint64 { return gasPrice * uint64(conf.GConf.QPS) * conf.GConf.BillingBlockCount * minerNumber From e1beba50333cf4f3990e1596e17159cc50f68880 Mon Sep 17 00:00:00 2001 From: auxten Date: Sat, 5 Jan 2019 18:34:55 +0800 Subject: [PATCH 036/302] Changelog v0.2.0 --- CHANGELOG.md | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 562baa25a..95bff0e5d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,29 @@ # Changelog +## [v0.2.0](https://github.com/CovenantSQL/CovenantSQL/tree/v0.2.0) (2019-01-05) + +[Full Changelog](https://github.com/CovenantSQL/CovenantSQL/compare/v0.1.0...v0.2.0) + +**Merged pull requests:** + +- Update GNTE config [\#193](https://github.com/CovenantSQL/CovenantSQL/pull/193) ([laodouya](https://github.com/laodouya)) +- Fix matchProvidersWithUser inconsistent [\#188](https://github.com/CovenantSQL/CovenantSQL/pull/188) ([auxten](https://github.com/auxten)) +- Speed up BPs at genesis startup [\#186](https://github.com/CovenantSQL/CovenantSQL/pull/186) ([leventeliu](https://github.com/leventeliu)) +- Wait for database creation fix [\#185](https://github.com/CovenantSQL/CovenantSQL/pull/185) ([xq262144](https://github.com/xq262144)) +- Simplify 
cql and cql-utils log [\#184](https://github.com/CovenantSQL/CovenantSQL/pull/184) ([auxten](https://github.com/auxten)) +- Fix Makefile PHONY, add push\_testnet [\#183](https://github.com/CovenantSQL/CovenantSQL/pull/183) ([auxten](https://github.com/auxten)) +- Fix issue: duplicate branches [\#182](https://github.com/CovenantSQL/CovenantSQL/pull/182) ([leventeliu](https://github.com/leventeliu)) +- Update testnet conf [\#181](https://github.com/CovenantSQL/CovenantSQL/pull/181) ([auxten](https://github.com/auxten)) +- Remove base58 wallet address [\#179](https://github.com/CovenantSQL/CovenantSQL/pull/179) ([auxten](https://github.com/auxten)) +- Fix GNTE test config missing miner wallet init coin [\#178](https://github.com/CovenantSQL/CovenantSQL/pull/178) ([laodouya](https://github.com/laodouya)) +- Upgrade transaction structure: add Timestamp field [\#177](https://github.com/CovenantSQL/CovenantSQL/pull/177) ([ggicci](https://github.com/ggicci)) +- Block main cycle when BP network is unreachable [\#176](https://github.com/CovenantSQL/CovenantSQL/pull/176) ([leventeliu](https://github.com/leventeliu)) +- Remove useless hash in base58 encoded private key [\#175](https://github.com/CovenantSQL/CovenantSQL/pull/175) ([auxten](https://github.com/auxten)) +- Prune unused codes [\#174](https://github.com/CovenantSQL/CovenantSQL/pull/174) ([leventeliu](https://github.com/leventeliu)) +- Fix docker entry point [\#173](https://github.com/CovenantSQL/CovenantSQL/pull/173) ([leventeliu](https://github.com/leventeliu)) +- Add permission granting/revoking [\#172](https://github.com/CovenantSQL/CovenantSQL/pull/172) ([leventeliu](https://github.com/leventeliu)) +- Extract observer to an independent docker image [\#163](https://github.com/CovenantSQL/CovenantSQL/pull/163) ([laodouya](https://github.com/laodouya)) + ## [v0.1.0](https://github.com/CovenantSQL/CovenantSQL/tree/v0.1.0) (2018-12-29) [Full 
Changelog](https://github.com/CovenantSQL/CovenantSQL/compare/v0.0.6...v0.1.0) From ece43bf5aa2ee2012658c30427109c8a4c85a4ab Mon Sep 17 00:00:00 2001 From: auxten Date: Sun, 6 Jan 2019 19:35:41 +0800 Subject: [PATCH 037/302] Fix when rCount != ETLSHeaderSize, err == nil --- rpc/server.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/rpc/server.go b/rpc/server.go index 30d1291e7..beaf595f1 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -179,10 +179,16 @@ func handleCipher(conn net.Conn) (cryptoConn *etls.CryptoConn, err error) { // NodeID + Uint256 Nonce headerBuf := make([]byte, ETLSHeaderSize) rCount, err := conn.Read(headerBuf) - if err != nil || rCount != ETLSHeaderSize { + if err != nil { log.WithError(err).Error("read node header error") return } + + if rCount != ETLSHeaderSize { + err = errors.New("invalid ETLS header size") + return + } + if headerBuf[0] != etls.ETLSMagicBytes[0] || headerBuf[1] != etls.ETLSMagicBytes[1] { err = errors.New("bad ETLS header") return From 81a7132a1ed80732e34f3e59b5d5fe68b25d6784 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Sun, 6 Jan 2019 20:43:16 +0800 Subject: [PATCH 038/302] Adjust kayak timeout config --- cmd/cqld/bootstrap.go | 12 +++++++----- worker/db.go | 10 ++++++++-- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/cmd/cqld/bootstrap.go b/cmd/cqld/bootstrap.go index 4a5770f1c..d17fe6036 100644 --- a/cmd/cqld/bootstrap.go +++ b/cmd/cqld/bootstrap.go @@ -40,9 +40,11 @@ import ( ) const ( - kayakServiceName = "Kayak" - kayakMethodName = "Call" - kayakWalFileName = "kayak.ldb" + kayakServiceName = "Kayak" + kayakMethodName = "Call" + kayakWalFileName = "kayak.ldb" + kayakPrepareTimeout = 10 * time.Second + kayakCommitTimeout = time.Minute ) func runNode(nodeID proto.NodeID, listenAddr string) (err error) { @@ -194,8 +196,8 @@ func initKayakTwoPC(rootDir string, node *proto.Node, peers *proto.Peers, h kt.H Handler: h, PrepareThreshold: 1.0, CommitThreshold: 1.0, - 
PrepareTimeout: time.Second, - CommitTimeout: time.Second * 60, + PrepareTimeout: kayakPrepareTimeout, + CommitTimeout: kayakCommitTimeout, Peers: peers, Wal: logWal, NodeID: node.ID, diff --git a/worker/db.go b/worker/db.go index 5a2043eb1..ab88e0526 100644 --- a/worker/db.go +++ b/worker/db.go @@ -56,6 +56,12 @@ const ( // CommitThreshold defines the commit complete threshold. CommitThreshold = 1.0 + // PrepareTimeout defines the prepare timeout config. + PrepareTimeout = 5 * time.Second + + // CommitTimeout defines the commit timeout config. + CommitTimeout = time.Minute + // SlowQuerySampleSize defines the maximum slow query log size (default: 1KB). SlowQuerySampleSize = 1 << 10 ) @@ -161,8 +167,8 @@ func NewDatabase(cfg *DBConfig, peers *proto.Peers, Handler: db, PrepareThreshold: PrepareThreshold, CommitThreshold: CommitThreshold, - PrepareTimeout: time.Second, - CommitTimeout: time.Second * 60, + PrepareTimeout: PrepareTimeout, + CommitTimeout: CommitTimeout, Peers: peers, Wal: db.kayakWal, NodeID: db.nodeID, From 16c06e4e5a0d458c79530bc6023e1b5ed61eb3d3 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Sun, 6 Jan 2019 21:51:47 +0800 Subject: [PATCH 039/302] Add COVENANT_ALERT env variable for restart alarm --- bin/docker-entry.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/bin/docker-entry.sh b/bin/docker-entry.sh index 6c3e5835b..4e760879b 100755 --- a/bin/docker-entry.sh +++ b/bin/docker-entry.sh @@ -2,6 +2,8 @@ echo nameserver 1.1.1.1 > /etc/resolv.conf +[ -s "${COVENANT_ALERT}" ] && [ -x "${COVENANT_ALERT}" ] && (eval "${COVENANT_ALERT}") + case "${COVENANT_ROLE}" in miner) exec /app/cql-minerd -config "${COVENANT_CONF}" "${@}" From eeeca8ea1235d29b2b2108a79ca70130d0118dcb Mon Sep 17 00:00:00 2001 From: auxten Date: Sun, 6 Jan 2019 22:26:38 +0800 Subject: [PATCH 040/302] Disable ntp metric collect for now --- metric/collector.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/metric/collector.go b/metric/collector.go index 
2ed5d01f1..5a574011c 100644 --- a/metric/collector.go +++ b/metric/collector.go @@ -54,7 +54,7 @@ func NewNodeCollector() (*NodeCollector, error) { collectors["cpu"], _ = NewCPUCollector() collectors["diskstats"], _ = NewDiskstatsCollector() collectors["filesystem"], _ = NewFilesystemCollector() - collectors["ntp"], _ = NewNtpCollector() + //collectors["ntp"], _ = NewNtpCollector() collectors["loadavg"], _ = NewLoadavgCollector() return &NodeCollector{Collectors: collectors}, nil From 333010294f076106dbb81f14f70e9f445aac79c1 Mon Sep 17 00:00:00 2001 From: auxten Date: Sun, 6 Jan 2019 22:26:55 +0800 Subject: [PATCH 041/302] Refactor rpc error log to errors.Wrap or errors.Wrapf --- rpc/client.go | 31 +++++++++++++----------------- rpc/pool.go | 13 +++++++------ rpc/rpcutil.go | 46 +++++++++++++++++++-------------------------- rpc/server.go | 16 ++++++++-------- rpc/sharedsecret.go | 22 +++++++++++----------- 5 files changed, 58 insertions(+), 70 deletions(-) diff --git a/rpc/client.go b/rpc/client.go index cfdf15593..2fa769e5c 100644 --- a/rpc/client.go +++ b/rpc/client.go @@ -21,15 +21,15 @@ import ( "net" "net/rpc" + "github.com/pkg/errors" + mux "github.com/xtaci/smux" + "github.com/CovenantSQL/CovenantSQL/crypto/etls" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/utils" - "github.com/CovenantSQL/CovenantSQL/utils/log" - "github.com/pkg/errors" - mux "github.com/xtaci/smux" ) const ( @@ -61,7 +61,7 @@ func init() { func dial(network, address string, remoteNodeID *proto.RawNodeID, cipher *etls.Cipher, isAnonymous bool) (c *etls.CryptoConn, err error) { conn, err := net.Dial(network, address) if err != nil { - log.WithField("addr", address).WithError(err).Error("connect to node failed") + err = errors.Wrapf(err, "connect to node %s failed", address) return } writeBuf := make([]byte, 
ETLSHeaderSize) @@ -76,12 +76,12 @@ func dial(network, address string, remoteNodeID *proto.RawNodeID, cipher *etls.C var nonce *cpuminer.Uint256 nodeIDBytes, err = kms.GetLocalNodeIDBytes() if err != nil { - log.WithError(err).Error("get local node id failed") + err = errors.Wrap(err, "get local node id failed") return } nonce, err = kms.GetLocalNonce() if err != nil { - log.WithError(err).Error("get local nonce failed") + err = errors.Wrap(err, "get local nonce failed") return } copy(writeBuf[2:2+hash.HashSize], nodeIDBytes) @@ -89,7 +89,7 @@ func dial(network, address string, remoteNodeID *proto.RawNodeID, cipher *etls.C } wrote, err := conn.Write(writeBuf) if err != nil { - log.WithError(err).Error("write node id and nonce failed") + err = errors.Wrap(err, "write node id and nonce failed") return } @@ -109,17 +109,16 @@ func DialToNode(nodeID proto.NodeID, pool *SessionPool, isAnonymous bool) (conn var sess *mux.Session ETLSConn, err = dialToNodeEx(nodeID, isAnonymous) if err != nil { - log.WithField("target", nodeID).WithError(err).Error("dialToNode failed") return } sess, err = mux.Client(ETLSConn, YamuxConfig) if err != nil { - log.WithField("target", nodeID).WithError(err).Error("init yamux client failed") + err = errors.Wrapf(err, "init yamux client to %s failed", nodeID) return } conn, err = sess.OpenStream() if err != nil { - log.WithField("target", nodeID).WithError(err).Error("open new session failed") + err = errors.Wrapf(err, "open new session to %s failed", nodeID) } return } @@ -153,23 +152,19 @@ func dialToNodeEx(nodeID proto.NodeID, isAnonymous bool) (conn net.Conn, err err */ symmetricKey, err := GetSharedSecretWith(rawNodeID, isAnonymous) if err != nil { - log.WithField("target", rawNodeID.String()).WithError(err).Error("get shared secret failed") return } nodeAddr, err := GetNodeAddr(rawNodeID) if err != nil { - log.WithField("target", rawNodeID.String()).WithError(err).Error("resolve node failed") + err = errors.Wrapf(err, "resolve %s failed", 
rawNodeID.String()) return } cipher := etls.NewCipher(symmetricKey) conn, err = dial("tcp", nodeAddr, rawNodeID, cipher, isAnonymous) if err != nil { - log.WithFields(log.Fields{ - "target": rawNodeID.String(), - "addr": nodeAddr, - }).WithError(err).Error("connect failed") + err = errors.Wrapf(err, "connect %s %s failed", rawNodeID.String(), nodeAddr) return } @@ -199,13 +194,13 @@ func InitClientConn(conn net.Conn) (client *Client, err error) { var sess *mux.Session sess, err = mux.Client(conn, YamuxConfig) if err != nil { - log.WithError(err).Error("init yamux client failed") + err = errors.Wrap(err, "init mux client failed") return } muxConn, err = sess.OpenStream() if err != nil { - log.WithError(err).Error("open stream failed") + err = errors.Wrap(err, "open stream failed") return } } diff --git a/rpc/pool.go b/rpc/pool.go index ad3f3c90f..1611c1ab1 100644 --- a/rpc/pool.go +++ b/rpc/pool.go @@ -20,9 +20,10 @@ import ( "net" "sync" - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/pkg/errors" mux "github.com/xtaci/smux" + + "github.com/CovenantSQL/CovenantSQL/proto" ) // SessPool is the session pool interface @@ -105,7 +106,7 @@ func (p *SessionPool) LoadOrStore(id proto.NodeID, newSess *Session) (sess *Sess sess, exist := p.sessions[id] if exist { p.Unlock() - log.WithField("node", id).Debug("load session for target node") + //log.WithField("node", id).Debug("load session for target node") loaded = true } else { p.sessions[id] = newSess @@ -132,7 +133,7 @@ func (p *SessionPool) Get(id proto.NodeID) (conn net.Conn, err error) { //log.WithField("node", id).Debug("reusing session") return } - log.WithField("target", id).WithError(err).Error("open session failed") + //log.WithField("target", id).WithError(err).Debug("open session failed") p.Remove(id) } @@ -140,13 +141,13 @@ func (p *SessionPool) Get(id proto.NodeID) (conn net.Conn, err error) { // Can't find existing Session, try to dial one newConn, err 
:= p.nodeDialer(id) if err != nil { - log.WithField("node", id).WithError(err).Error("dial new session failed") + err = errors.Wrapf(err, "dial new session to %s failed", id) return } newSess, err := toSession(id, newConn) if err != nil { newConn.Close() - log.WithField("node", id).WithError(err).Error("dial new session failed") + err = errors.Wrapf(err, "create new session to %s failed", id) return } sess, loaded := p.LoadOrStore(id, newSess) diff --git a/rpc/rpcutil.go b/rpc/rpcutil.go index c5ee4329e..839c61868 100644 --- a/rpc/rpcutil.go +++ b/rpc/rpcutil.go @@ -18,7 +18,6 @@ package rpc import ( "context" - "errors" "io" "math/rand" "net" @@ -30,6 +29,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/pkg/errors" mux "github.com/xtaci/smux" ) @@ -66,17 +66,16 @@ func (c *PersistentCaller) initClient(isAnonymous bool) (err error) { c.Lock() defer c.Unlock() if c.client == nil { - log.Debug("init new rpc client") var conn net.Conn conn, err = DialToNode(c.TargetID, c.pool, isAnonymous) if err != nil { - log.WithField("target", c.TargetID).WithError(err).Error("dial to node failed") + err = errors.Wrap(err, "dial to node failed") return } //conn.SetDeadline(time.Time{}) c.client, err = InitClientConn(conn) if err != nil { - log.WithError(err).Error("init RPC client failed") + err = errors.Wrap(err, "init RPC client failed") return } } @@ -87,7 +86,7 @@ func (c *PersistentCaller) initClient(isAnonymous bool) (err error) { func (c *PersistentCaller) Call(method string, args interface{}, reply interface{}) (err error) { err = c.initClient(method == route.DHTPing.String()) if err != nil { - log.WithError(err).Error("init PersistentCaller client failed") + err = errors.Wrap(err, "init PersistentCaller client failed") return } err = c.client.Call(method, args, reply) @@ -101,10 +100,11 @@ func (c *PersistentCaller) Call(method string, args interface{}, reply 
interface // if got EOF, retry once reconnectErr := c.ResetClient(method) if reconnectErr != nil { - log.WithField("rpc", method).WithError(reconnectErr).Error("reconnect failed") + err = errors.Wrap(reconnectErr, "reconnect failed") } } - log.WithField("rpc", method).WithError(err).Error("call RPC failed") + err = errors.Wrap(err, "call RPC failed") + return } return } @@ -160,7 +160,7 @@ func (c *Caller) CallNodeWithContext( ctx context.Context, node proto.NodeID, method string, args interface{}, reply interface{}) (err error) { conn, err := DialToNode(node, c.pool, method == route.DHTPing.String()) if err != nil { - log.WithField("node", node).WithError(err).Error("dial to node failed") + err = errors.Wrapf(err, "dial to node %s failed", node) return } @@ -175,7 +175,7 @@ func (c *Caller) CallNodeWithContext( client, err := InitClientConn(conn) if err != nil { - log.WithError(err).Error("init RPC client failed") + err = errors.Wrap(err, "init RPC client failed") return } @@ -202,7 +202,7 @@ func GetNodeAddr(id *proto.RawNodeID) (addr string, err error) { if err == route.ErrUnknownNodeID { BPs := route.GetBPs() if len(BPs) == 0 { - log.Error("no available BP") + err = errors.New("no available BP") return } client := NewCaller() @@ -215,10 +215,7 @@ func GetNodeAddr(id *proto.RawNodeID) (addr string, err error) { method := "DHT.FindNode" err = client.CallNode(bp, method, reqFN, respFN) if err != nil { - log.WithFields(log.Fields{ - "bpNode": bp, - "rpc": method, - }).WithError(err).Error("call dht rpc failed") + err = errors.Wrapf(err, "call dht rpc %s to %s failed", method, bp) return } route.SetNodeAddrCache(id, respFN.Node.Addr) @@ -232,11 +229,11 @@ func GetNodeAddr(id *proto.RawNodeID) (addr string, err error) { func GetNodeInfo(id *proto.RawNodeID) (nodeInfo *proto.Node, err error) { nodeInfo, err = kms.GetNodeInfo(proto.NodeID(id.String())) if err != nil { - log.WithField("target", id.String()).WithError(err).Info("get node info from KMS failed") + 
//log.WithField("target", id.String()).WithError(err).Info("get node info from KMS failed") if err == kms.ErrKeyNotFound { BPs := route.GetBPs() if len(BPs) == 0 { - log.Error("no available BP") + err = errors.New("no available BP") return } client := NewCaller() @@ -248,10 +245,7 @@ func GetNodeInfo(id *proto.RawNodeID) (nodeInfo *proto.Node, err error) { method := "DHT.FindNode" err = client.CallNode(bp, method, reqFN, respFN) if err != nil { - log.WithFields(log.Fields{ - "bpNode": bp, - "rpc": method, - }).WithError(err).Error("call dht rpc failed") + err = errors.Wrapf(err, "call dht rpc %s to %s failed", method, bp) return } nodeInfo = respFN.Node @@ -279,11 +273,9 @@ func PingBP(node *proto.Node, BPNodeID proto.NodeID) (err error) { resp := new(proto.PingResp) err = client.CallNode(BPNodeID, "DHT.Ping", req, resp) if err != nil { - log.WithError(err).Error("call DHT.Ping failed") + err = errors.Wrap(err, "call DHT.Ping failed") return } - log.Debugf("PingBP resp: %#v", resp) - return } @@ -328,16 +320,16 @@ func GetCurrentBP() (bpNodeID proto.NodeID, err error) { } if len(res.Nodes) <= 0 { - log.Error("get no hash nearest block producer nodes") // node not found - err = ErrNoChiefBlockProducerAvailable + err = errors.Wrapf(ErrNoChiefBlockProducerAvailable, + "get no hash nearest block producer nodes") return } if res.Nodes[0].Role != proto.Leader && res.Nodes[0].Role != proto.Follower { - log.Error("no suitable nodes with proper block producer role") // not block producer - err = ErrNoChiefBlockProducerAvailable + err = errors.Wrap(ErrNoChiefBlockProducerAvailable, + "no suitable nodes with proper block producer role") return } diff --git a/rpc/server.go b/rpc/server.go index 693f3771a..630485600 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -65,13 +65,13 @@ func (s *Server) InitRPCServer( err = kms.InitLocalKeyPair(privateKeyPath, masterKey) if err != nil { - log.WithError(err).Error("init local key pair failed") + err = errors.Wrap(err, "init local key 
pair failed") return } l, err := etls.NewCryptoListener("tcp", addr, handleCipher) if err != nil { - log.WithError(err).Error("create crypto listener failed") + err = errors.Wrap(err, "create crypto listener failed") return } @@ -131,7 +131,7 @@ func (s *Server) handleConn(conn net.Conn) { sess, err := mux.Server(conn, YamuxConfig) if err != nil { - log.Error(err) + err = errors.Wrap(err, "create mux server failed") return } defer sess.Close() @@ -146,9 +146,9 @@ sessionLoop: muxConn, err := sess.AcceptStream() if err != nil { if err == io.EOF { - log.WithField("remote", remoteNodeID).Debug("session connection closed") + //log.WithField("remote", remoteNodeID).Debug("session connection closed") } else { - log.WithField("remote", remoteNodeID).WithError(err).Error("session accept failed") + err = errors.Wrapf(err, "session accept failed, remote: %s", remoteNodeID) } break sessionLoop } @@ -180,8 +180,8 @@ func handleCipher(conn net.Conn) (cryptoConn *etls.CryptoConn, err error) { // NodeID + Uint256 Nonce headerBuf := make([]byte, ETLSHeaderSize) rCount, err := conn.Read(headerBuf) - if err != nil || rCount != ETLSHeaderSize { - log.WithError(err).Error("read node header error") + if err != nil { + err = errors.Wrap(err, "read node header error") return } if headerBuf[0] != etls.ETLSMagicBytes[0] || headerBuf[1] != etls.ETLSMagicBytes[1] { @@ -200,7 +200,7 @@ func handleCipher(conn net.Conn) (cryptoConn *etls.CryptoConn, err error) { rawNodeID.IsEqual(&kms.AnonymousRawNodeID.Hash), ) if err != nil { - log.WithField("target", rawNodeID.String()).WithError(err).Error("get shared secret") + err = errors.Wrapf(err, "get shared secret, target: %s", rawNodeID.String()) return } cipher := etls.NewCipher(symmetricKey) diff --git a/rpc/sharedsecret.go b/rpc/sharedsecret.go index 86d150464..9ed67ec0c 100644 --- a/rpc/sharedsecret.go +++ b/rpc/sharedsecret.go @@ -17,15 +17,15 @@ package rpc import ( - "fmt" "sync" + "github.com/pkg/errors" + 
"github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" - "github.com/CovenantSQL/CovenantSQL/utils/log" ) var symmetricKeyCache sync.Map @@ -34,7 +34,7 @@ var symmetricKeyCache sync.Map func GetSharedSecretWith(nodeID *proto.RawNodeID, isAnonymous bool) (symmetricKey []byte, err error) { if isAnonymous { symmetricKey = []byte(`!&\\!qEyey*\cbLc,aKl`) - log.Debug("using anonymous ETLS") + //log.Debug("using anonymous ETLS") } else { symmetricKeyI, ok := symmetricKeyCache.Load(nodeID) if ok { @@ -46,7 +46,7 @@ func GetSharedSecretWith(nodeID *proto.RawNodeID, isAnonymous bool) (symmetricKe } else if conf.RoleTag[0] == conf.BlockProducerBuildTag[0] { remotePublicKey, err = kms.GetPublicKey(proto.NodeID(nodeID.String())) if err != nil { - log.WithField("node", nodeID).WithError(err).Error("get public key locally failed") + err = errors.Wrapf(err, "get public key locally failed, node: %s", nodeID) return } } else { @@ -54,7 +54,7 @@ func GetSharedSecretWith(nodeID *proto.RawNodeID, isAnonymous bool) (symmetricKe var nodeInfo *proto.Node nodeInfo, err = GetNodeInfo(nodeID) if err != nil { - log.WithField("node", nodeID).WithError(err).Error("get public key failed") + err = errors.Wrapf(err, "get public key failed, node: %s", nodeID) return } remotePublicKey = nodeInfo.PublicKey @@ -63,17 +63,17 @@ func GetSharedSecretWith(nodeID *proto.RawNodeID, isAnonymous bool) (symmetricKe var localPrivateKey *asymmetric.PrivateKey localPrivateKey, err = kms.GetLocalPrivateKey() if err != nil { - log.WithError(err).Error("get local private key failed") + err = errors.Wrap(err, "get local private key failed") return } symmetricKey = asymmetric.GenECDHSharedSecret(localPrivateKey, remotePublicKey) symmetricKeyCache.Store(nodeID, symmetricKey) - log.WithFields(log.Fields{ - "node": nodeID.String(), - "remotePub": 
fmt.Sprintf("%#x", remotePublicKey.Serialize()), - "sessionKey": fmt.Sprintf("%#x", symmetricKey), - }).Debug("generated shared secret") + //log.WithFields(log.Fields{ + // "node": nodeID.String(), + // "remotePub": fmt.Sprintf("%#x", remotePublicKey.Serialize()), + // "sessionKey": fmt.Sprintf("%#x", symmetricKey), + //}).Debug("generated shared secret") } //log.Debugf("ECDH for %s Public Key: %x, Private Key: %x Session Key: %x", // nodeID.ToNodeID(), remotePublicKey.Serialize(), localPrivateKey.Serialize(), symmetricKey) From 9761803f4f185a60a104b9ed33e9058cfe8d2ac3 Mon Sep 17 00:00:00 2001 From: auxten Date: Sun, 6 Jan 2019 22:41:04 +0800 Subject: [PATCH 042/302] Fix typo --- rpc/rpcutil.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rpc/rpcutil.go b/rpc/rpcutil.go index 839c61868..e72000a40 100644 --- a/rpc/rpcutil.go +++ b/rpc/rpcutil.go @@ -103,7 +103,7 @@ func (c *PersistentCaller) Call(method string, args interface{}, reply interface err = errors.Wrap(reconnectErr, "reconnect failed") } } - err = errors.Wrap(err, "call RPC failed") + err = errors.Wrapf(err, "call %s failed", method) return } return @@ -321,7 +321,7 @@ func GetCurrentBP() (bpNodeID proto.NodeID, err error) { if len(res.Nodes) <= 0 { // node not found - err = errors.Wrapf(ErrNoChiefBlockProducerAvailable, + err = errors.Wrap(ErrNoChiefBlockProducerAvailable, "get no hash nearest block producer nodes") return } From b64a8d875ec5f98ef25bf987a4bbc7a06cf4d4ea Mon Sep 17 00:00:00 2001 From: auxten Date: Mon, 7 Jan 2019 00:06:29 +0800 Subject: [PATCH 043/302] Fix missing .PHONY --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index e85983717..43d6b462f 100644 --- a/Makefile +++ b/Makefile @@ -217,5 +217,5 @@ clean: rm -rf bin/cql* .PHONY: status start stop logs push push_testnet clean \ - bin/cqld.test bin/cqld bin/cql-minerd.test bin/cql-minerd bin/cql-utils \ + bin/cqld.test bin/cqld bin/cql-minerd.test bin/cql-minerd 
bin/cql-utils bin/cql-observer bin/cql-observer.test \ bin/cql bin/cql-fuse bin/cql-adapter bin/cql-mysql-adapter bin/cql-faucet bin/cql-explorer From 556c53f14037dd4a26ad8def6f0b1f4e133f9c80 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Mon, 7 Jan 2019 00:29:25 +0800 Subject: [PATCH 044/302] Fix RPC client unmarshal error on Block.Timestamp --- api/models/blocks.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/api/models/blocks.go b/api/models/blocks.go index 52f543ab9..707a84548 100644 --- a/api/models/blocks.go +++ b/api/models/blocks.go @@ -14,8 +14,8 @@ type BlocksModel struct{} type Block struct { Height int `db:"height" json:"height"` // pk Hash string `db:"hash" json:"hash"` - Timestamp int64 `db:"timestamp" json:"-"` - TimestampHuman time.Time `db:"-" json:"timestamp"` + Timestamp int64 `db:"timestamp" json:"timestamp"` + TimestampHuman time.Time `db:"-" json:"timestamp_human"` Version int32 `db:"version" json:"version"` Producer string `db:"producer" json:"producer"` MerkleRoot string `db:"merkle_root" json:"merkle_root"` @@ -32,7 +32,7 @@ func (b *Block) PostGet(s gorp.SqlExecutor) error { // GetBlockList get a list of blocks with height in [from, to). func (m *BlocksModel) GetBlockList(from, to int) (blocks []*Block, err error) { query := `SELECT height, hash, timestamp, version, producer, merkle_root, parent, tx_count - FROM indexed_blocks WHERE height >= ? and height < ?` + FROM indexed_blocks WHERE height >= ? and height < ? 
ORDER BY height DESC` _, err = chaindb.Select(&blocks, query, from, to) return blocks, err } From 58957966e0d7755493f1f513ad8a9e87235e8d2a Mon Sep 17 00:00:00 2001 From: Ggicci Date: Mon, 7 Jan 2019 00:32:45 +0800 Subject: [PATCH 045/302] Fix bp_getTransactionList bug --- api/models/transactions.go | 8 ++++---- api/transactions.go | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/api/models/transactions.go b/api/models/transactions.go index e736a71f2..302f1b0f9 100644 --- a/api/models/transactions.go +++ b/api/models/transactions.go @@ -18,8 +18,8 @@ type Transaction struct { TxIndex int `db:"tx_index" json:"index"` // pk2 Hash string `db:"hash" json:"hash"` BlockHash string `db:"block_hash" json:"block_hash"` - Timestamp int64 `db:"timestamp" json:"-"` - TimestampHuman time.Time `db:"-" json:"timestamp"` + Timestamp int64 `db:"timestamp" json:"timestamp"` + TimestampHuman time.Time `db:"-" json:"timestamp_human"` TxType int `db:"tx_type" json:"type"` Address string `db:"address" json:"address"` Raw string `db:"raw" json:"raw"` @@ -64,10 +64,10 @@ func (m *TransactionsModel) GetTransactionList(since, direction string, limit in query := fmt.Sprintf(`SELECT block_height, tx_index, hash, block_hash, timestamp, tx_type, address, raw FROM indexed_transactions - WHERE block_height %s ? and tx_index %s ? + WHERE block_height %s ? or (block_height = ? and tx_index %s ?) 
ORDER BY block_height %s, tx_index %s LIMIT ?`, compare, compare, orderBy, orderBy) - _, err = chaindb.Select(&txs, query, tx.BlockHeight, tx.TxIndex, limit) + _, err = chaindb.Select(&txs, query, tx.BlockHeight, tx.BlockHeight, tx.TxIndex, limit) return txs, err } diff --git a/api/transactions.go b/api/transactions.go index fd932678b..f613de2db 100644 --- a/api/transactions.go +++ b/api/transactions.go @@ -45,7 +45,7 @@ type bpGetTransactionByHashParams struct { func bpGetTransactionByHash(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) ( result interface{}, err error, ) { - params := ctx.Value("_params").(*bpGetBlockByHashParams) + params := ctx.Value("_params").(*bpGetTransactionByHashParams) model := models.TransactionsModel{} return model.GetTransactionByHash(params.Hash) } From 153a5c000301efdbd8840b6c280459a318d4e257 Mon Sep 17 00:00:00 2001 From: auxten Date: Mon, 7 Jan 2019 00:35:03 +0800 Subject: [PATCH 046/302] Fix err == kms.ErrKeyNotFound --- rpc/rpcutil.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rpc/rpcutil.go b/rpc/rpcutil.go index e72000a40..4873ea8aa 100644 --- a/rpc/rpcutil.go +++ b/rpc/rpcutil.go @@ -230,7 +230,7 @@ func GetNodeInfo(id *proto.RawNodeID) (nodeInfo *proto.Node, err error) { nodeInfo, err = kms.GetNodeInfo(proto.NodeID(id.String())) if err != nil { //log.WithField("target", id.String()).WithError(err).Info("get node info from KMS failed") - if err == kms.ErrKeyNotFound { + if errors.Cause(err) == kms.ErrKeyNotFound { BPs := route.GetBPs() if len(BPs) == 0 { err = errors.New("no available BP") From 3eeffbe3bb93d5ba16cae26c7bd434a820f2799d Mon Sep 17 00:00:00 2001 From: Ggicci Date: Mon, 7 Jan 2019 00:35:12 +0800 Subject: [PATCH 047/302] Upgrade jsonrpc.Service's bootstrap workflow 1. add StopServersAndWait to Service instance; 2. export api.models.OpenSQLiteDBAsGorp function for convenient use; 3. update cqld's bootstrap. 
--- api/models/models.go | 24 ++++++++++++++++++++---- api/service.go | 19 ++++++++++++++++--- cmd/cqld/bootstrap.go | 11 +++++------ 3 files changed, 41 insertions(+), 13 deletions(-) diff --git a/api/models/models.go b/api/models/models.go index f918bc2ea..cf4e295d5 100644 --- a/api/models/models.go +++ b/api/models/models.go @@ -18,16 +18,32 @@ func InitModels(dbFile string) error { return initChainDBConnection(dbFile) } -func initChainDBConnection(dbFile string) error { - dsn := fmt.Sprintf("%s?_journal=WAL&mode=ro", dbFile) +// OpenSQLiteDBAsGorp opens a sqlite database and wraps it in gorp.DbMap. +func OpenSQLiteDBAsGorp(dbFile, mode string, maxOpen, maxIdle int) (db *gorp.DbMap, err error) { + dsn := fmt.Sprintf("%s?_journal=WAL&mode=%s", dbFile, mode) underdb, err := sql.Open("sqlite3", dsn) if err != nil { - return errors.WithMessage(err, "unable to open chain.db") + return nil, errors.Wrapf(err, "unable to open database %q", dsn) } - chaindb = &gorp.DbMap{ + underdb.SetMaxOpenConns(maxOpen) + underdb.SetMaxIdleConns(maxIdle) + + if err := underdb.Ping(); err != nil { + return nil, errors.Wrapf(err, "ping to database %q failed", dsn) + } + + db = &gorp.DbMap{ Db: underdb, Dialect: gorp.SqliteDialect{}, } + return db, nil +} + +func initChainDBConnection(dbFile string) (err error) { + chaindb, err = OpenSQLiteDBAsGorp(dbFile, "ro", 100, 30) + if err != nil { + return err + } // register tables chaindb.AddTableWithName(Block{}, "indexed_blocks").SetKeys(false, "Height") diff --git a/api/service.go b/api/service.go index ec7dfcc2f..603387a80 100644 --- a/api/service.go +++ b/api/service.go @@ -11,7 +11,6 @@ import ( "time" "github.com/CovenantSQL/CovenantSQL/api/models" - "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/gorilla/websocket" "github.com/pkg/errors" @@ -27,6 +26,15 @@ type Service struct { WriteTimeout time.Duration stopChan chan struct{} + stopped chan struct{} +} + +// NewService creates a new Service. 
+func NewService() *Service { + return &Service{ + stopChan: make(chan struct{}), + stopped: make(chan struct{}), + } } // StartServers start API servers in a non-blocking way, fatal on errors. @@ -39,6 +47,12 @@ func (s *Service) StopServers() { close(s.stopChan) } +// StopServersAndWait wait servers to stop. +func (s *Service) StopServersAndWait() { + s.StopServers() + <-s.stopped +} + // RunServers start API servers in a blocking way, fatal on errors. func (s *Service) RunServers() { // setup database @@ -46,8 +60,6 @@ func (s *Service) RunServers() { log.WithError(err).Fatal("api: init models failed") return } - - s.stopChan = make(chan struct{}) wg := sync.WaitGroup{} if s.WebsocketAddr != "" { @@ -119,4 +131,5 @@ func (s *Service) runWebsocketServer(wg *sync.WaitGroup) { } cancel() log.Warn("api: websocket server stopped") + close(s.stopped) } diff --git a/cmd/cqld/bootstrap.go b/cmd/cqld/bootstrap.go index 1570113ae..697da002a 100644 --- a/cmd/cqld/bootstrap.go +++ b/cmd/cqld/bootstrap.go @@ -159,12 +159,11 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { // start json-rpc server if wsapiAddr != "" { - jsonrpcServer := &api.Service{ - DBFile: conf.GConf.BP.ChainFileName, - WebsocketAddr: wsapiAddr, - ReadTimeout: 60 * time.Second, - WriteTimeout: 60 * time.Second, - } + jsonrpcServer := api.NewService() + jsonrpcServer.DBFile = conf.GConf.BP.ChainFileName + jsonrpcServer.WebsocketAddr = wsapiAddr + jsonrpcServer.ReadTimeout = 60 * time.Second + jsonrpcServer.WriteTimeout = 60 * time.Second jsonrpcServer.StartServers() } From 7428dce82bdfefb725e4d166a6e037dc3e3f630d Mon Sep 17 00:00:00 2001 From: Ggicci Date: Mon, 7 Jan 2019 00:38:11 +0800 Subject: [PATCH 048/302] Add test cases for jsonrpc service --- api/service_test.go | 452 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 439 insertions(+), 13 deletions(-) diff --git a/api/service_test.go b/api/service_test.go index 1b2fa0782..e9ab62306 100644 --- a/api/service_test.go +++ 
b/api/service_test.go @@ -1,15 +1,441 @@ package api_test -// import ( -// "testing" -// -// "github.com/CovenantSQL/CovenantSQL/api" -// ) -// -// func TestService(t *testing.T) { -// service := &api.Service{ -// WebsocketAddr: ":8546", -// } -// service.RunServers() -// // TODO -// } +import ( + "context" + "fmt" + "os" + "path/filepath" + "strconv" + "testing" + + "github.com/CovenantSQL/CovenantSQL/api" + "github.com/CovenantSQL/CovenantSQL/api/models" + "github.com/pkg/errors" + + "github.com/gorilla/websocket" + . "github.com/smartystreets/goconvey/convey" + "github.com/sourcegraph/jsonrpc2" + wsstream "github.com/sourcegraph/jsonrpc2/websocket" +) + +const ( + bpA = "9jt00yI91HQ4bCdFfkXWeg" + bpB = "3ToG8OstmKcWCzLXRy2K0w" + addrA = "9JvxiUpBFpkUCCiYf84OCw" + addrB = "I4TezPRXrdBZM9Mp7cr3Gw" +) + +var ( + testdb, _ = filepath.Abs("./testdb.db3") + + ddls = []string{ + `CREATE TABLE IF NOT EXISTS "indexed_blocks" ( + "height" INTEGER PRIMARY KEY, + "hash" TEXT, + "timestamp" INTEGER, + "version" INTEGER, + "producer" TEXT, + "merkle_root" TEXT, + "parent" TEXT, + "tx_count" INTEGER + );`, + + `CREATE INDEX IF NOT EXISTS "idx__indexed_blocks__hash" ON "indexed_blocks" ("hash");`, + `CREATE INDEX IF NOT EXISTS "idx__indexed_blocks__timestamp" ON "indexed_blocks" ("timestamp" DESC);`, + + `CREATE TABLE IF NOT EXISTS "indexed_transactions" ( + "block_height" INTEGER, + "tx_index" INTEGER, + "hash" TEXT, + "block_hash" TEXT, + "timestamp" INTEGER, + "tx_type" INTEGER, + "address" TEXT, + "raw" TEXT, + PRIMARY KEY ("block_height", "tx_index") + );`, + + `CREATE INDEX IF NOT EXISTS "idx__indexed_transactions__hash" ON "indexed_transactions" ("hash");`, + `CREATE INDEX IF NOT EXISTS "idx__indexed_transactions__block_hash" ON "indexed_transactions" ("block_hash");`, + `CREATE INDEX IF NOT EXISTS "idx__indexed_transactions__timestamp" ON "indexed_transactions" ("timestamp" DESC);`, + `CREATE INDEX IF NOT EXISTS "idx__indexed_transactions__tx_type__timestamp" ON 
"indexed_transactions" ("tx_type", "timestamp" DESC);`, + `CREATE INDEX IF NOT EXISTS "idx__indexed_transactions__address__timestamp" ON "indexed_transactions" ("address", "timestamp" DESC);`, + } + + blocksMockData = [][]interface{}{ + {1, "HGGcDJqO7tuZWwJyFxRl9g", 1546589042828174631, 1, bpA, "apple", "0000000000000000000000", 0}, + {2, "pfp8ZcSwhg15W2YSaooX8g", 1546589042482919184, 1, bpA, "apple", "HGGcDJqO7tuZWwJyFxRl9g", 1}, + {3, "NP5Ze1z8hfdG5_G8StXYLw", 1546589042010844731, 1, bpA, "apple", "pfp8ZcSwhg15W2YSaooX8g", 0}, + {4, "gZpo0Y_Wh9u6TxAnFWmiMQ", 1546589042185749429, 1, bpA, "apple", "NP5Ze1z8hfdG5_G8StXYLw", 0}, + {5, "mXMsSXd0OY5MocYl3b5r4Q", 1546589042858585920, 1, bpA, "apple", "gZpo0Y_Wh9u6TxAnFWmiMQ", 0}, + {6, "K7aFl5KIW_xKrUmfpJt6Zg", 1546590006812948193, 1, bpB, "google", "mXMsSXd0OY5MocYl3b5r4Q", 0}, + {7, "iTbk_EvsiprSwLLpC9LOgg", 1546590006885392010, 1, bpB, "google", "K7aFl5KIW_xKrUmfpJt6Zg", 5}, + {8, "RjbeqFM8weHtCSoL_pKurQ", 1546590006585839201, 1, bpB, "google", "iTbk_EvsiprSwLLpC9LOgg", 0}, + {9, "IPS7_Ttp7vdcice8EAWx0g", 1546590006919858504, 1, bpB, "google", "RjbeqFM8weHtCSoL_pKurQ", 0}, + {10, "er05e7FvAZOP3gP5_w_RKw", 1546590006857575843, 1, bpB, "google", "IPS7_Ttp7vdcice8EAWx0g", 3}, + {11, "f0_Dk_vFItabbmcnxNxrTA", 1546590200951918474, 1, bpB, "google", "er05e7FvAZOP3gP5_w_RKw", 0}, + {12, "1pkuZ0pk1d4lzItxrA73KQ", 1546590208582918459, 1, bpB, "google", "f0_Dk_vFItabbmcnxNxrTA", 0}, + {13, "WbhKd7fPzX2Mr8JFyVOljw", 1546590200101838483, 1, bpB, "google", "1pkuZ0pk1d4lzItxrA73KQ", 0}, + {14, "niLUTZpEpOWpPx011bZGlg", 1546590200058583818, 1, bpB, "google", "WbhKd7fPzX2Mr8JFyVOljw", 0}, + } + + transactionsMockData = [][]interface{}{ + {2, 0, "o362ksNHl8gIL4cbXjkMEQ", "pfp8ZcSwhg15W2YSaooX8g", 1546591119847974875, 1, addrA, `{}`}, + {7, 0, "CKI1kAfqOxWpmUug23OxTQ", "iTbk_EvsiprSwLLpC9LOgg", 1546591304102924848, 1, addrA, `{}`}, + {7, 1, "nLwnh4a9oiOG9n4FtgboRw", "iTbk_EvsiprSwLLpC9LOgg", 1546591304284859585, 4, addrB, `{}`}, + {7, 
2, "mrsmkMHz1mcXwsOJDakLxA", "iTbk_EvsiprSwLLpC9LOgg", 1546591304583827173, 2, addrB, `{}`}, + {7, 3, "YrJ64M2odTb96B4VHIWCMw", "iTbk_EvsiprSwLLpC9LOgg", 1546591304847472713, 2, addrA, `{}`}, + {7, 4, "7iCSm4vy4FvAapGCT2p9MA", "iTbk_EvsiprSwLLpC9LOgg", 1546591304901837474, 1, addrB, `{}`}, + {10, 0, "U1s0IRuyLd3iw8PdlAKv4A", "er05e7FvAZOP3gP5_w_RKw", 1546591421847471717, 1, addrA, `{}`}, + {10, 1, "5MX357EQDlMUxZVPjjXeFQ", "er05e7FvAZOP3gP5_w_RKw", 1546591421791893744, 4, addrB, `{}`}, + {10, 2, "lXTWT_P7NRxMHukZCEUfng", "er05e7FvAZOP3gP5_w_RKw", 1546591421909181774, 2, addrB, `{}`}, + } +) + +func mockData(t *testing.T) { + db, err := models.OpenSQLiteDBAsGorp(testdb, "rw", 5, 2) + if err != nil { + t.Errorf("open testdb failed") + return + } + defer db.Db.Close() + + // create tables + for _, ddlSQL := range ddls { + if i, err := db.Exec(ddlSQL); err != nil { + t.Errorf("execute ddl #%d failed: %v", i, err) + } + } + + var insertRows = func(writeSQL string, data [][]interface{}) error { + for i, row := range data { + if _, err := db.Exec(writeSQL, row...); err != nil { + return errors.Wrapf(err, "write row #%d failed", i) + } + } + return nil + } + + if err := insertRows( + "insert into indexed_blocks values (?,?,?,?,?,?,?,?)", + blocksMockData, + ); err != nil { + t.Errorf("mock data for indexed_blocks failed: %v", err) + } + + if err := insertRows( + "insert into indexed_transactions values (?,?,?,?,?,?,?,?)", + transactionsMockData, + ); err != nil { + t.Errorf("mock data for indexed_transactions failed: %v", err) + } +} + +func setupWebsocketClient(addr string) (client *jsonrpc2.Conn, err error) { + // TODO: dial timeout + conn, _, err := websocket.DefaultDialer.DialContext( + context.Background(), + addr, + nil, + ) + if err != nil { + return nil, err + } + + var connOpts []jsonrpc2.ConnOpt + return jsonrpc2.NewConn( + context.Background(), + wsstream.NewObjectStream(conn), + nil, + connOpts..., + ), nil +} + +type bpGetBlockTestCase struct { + Height int + 
Hash string + ExpectedResult []interface{} +} + +func (c *bpGetBlockTestCase) String() string { + return fmt.Sprintf("fetch block of height %d hashed %q", c.Height, c.Hash) +} + +type bpGetTransactionListTestCase struct { + Since string + Direction string + Limit int + ExpectedResults [][]interface{} +} + +func (c *bpGetTransactionListTestCase) Params() interface{} { + return []interface{}{c.Since, c.Direction, c.Limit} +} + +func (c *bpGetTransactionListTestCase) String() string { + return fmt.Sprintf("fetch %d transactions %s since %s", c.Limit, c.Direction, c.Since) +} + +type bpGetTransactionByHashTestCase struct { + Hash string + ExpectedResult []interface{} +} + +func (c *bpGetTransactionByHashTestCase) String() string { + return fmt.Sprintf("fetch transaction hashed %q", c.Hash) +} + +func TestService(t *testing.T) { + t.Logf("testdb: %s", testdb) + mockData(t) + defer os.Remove(testdb + "-shm") + defer os.Remove(testdb + "-wal") + defer os.Remove(testdb) + + port := 8546 + // log.SetLevel(log.DebugLevel) + service := api.NewService() + service.DBFile = testdb + service.WebsocketAddr = ":" + strconv.Itoa(port) + service.StartServers() + defer service.StopServersAndWait() + + var ( + addr = fmt.Sprintf("ws://localhost:%d", port) + callOpts []jsonrpc2.CallOption + + conveyBlock = func(convey C, item *models.Block, cp []interface{}) { + if cp == nil { + convey.So(item, ShouldBeNil) + return + } + convey.So(item.Height, ShouldEqual, cp[0].(int)) + convey.So(item.Hash, ShouldEqual, cp[1].(string)) + convey.So(item.Timestamp, ShouldEqual, cp[2].(int)) + convey.So(item.TimestampHuman.UnixNano(), ShouldEqual, item.Timestamp) + convey.So(item.Version, ShouldEqual, cp[3].(int)) + convey.So(item.Producer, ShouldEqual, cp[4].(string)) + convey.So(item.MerkleRoot, ShouldEqual, cp[5].(string)) + convey.So(item.Parent, ShouldEqual, cp[6].(string)) + convey.So(item.TxCount, ShouldEqual, cp[7].(int)) + } + + conveyTransaction = func(convey C, item *models.Transaction, cp 
[]interface{}) { + if cp == nil { + convey.So(item, ShouldBeNil) + return + } + + convey.So(item.BlockHeight, ShouldEqual, cp[0].(int)) + convey.So(item.TxIndex, ShouldEqual, cp[1].(int)) + convey.So(item.Hash, ShouldEqual, cp[2].(string)) + convey.So(item.BlockHash, ShouldEqual, cp[3].(string)) + convey.So(item.Timestamp, ShouldEqual, cp[4].(int)) + convey.So(item.TimestampHuman.UnixNano(), ShouldEqual, item.Timestamp) + convey.So(item.TxType, ShouldEqual, cp[5].(int)) + convey.So(item.Address, ShouldEqual, cp[6].(string)) + convey.So(item.Raw, ShouldEqual, cp[7].(string)) + } + ) + + Convey("blocks API", t, func() { + rpc, err := setupWebsocketClient(addr) + if err != nil { + t.Errorf("failed to connect to wsapi server: %v", err) + return + } + + Convey("bp_getBlockList should fail on invalid parameters", func() { + var ( + result []*models.Block + testCases = map[string][]int{ + "to-from < 5": {1, 5}, + "to-from > 100": {1, 102}, + } + ) + + for name, testCase := range testCases { + Convey(name, func() { + err := rpc.Call(context.Background(), "bp_getBlockList", testCase, &result, callOpts...) + So(err, ShouldNotBeNil) + }) + } + + }) + + Convey("bp_getBlockList should success on fetching valid number of blocks", func() { + var ( + result []*models.Block + testCases = [][]int{ + {1, 6}, + {1, 11}, + {2, 9}, + } + ) + + for i, testCase := range testCases { + from, to := testCase[0], testCase[1] + count := to - from + name := fmt.Sprintf("case#%d, fetch %d blocks [%d, %d)", i, count, from, to) + Convey(name, func(c C) { + + err := rpc.Call(context.Background(), "bp_getBlockList", testCase, &result, callOpts...) 
+ So(err, ShouldBeNil) + So(len(result), ShouldEqual, count) + for i, item := range result { + cp := blocksMockData[count+from-2-i] + conveyBlock(c, item, cp) + } + }) + } + }) + + Convey("bp_getBlockByHash should fetch blocks on existed hash and nothing for an non-existed hash", func(c C) { + var ( + result = new(models.Block) + + testCases = []*bpGetBlockTestCase{ + {0, "o362ksNHl8gIL4cbXjkMEQ", nil}, + {0, "HGGcDJqO7tuZWwJyFxRl9g", blocksMockData[0]}, + } + ) + + for i, testCase := range testCases { + Convey(fmt.Sprintf("case#%d: %s", i, testCase.String()), func() { + err := rpc.Call( + context.Background(), + "bp_getBlockByHash", + []interface{}{testCase.Hash}, + &result, + callOpts..., + ) + So(err, ShouldBeNil) + conveyBlock(c, result, testCase.ExpectedResult) + }) + } + }) + + Convey("bp_getBlockByHeight should fetch blocks on existed height and nothing for an non-existed height", func(c C) { + var ( + result = new(models.Block) + + testCases = []*bpGetBlockTestCase{ + {192124141, "", nil}, + {1, "", blocksMockData[0]}, + } + ) + + for i, testCase := range testCases { + Convey(fmt.Sprintf("case#%d: %s", i, testCase.String()), func() { + err := rpc.Call( + context.Background(), + "bp_getBlockByHeight", + []interface{}{testCase.Height}, + &result, + callOpts..., + ) + So(err, ShouldBeNil) + conveyBlock(c, result, testCase.ExpectedResult) + }) + } + }) + + Reset(func() { + // teardown + rpc.Close() + }) + }) + + Convey("transactions API", t, func() { + rpc, err := setupWebsocketClient(addr) + if err != nil { + t.Errorf("failed to connect to wsapi server: %v", err) + return + } + + Convey("bp_getTransactionList should fail on invalid parameters", func() { + var ( + result []*models.Transaction + invalidParameterCases = map[string][]interface{}{ + "limit < 5": {"nLwnh4a9oiOG9n4FtgboRw", "backward", 4}, + "limit > 100": {"nLwnh4a9oiOG9n4FtgboRw", "backward", 101}, + "unknown direction": {"nLwnh4a9oiOG9n4FtgboRw", "unknown", 10}, + } + ) + + for name, testCase := 
range invalidParameterCases { + Convey(name, func() { + err := rpc.Call( + context.Background(), + "bp_getTransactionList", + testCase, + &result, + callOpts..., + ) + So(err, ShouldNotBeNil) + }) + } + }) + + Convey("bp_getTransactionList should success on fetching valid number of transactions", func(c C) { + var ( + result []*models.Transaction + testCases = []bpGetTransactionListTestCase{ + {"5MX357EQDlMUxZVPjjXeFQ", "backward", 5, transactionsMockData[2:7]}, + {"CKI1kAfqOxWpmUug23OxTQ", "backward", 5, transactionsMockData[0:1]}, + {"CKI1kAfqOxWpmUug23OxTQ", "forward", 7, transactionsMockData[2:9]}, + } + ) + + for i, testCase := range testCases { + Convey(fmt.Sprintf("case#%d: %s", i, testCase.String()), func() { + err := rpc.Call( + context.Background(), + "bp_getTransactionList", + testCase.Params(), + &result, + callOpts..., + ) + So(err, ShouldBeNil) + So(len(result), ShouldEqual, len(testCase.ExpectedResults)) + for i, item := range result { + cp := testCase.ExpectedResults[i] + if testCase.Direction == "backward" { + cp = testCase.ExpectedResults[len(result)-i-1] + } + conveyTransaction(c, item, cp) + } + }) + } + }) + + Convey("bp_getTransactionByHash should fetch transactions on existed hash and nothing for an non-existed hash", func(c C) { + var ( + result = new(models.Transaction) + + testCases = []*bpGetTransactionByHashTestCase{ + {"o362ksNHl8gIL4cbXjkMEQ", transactionsMockData[0]}, + {"HGGcDJqO7tuZWwJyFxRl9g", nil}, + } + ) + + for i, testCase := range testCases { + Convey(fmt.Sprintf("case#%d: %s", i, testCase.String()), func() { + err := rpc.Call( + context.Background(), + "bp_getTransactionByHash", + []interface{}{testCase.Hash}, + &result, + callOpts..., + ) + So(err, ShouldBeNil) + conveyTransaction(c, result, testCase.ExpectedResult) + }) + } + }) + + Reset(func() { + rpc.Close() + }) + }) +} From 2404136d9efeaff77e7ddfcd658ff673004552b0 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 7 Jan 2019 00:40:55 +0800 Subject: [PATCH 049/302] Fix 
timeout parameters --- cmd/cqld/bootstrap.go | 2 +- worker/db.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/cqld/bootstrap.go b/cmd/cqld/bootstrap.go index d17fe6036..0cc0ad435 100644 --- a/cmd/cqld/bootstrap.go +++ b/cmd/cqld/bootstrap.go @@ -43,7 +43,7 @@ const ( kayakServiceName = "Kayak" kayakMethodName = "Call" kayakWalFileName = "kayak.ldb" - kayakPrepareTimeout = 10 * time.Second + kayakPrepareTimeout = 5 * time.Second kayakCommitTimeout = time.Minute ) diff --git a/worker/db.go b/worker/db.go index ab88e0526..0adc7d83c 100644 --- a/worker/db.go +++ b/worker/db.go @@ -57,7 +57,7 @@ const ( CommitThreshold = 1.0 // PrepareTimeout defines the prepare timeout config. - PrepareTimeout = 5 * time.Second + PrepareTimeout = 10 * time.Second // CommitTimeout defines the commit timeout config. CommitTimeout = time.Minute From af84ac2782a3db3246818c6e8e0cec436339f994 Mon Sep 17 00:00:00 2001 From: auxten Date: Mon, 7 Jan 2019 00:42:54 +0800 Subject: [PATCH 050/302] Fix typo --- rpc/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rpc/server.go b/rpc/server.go index ceb42436a..21a2e6846 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -111,7 +111,7 @@ serverLoop: if err != nil { continue } - log.WithField("remote", conn.RemoteAddr().String()).Infof("accept") + log.WithField("remote", conn.RemoteAddr().String()).Info("accept") go s.handleConn(conn) } } From 56eb37e7be176bf49ccbf3af02b85e84e19131ed Mon Sep 17 00:00:00 2001 From: laodouya Date: Fri, 4 Jan 2019 14:29:46 +0800 Subject: [PATCH 051/302] Change unit test bp.WaitDatabaseCreation to client.WaitDBCreation. 
--- cmd/cql-fuse/block_test.go | 5 ++--- cmd/cql-minerd/integration_test.go | 10 +++++----- cmd/cql-observer/observation_test.go | 4 ++-- 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/cmd/cql-fuse/block_test.go b/cmd/cql-fuse/block_test.go index 6fed70bc2..05ef68967 100644 --- a/cmd/cql-fuse/block_test.go +++ b/cmd/cql-fuse/block_test.go @@ -49,7 +49,6 @@ import ( bp "github.com/CovenantSQL/CovenantSQL/blockproducer" "github.com/CovenantSQL/CovenantSQL/client" - "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" ) @@ -262,7 +261,7 @@ func initTestDB() (*sql.DB, func()) { log.Errorf("create db failed: %v", err) return nil, stopNodes } - dsnCfg, err := client.ParseDSN(dsn) + _, err = client.ParseDSN(dsn) if err != nil { log.Errorf("parse dsn failed: %v", err) return nil, stopNodes @@ -277,7 +276,7 @@ func initTestDB() (*sql.DB, func()) { // wait for creation var ctx2, cancel2 = context.WithTimeout(context.Background(), 1*time.Minute) defer cancel2() - err = bp.WaitDatabaseCreation(ctx2, proto.DatabaseID(dsnCfg.DatabaseID), db, 3*time.Second) + err = client.WaitDBCreation(ctx2, dsn) if err != nil { log.Errorf("wait for creation failed: %v", err) return nil, stopNodes diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index 04e897266..0f1a5c56c 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -398,7 +398,7 @@ func TestFullProcess(t *testing.T) { // wait for creation var ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() - err = bp.WaitDatabaseCreation(ctx, proto.DatabaseID(dsnCfg.DatabaseID), db, 3*time.Second) + err = client.WaitDBCreation(ctx, dsn) So(err, ShouldBeNil) // check sqlchain profile exist @@ -747,11 +747,11 @@ func benchMiner(b *testing.B, minerCount uint16, bypassSign bool) { So(err, ShouldBeNil) // wait for creation - dsnCfg, err := 
client.ParseDSN(dsn) + _, err = client.ParseDSN(dsn) So(err, ShouldBeNil) var ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() - err = bp.WaitDatabaseCreation(ctx, proto.DatabaseID(dsnCfg.DatabaseID), db, 3*time.Second) + err = client.WaitDBCreation(ctx, dsn) So(err, ShouldBeNil) benchDB(b, db, minerCount > 0) @@ -843,13 +843,13 @@ func benchGNTEMiner(b *testing.B, minerCount uint16, bypassSign bool) { db, err := sql.Open("covenantsql", dsn) So(err, ShouldBeNil) - dsnCfg, err := client.ParseDSN(dsn) + _, err = client.ParseDSN(dsn) So(err, ShouldBeNil) // wait for creation var ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() - err = bp.WaitDatabaseCreation(ctx, proto.DatabaseID(dsnCfg.DatabaseID), db, 3*time.Second) + err = client.WaitDBCreation(ctx, dsn) So(err, ShouldBeNil) benchDB(b, db, minerCount > 0) diff --git a/cmd/cql-observer/observation_test.go b/cmd/cql-observer/observation_test.go index 039f9bf28..0b3226a98 100644 --- a/cmd/cql-observer/observation_test.go +++ b/cmd/cql-observer/observation_test.go @@ -301,7 +301,7 @@ func TestFullProcess(t *testing.T) { dbID = cfg.DatabaseID ctx2, ccl2 = context.WithTimeout(context.Background(), 5*time.Minute) defer ccl2() - err = bp.WaitDatabaseCreation(ctx2, proto.DatabaseID(dbID), db, 3*time.Second) + err = client.WaitDBCreation(ctx2, dsn) So(err, ShouldBeNil) _, err = db.Exec("CREATE TABLE test (test int)") @@ -373,7 +373,7 @@ func TestFullProcess(t *testing.T) { So(dbID, ShouldNotResemble, dbID2) ctx3, ccl3 = context.WithTimeout(context.Background(), 5*time.Minute) defer ccl3() - err = bp.WaitDatabaseCreation(ctx3, proto.DatabaseID(dbID2), db2, 3*time.Second) + err = client.WaitDBCreation(ctx3, dsn2) So(err, ShouldBeNil) _, err = db2.Exec("CREATE TABLE test (test int)") From 1f9cef29e13f3c34739a35bbf66e4f26bc6b5e09 Mon Sep 17 00:00:00 2001 From: laodouya Date: Fri, 4 Jan 2019 18:44:06 +0800 Subject: [PATCH 052/302] WaitDBCreation now wait 
miner create complete. --- client/driver.go | 9 ++++++++- client/driver_test.go | 2 +- cmd/cql-fuse/block_test.go | 5 ----- cmd/cql-minerd/integration_test.go | 5 ----- cmd/cql-observer/observation_test.go | 3 +++ 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/client/driver.go b/client/driver.go index 0bf78092b..a28634d3e 100644 --- a/client/driver.go +++ b/client/driver.go @@ -195,8 +195,15 @@ func WaitDBCreation(ctx context.Context, dsn string) (err error) { if err != nil { return } + + db, err := sql.Open("covenantsql", dsn) + defer db.Close() + if err != nil { + return + } + // wait for creation - err = bp.WaitDatabaseCreation(ctx, proto.DatabaseID(dsnCfg.DatabaseID), nil, 3*time.Second) + err = bp.WaitDatabaseCreation(ctx, proto.DatabaseID(dsnCfg.DatabaseID), db, 3*time.Second) return } diff --git a/client/driver_test.go b/client/driver_test.go index 7425d1f85..94910b899 100644 --- a/client/driver_test.go +++ b/client/driver_test.go @@ -90,7 +90,7 @@ func TestCreate(t *testing.T) { UseLeader: true, }) - waitCtx2, cancelWait2 := context.WithTimeout(context.Background(), time.Minute) + waitCtx2, cancelWait2 := context.WithTimeout(context.Background(), 5*time.Minute) defer cancelWait2() err = WaitDBCreation(waitCtx2, dsn) So(err, ShouldBeNil) diff --git a/cmd/cql-fuse/block_test.go b/cmd/cql-fuse/block_test.go index 05ef68967..6bd917e62 100644 --- a/cmd/cql-fuse/block_test.go +++ b/cmd/cql-fuse/block_test.go @@ -261,11 +261,6 @@ func initTestDB() (*sql.DB, func()) { log.Errorf("create db failed: %v", err) return nil, stopNodes } - _, err = client.ParseDSN(dsn) - if err != nil { - log.Errorf("parse dsn failed: %v", err) - return nil, stopNodes - } db, err := sql.Open("covenantsql", dsn) if err != nil { diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index 0f1a5c56c..3e6dcce1a 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -747,8 +747,6 @@ func benchMiner(b 
*testing.B, minerCount uint16, bypassSign bool) { So(err, ShouldBeNil) // wait for creation - _, err = client.ParseDSN(dsn) - So(err, ShouldBeNil) var ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() err = client.WaitDBCreation(ctx, dsn) @@ -843,9 +841,6 @@ func benchGNTEMiner(b *testing.B, minerCount uint16, bypassSign bool) { db, err := sql.Open("covenantsql", dsn) So(err, ShouldBeNil) - _, err = client.ParseDSN(dsn) - So(err, ShouldBeNil) - // wait for creation var ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() diff --git a/cmd/cql-observer/observation_test.go b/cmd/cql-observer/observation_test.go index 0b3226a98..278b75a1a 100644 --- a/cmd/cql-observer/observation_test.go +++ b/cmd/cql-observer/observation_test.go @@ -304,6 +304,9 @@ func TestFullProcess(t *testing.T) { err = client.WaitDBCreation(ctx2, dsn) So(err, ShouldBeNil) + _, err = db.ExecContext(ctx2, "SHOW TABLES") + So(err, ShouldBeNil) + _, err = db.Exec("CREATE TABLE test (test int)") So(err, ShouldBeNil) From 709310a45afbc122216da8e270c51971ae5d1a5e Mon Sep 17 00:00:00 2001 From: Ggicci Date: Mon, 7 Jan 2019 10:55:34 +0800 Subject: [PATCH 053/302] Also build indexes in normal mode --- blockproducer/chain.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/blockproducer/chain.go b/blockproducer/chain.go index 3e969bb26..c59fd6bc0 100644 --- a/blockproducer/chain.go +++ b/blockproducer/chain.go @@ -715,9 +715,7 @@ func (c *Chain) replaceAndSwitchToBranch( sps = append(sps, addBlock(height, newBlock)) // Index blocks and transactions if running as API node - if c.mode == "api" { - sps = append(sps, buildBlockIndex(height, newBlock)) - } + sps = append(sps, buildBlockIndex(height, newBlock)) for k, v := range c.immutable.dirty.accounts { if v != nil { From ccc63597145e1c32047872eb0077970bcd543852 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 7 Jan 2019 11:02:33 +0800 Subject: [PATCH 054/302] Fix test 
case for mock --- client/driver_test.go | 9 +++++++-- cmd/cql-observer/observation_test.go | 3 --- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/client/driver_test.go b/client/driver_test.go index 94910b899..c01ba54cf 100644 --- a/client/driver_test.go +++ b/client/driver_test.go @@ -23,6 +23,7 @@ import ( "testing" "time" + bp "github.com/CovenantSQL/CovenantSQL/blockproducer" "github.com/CovenantSQL/CovenantSQL/crypto" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" @@ -68,10 +69,13 @@ func TestCreate(t *testing.T) { var dsn string dsn, err = Create(ResourceMeta{}) So(err, ShouldBeNil) + dsnCfg, err := ParseDSN(dsn) + So(err, ShouldBeNil) waitCtx, cancelWait := context.WithTimeout(context.Background(), time.Nanosecond) defer cancelWait() - err = WaitDBCreation(waitCtx, dsn) + // should not use client.WaitDBCreation, sql.Open is not supported in this test case + err = bp.WaitDatabaseCreation(waitCtx, proto.DatabaseID(dsnCfg.DatabaseID), nil, 3*time.Second) So(err, ShouldResemble, context.DeadlineExceeded) // Calculate database ID @@ -92,7 +96,8 @@ func TestCreate(t *testing.T) { waitCtx2, cancelWait2 := context.WithTimeout(context.Background(), 5*time.Minute) defer cancelWait2() - err = WaitDBCreation(waitCtx2, dsn) + // should not use client.WaitDBCreation, sql.Open is not supported in this test case + err = bp.WaitDatabaseCreation(waitCtx2, proto.DatabaseID(dsnCfg.DatabaseID), nil, 3*time.Second) So(err, ShouldBeNil) }) } diff --git a/cmd/cql-observer/observation_test.go b/cmd/cql-observer/observation_test.go index 278b75a1a..0b3226a98 100644 --- a/cmd/cql-observer/observation_test.go +++ b/cmd/cql-observer/observation_test.go @@ -304,9 +304,6 @@ func TestFullProcess(t *testing.T) { err = client.WaitDBCreation(ctx2, dsn) So(err, ShouldBeNil) - _, err = db.ExecContext(ctx2, "SHOW TABLES") - So(err, ShouldBeNil) - _, err = db.Exec("CREATE TABLE test (test int)") So(err, ShouldBeNil) From 
25f5038d71ddcb6834033067f756a75ee2aa3135 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Mon, 7 Jan 2019 11:41:47 +0800 Subject: [PATCH 055/302] Add config field for target miners --- cmd/cql-minerd/provide_service.go | 5 +++++ conf/config.go | 7 ++++--- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/cmd/cql-minerd/provide_service.go b/cmd/cql-minerd/provide_service.go index a2e0f78b5..cc9a189b8 100644 --- a/cmd/cql-minerd/provide_service.go +++ b/cmd/cql-minerd/provide_service.go @@ -17,6 +17,7 @@ package main import ( + "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" @@ -151,6 +152,10 @@ func sendProvideService(reg *prometheus.Registry) { }, ) + if conf.GConf.Miner != nil && len(conf.GConf.Miner.TargetUsers) > 0 { + tx.ProvideServiceHeader.TargetUser = conf.GConf.Miner.TargetUsers + } + tx.Nonce = nonceResp.Nonce if err = tx.Sign(privateKey); err != nil { diff --git a/conf/config.go b/conf/config.go index e2949a1c8..7f3914842 100644 --- a/conf/config.go +++ b/conf/config.go @@ -97,9 +97,10 @@ type MinerDatabaseFixture struct { // MinerInfo for miner config. type MinerInfo struct { // node basic config. - RootDir string `yaml:"RootDir"` - MaxReqTimeGap time.Duration `yaml:"MaxReqTimeGap,omitempty"` - ProvideServiceInterval time.Duration `yaml:"ProvideServiceInterval,omitempty"` + RootDir string `yaml:"RootDir"` + MaxReqTimeGap time.Duration `yaml:"MaxReqTimeGap,omitempty"` + ProvideServiceInterval time.Duration `yaml:"ProvideServiceInterval,omitempty"` + TargetUsers []proto.AccountAddress `yaml:"TargetUsers,omitempty"` // when test mode, fixture database config is used. 
IsTestMode bool `yaml:"IsTestMode,omitempty"` From 12a2e93cf5cf4cd81c808ff0018d30b78a43653d Mon Sep 17 00:00:00 2001 From: leventeliu Date: Mon, 7 Jan 2019 12:22:29 +0800 Subject: [PATCH 056/302] Resolve conversation --- blockproducer/bpinfo.go | 2 +- blockproducer/chain.go | 24 ++++++++++++------------ blockproducer/chain_gossip.go | 6 +++--- blockproducer/chain_io.go | 2 +- blockproducer/rpc.go | 4 ++-- 5 files changed, 19 insertions(+), 19 deletions(-) diff --git a/blockproducer/bpinfo.go b/blockproducer/bpinfo.go index f63c959d9..d89732dd3 100644 --- a/blockproducer/bpinfo.go +++ b/blockproducer/bpinfo.go @@ -34,7 +34,7 @@ func (i *blockProducerInfo) String() string { return fmt.Sprintf("[%d/%d|%s] %s", i.rank+1, i.total, i.role, i.nodeID) } -func newBlockProduerInfos( +func buildBlockProducerInfos( localNodeID proto.NodeID, peers *proto.Peers, ) ( localBPInfo *blockProducerInfo, bpInfos []*blockProducerInfo, err error, diff --git a/blockproducer/chain.go b/blockproducer/chain.go index b23b21067..83f1889e8 100644 --- a/blockproducer/chain.go +++ b/blockproducer/chain.go @@ -50,10 +50,10 @@ type Chain struct { wg *sync.WaitGroup // RPC components server *rpc.Server - cl *rpc.Caller + caller *rpc.Caller // Other components - st xi.Storage - bs chainbus.Bus + storage xi.Storage + chainBus chainbus.Bus // Channels for incoming blocks and transactions pendingBlocks chan *types.BPBlock pendingAddTxReqs chan *types.AddTxReq @@ -184,7 +184,7 @@ func NewChainWithContext(ctx context.Context, cfg *Config) (c *Chain, err error) } // Setup peer list - if localBPInfo, bpInfos, err = newBlockProduerInfos(cfg.NodeID, cfg.Peers); err != nil { + if localBPInfo, bpInfos, err = buildBlockProducerInfos(cfg.NodeID, cfg.Peers); err != nil { return } if t = cfg.ConfirmThreshold; t <= 0.0 { @@ -202,10 +202,10 @@ func NewChainWithContext(ctx context.Context, cfg *Config) (c *Chain, err error) wg: &sync.WaitGroup{}, server: cfg.Server, - cl: rpc.NewCaller(), + caller: rpc.NewCaller(), - 
st: st, - bs: bus, + storage: st, + chainBus: bus, pendingBlocks: make(chan *types.BPBlock), pendingAddTxReqs: make(chan *types.AddTxReq), @@ -265,7 +265,7 @@ func (c *Chain) Stop() (err error) { le.Debug("stopping chain") c.stop() le.Debug("chain service stopped") - c.st.Close() + c.storage.Close() le.Debug("chain database closed") // FIXME(leventeliu): RPC server should provide an `unregister` method to detach chain service @@ -602,7 +602,7 @@ func (c *Chain) storeTx(tx pi.Transaction) (err error) { return } - return store(c.st, []storageProcedure{addTx(tx)}, func() { + return store(c.storage, []storageProcedure{addTx(tx)}, func() { c.txPool[k] = tx for _, v := range c.branches { v.addTx(tx) @@ -725,7 +725,7 @@ func (c *Chain) replaceAndSwitchToBranch( } // Write to immutable database and update cache - if err = store(c.st, sps, up); err != nil { + if err = store(c.storage, sps, up); err != nil { c.immutable.clean() } // TODO(leventeliu): trigger ChainBus.Publish. @@ -774,7 +774,7 @@ func (c *Chain) applyBlock(bl *types.BPBlock) (err error) { } // Grow a branch while the current branch is not changed if br.head.count <= c.headBranch.head.count { - return store(c.st, + return store(c.storage, []storageProcedure{addBlock(height, bl)}, func() { br.preview.commit() @@ -804,7 +804,7 @@ func (c *Chain) applyBlock(bl *types.BPBlock) (err error) { err = errors.Wrapf(ierr, "failed to fork from %s", parent.hash.Short(4)) return } - return store(c.st, + return store(c.storage, []storageProcedure{addBlock(height, bl)}, func() { c.branches = append(c.branches, br) }, ) diff --git a/blockproducer/chain_gossip.go b/blockproducer/chain_gossip.go index ae45182ea..4080933c2 100644 --- a/blockproducer/chain_gossip.go +++ b/blockproducer/chain_gossip.go @@ -39,7 +39,7 @@ func (c *Chain) nonblockingBroadcastBlock(block *types.BPBlock) { }, Block: block, } - err = c.cl.CallNodeWithContext( + err = c.caller.CallNodeWithContext( ctx, remote.nodeID, route.MCCAdviseNewBlock.String(), req, 
nil) ) log.WithFields(log.Fields{ @@ -66,7 +66,7 @@ func (c *Chain) nonblockingBroadcastTx(ttl uint32, tx pi.Transaction) { TTL: ttl, Tx: tx, } - err = c.cl.CallNodeWithContext( + err = c.caller.CallNodeWithContext( ctx, remote.nodeID, route.MCCAddTx.String(), req, nil) ) log.WithFields(log.Fields{ @@ -109,7 +109,7 @@ func (c *Chain) blockingFetchBlock(ctx context.Context, h uint32) (unreachable u "remote": remote, "height": h, }) - if err = c.cl.CallNodeWithContext( + if err = c.caller.CallNodeWithContext( cld, remote.nodeID, route.MCCFetchBlock.String(), req, resp, ); err != nil { le.WithError(err).Warn("failed to fetch block") diff --git a/blockproducer/chain_io.go b/blockproducer/chain_io.go index bc835229a..e843b7e1c 100644 --- a/blockproducer/chain_io.go +++ b/blockproducer/chain_io.go @@ -33,7 +33,7 @@ func (c *Chain) loadBlock(h hash.Hash) (b *types.BPBlock, err error) { enc []byte out = &types.BPBlock{} ) - if err = c.st.Reader().QueryRow( + if err = c.storage.Reader().QueryRow( `SELECT "encoded" FROM "blocks" WHERE "hash"=?`, h.String(), ).Scan(&enc); err != nil { return diff --git a/blockproducer/rpc.go b/blockproducer/rpc.go index 2eb223b52..1b021614b 100644 --- a/blockproducer/rpc.go +++ b/blockproducer/rpc.go @@ -140,8 +140,8 @@ func (s *ChainRPCService) QueryTxState( // Sub is the RPC method to subscribe some event. 
func (s *ChainRPCService) Sub(req *types.SubReq, resp *types.SubResp) (err error) { - return s.chain.bs.Subscribe(req.Topic, func(request interface{}, response interface{}) { - s.chain.cl.CallNode(req.NodeID.ToNodeID(), req.Callback, request, response) + return s.chain.chainBus.Subscribe(req.Topic, func(request interface{}, response interface{}) { + s.chain.caller.CallNode(req.NodeID.ToNodeID(), req.Callback, request, response) }) } From d9508fd73d8980dcfb109b0287299947d2b4d5b0 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Mon, 7 Jan 2019 12:28:35 +0800 Subject: [PATCH 057/302] Fix typo --- blockproducer/chain.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/blockproducer/chain.go b/blockproducer/chain.go index 83f1889e8..83e7bc381 100644 --- a/blockproducer/chain.go +++ b/blockproducer/chain.go @@ -561,7 +561,7 @@ func (c *Chain) blockingSyncCurrentHead(ctx context.Context) (err error) { } // syncCurrentHead synchronizes a block at the current height of the local peer from the known -// remote peers. The return value `ok` indicates that there're at less `c.confirms-1` replies +// remote peers. The return value `ok` indicates that there're at least `c.confirms-1` replies // from these gossip calls. 
func (c *Chain) syncCurrentHead(ctx context.Context) (ok bool) { var h = c.getNextHeight() - 1 From 8d6885d9d401bd93049b57818b229678708ab0ec Mon Sep 17 00:00:00 2001 From: leventeliu Date: Mon, 7 Jan 2019 12:36:26 +0800 Subject: [PATCH 058/302] Move transaction limit to package limits --- blockproducer/branch.go | 9 ++++----- blockproducer/limits/limits.go | 2 ++ 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/blockproducer/branch.go b/blockproducer/branch.go index cb1dc8686..3129f7c4f 100644 --- a/blockproducer/branch.go +++ b/blockproducer/branch.go @@ -22,6 +22,7 @@ import ( "time" pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" + pl "github.com/CovenantSQL/CovenantSQL/blockproducer/limits" ca "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/proto" @@ -29,8 +30,6 @@ import ( "github.com/pkg/errors" ) -const transactionsLimit = 10000 - type branch struct { head *blockNode preview *metaState @@ -58,7 +57,7 @@ func newBranch( } // Apply new blocks to view and pool for _, bn := range list { - if len(bn.block.Transactions) > transactionsLimit { + if len(bn.block.Transactions) > pl.MaxPendingTxsPerAccount { return nil, ErrTooManyTransactionsInBlock } @@ -133,7 +132,7 @@ func (b *branch) applyBlock(n *blockNode) (br *branch, err error) { } var cpy = b.makeArena() - if len(n.block.Transactions) > transactionsLimit { + if len(n.block.Transactions) > pl.MaxTransactionsPerBlock { return nil, ErrTooManyTransactionsInBlock } @@ -186,7 +185,7 @@ func (b *branch) produceBlock( cpy = b.makeArena() txs = cpy.sortUnpackedTxs() ierr error - packCount = transactionsLimit + packCount = pl.MaxTransactionsPerBlock ) if len(txs) < packCount { diff --git a/blockproducer/limits/limits.go b/blockproducer/limits/limits.go index fab06d93d..1e1f96405 100644 --- a/blockproducer/limits/limits.go +++ b/blockproducer/limits/limits.go @@ -23,4 +23,6 @@ const ( 
MaxTxBroadcastTTL = 1 // MaxPendingTxsPerAccount defines the limit of pending transactions of one account. MaxPendingTxsPerAccount = 1000 + // MaxTransactionsPerBlock defines the limit of transactions per block. + MaxTransactionsPerBlock = 10000 ) From 2071b30f61dba110b5316bdae29786eac94f36a0 Mon Sep 17 00:00:00 2001 From: auxten Date: Mon, 7 Jan 2019 14:08:00 +0800 Subject: [PATCH 059/302] Fix missing AccountAddress MarshalYAML UnmarshalYAML --- proto/nodeinfo.go | 10 ++++++++++ proto/nodeinfo_test.go | 27 +++++++++++++++++++++------ 2 files changed, 31 insertions(+), 6 deletions(-) diff --git a/proto/nodeinfo.go b/proto/nodeinfo.go index 9d19fa359..c6ef33b49 100644 --- a/proto/nodeinfo.go +++ b/proto/nodeinfo.go @@ -63,6 +63,16 @@ func (z AccountAddress) MarshalJSON() ([]byte, error) { return ((hash.Hash)(z)).MarshalJSON() } +// MarshalYAML implements the yaml.Marshaler interface. +func (z AccountAddress) MarshalYAML() (interface{}, error) { + return ((hash.Hash)(z)).MarshalYAML() +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (z *AccountAddress) UnmarshalYAML(unmarshal func(interface{}) error) error { + return ((*hash.Hash)(z)).UnmarshalYAML(unmarshal) +} + // NodeKey is node key on consistent hash ring, generate from Hash(NodeID). 
type NodeKey RawNodeID diff --git a/proto/nodeinfo_test.go b/proto/nodeinfo_test.go index fb0461fae..15ebf2895 100644 --- a/proto/nodeinfo_test.go +++ b/proto/nodeinfo_test.go @@ -96,7 +96,7 @@ func TestServerRoles_Contains(t *testing.T) { }) } -func unmarshalAndMarshal(str string) string { +func unmarshalAndMarshalServerRole(str string) string { var role ServerRole yaml.Unmarshal([]byte(str), &role) ret, _ := yaml.Marshal(role) @@ -104,16 +104,31 @@ func unmarshalAndMarshal(str string) string { return strings.TrimSpace(string(ret)) } +func unmarshalAndMarshalAccountAddress(str string) string { + var addr AccountAddress + yaml.Unmarshal([]byte(str), &addr) + ret, _ := yaml.Marshal(addr) + + return strings.TrimSpace(string(ret)) +} + +func TestAccountAddress_MarshalYAML(t *testing.T) { + Convey("marshal unmarshal yaml", t, func() { + So(unmarshalAndMarshalAccountAddress("6d5e7b36f5fa83d538539f31cf46682b0df3e0ecd192f2331dcf73e7e5ab5686"), + ShouldEqual, "6d5e7b36f5fa83d538539f31cf46682b0df3e0ecd192f2331dcf73e7e5ab5686") + }) +} + func TestServerRole_MarshalYAML(t *testing.T) { Convey("marshal unmarshal yaml", t, func() { var role ServerRole s, _ := role.MarshalYAML() So(s, ShouldResemble, "Unknown") - So(unmarshalAndMarshal("unknown"), ShouldEqual, "Unknown") - So(unmarshalAndMarshal("leader"), ShouldEqual, "Leader") - So(unmarshalAndMarshal("follower"), ShouldEqual, "Follower") - So(unmarshalAndMarshal("miner"), ShouldEqual, "Miner") - So(unmarshalAndMarshal("client"), ShouldEqual, "Client") + So(unmarshalAndMarshalServerRole("unknown"), ShouldEqual, "Unknown") + So(unmarshalAndMarshalServerRole("leader"), ShouldEqual, "Leader") + So(unmarshalAndMarshalServerRole("follower"), ShouldEqual, "Follower") + So(unmarshalAndMarshalServerRole("miner"), ShouldEqual, "Miner") + So(unmarshalAndMarshalServerRole("client"), ShouldEqual, "Client") }) } From 91d717aa97d2a95d79ca3d597c9594179df91c91 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Mon, 7 Jan 2019 16:41:38 +0800 
Subject: [PATCH 060/302] Add staging image tag --- Makefile | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Makefile b/Makefile index 43d6b462f..7bd59dd37 100644 --- a/Makefile +++ b/Makefile @@ -94,6 +94,12 @@ push_testnet: docker tag $(IMAGE):$(VERSION) $(IMAGE):testnet docker push $(IMAGE):testnet +push_staging: + docker tag $(OB_IMAGE):$(VERSION) $(OB_IMAGE):staging + docker push $(OB_IMAGE):staging + docker tag $(IMAGE):$(VERSION) $(IMAGE):staging + docker push $(IMAGE):staging + push: docker push $(OB_IMAGE):$(VERSION) docker push $(OB_IMAGE):latest From 1703e8dff97a68b212960aad83d66b6a0fa4531a Mon Sep 17 00:00:00 2001 From: leventeliu Date: Mon, 7 Jan 2019 16:42:15 +0800 Subject: [PATCH 061/302] Resolve FIXME --- blockproducer/storage.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/blockproducer/storage.go b/blockproducer/storage.go index 8ebd485ec..414bed504 100644 --- a/blockproducer/storage.go +++ b/blockproducer/storage.go @@ -228,7 +228,7 @@ func buildBlockIndex(height uint32, b *types.BPBlock) storageProcedure { txIndex, t.Hash().String(), b.BlockHash().String(), - 0, // FIXME: use Transaction.GetTimestamp() + t.GetTimestamp().UnixNano(), t.GetTransactionType(), addr.String(), string(raw), From 9a967fb5d71ab0e35c192a8610ba5319e014afbf Mon Sep 17 00:00:00 2001 From: Ggicci Date: Mon, 7 Jan 2019 17:00:33 +0800 Subject: [PATCH 062/302] Query transaction confirmed state --- blockproducer/branch.go | 2 +- blockproducer/chain_io.go | 15 +++++++++++++-- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/blockproducer/branch.go b/blockproducer/branch.go index 3129f7c4f..cfa42b5af 100644 --- a/blockproducer/branch.go +++ b/blockproducer/branch.go @@ -240,7 +240,7 @@ func (b *branch) clearUnpackedTxs(txs []pi.Transaction) { } } -func (b *branch) queryTx(hash hash.Hash) (state pi.TransactionState, ok bool) { +func (b *branch) queryTxState(hash hash.Hash) (state pi.TransactionState, ok bool) { if _, ok = b.unpacked[hash]; 
ok { state = pi.TransactionStatePending return diff --git a/blockproducer/chain_io.go b/blockproducer/chain_io.go index e843b7e1c..27c09e405 100644 --- a/blockproducer/chain_io.go +++ b/blockproducer/chain_io.go @@ -140,10 +140,21 @@ func (c *Chain) queryTxState(hash hash.Hash) (state pi.TransactionState, err err defer c.RUnlock() var ok bool state = pi.TransactionStateNotFound - if state, ok = c.headBranch.queryTx(hash); ok { + if state, ok = c.headBranch.queryTxState(hash); ok { return } - // TODO(leventeliu): get confirmed state from tx history. + + var ( + count int + querySQL = `select count(*) from indexed_transactions where hash = ?` + ) + if err = c.storage.Reader().QueryRow(querySQL, hash.String()).Scan(&count); err != nil { + return pi.TransactionStateNotFound, err + } + + if count > 0 { + return pi.TransactionStateConfirmed, nil + } return } From a284fba863012bc53ab5986916ed344bb3902d1d Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Thu, 27 Dec 2018 18:29:53 +0800 Subject: [PATCH 063/302] Support query pattern regulation --- blockproducer/metastate.go | 19 +++-- blockproducer/metastate_test.go | 39 ++++----- client/driver.go | 2 +- client/helper_test.go | 2 +- cmd/cql-minerd/integration_test.go | 3 +- cmd/cql/main.go | 4 +- types/account.go | 95 +++++++++++++++++----- types/account_gen.go | 54 +++++++++++-- types/account_gen_test.go | 37 +++++++++ types/updatepermission.go | 2 +- types/updatepermission_gen.go | 18 ++++- types/xxx_test.go | 20 +---- worker/chainbusservice_test.go | 4 +- worker/dbms.go | 73 +++++++++++------ worker/dbms_test.go | 123 ++++++++++++++++++++++++++--- worker/helper_test.go | 8 +- 16 files changed, 381 insertions(+), 122 deletions(-) diff --git a/blockproducer/metastate.go b/blockproducer/metastate.go index 1dfaf3891..22aef9f75 100644 --- a/blockproducer/metastate.go +++ b/blockproducer/metastate.go @@ -458,7 +458,7 @@ func (s *metaState) createSQLChain(addr proto.AccountAddress, id proto.DatabaseI Users: []*types.SQLChainUser{ { 
Address: addr, - Permission: types.Admin, + Permission: types.UserPermissionFromRole(types.Admin), }, }, } @@ -466,7 +466,7 @@ func (s *metaState) createSQLChain(addr proto.AccountAddress, id proto.DatabaseI } func (s *metaState) addSQLChainUser( - k proto.DatabaseID, addr proto.AccountAddress, perm types.UserPermission) (_ error, + k proto.DatabaseID, addr proto.AccountAddress, perm *types.UserPermission) (_ error, ) { var ( src, dst *types.SQLChainProfile @@ -515,8 +515,7 @@ func (s *metaState) deleteSQLChainUser(k proto.DatabaseID, addr proto.AccountAdd } func (s *metaState) alterSQLChainUser( - k proto.DatabaseID, addr proto.AccountAddress, perm types.UserPermission) (_ error, -) { + k proto.DatabaseID, addr proto.AccountAddress, perm *types.UserPermission) (_ error) { var ( src, dst *types.SQLChainProfile ok bool @@ -703,7 +702,7 @@ func (s *metaState) matchProvidersWithUser(tx *types.CreateDatabase) (err error) users := make([]*types.SQLChainUser, 1) users[0] = &types.SQLChainUser{ Address: sender, - Permission: types.Admin, + Permission: types.UserPermissionFromRole(types.Admin), Status: types.Normal, Deposit: minAdvancePayment, AdvancePayment: tx.AdvancePayment, @@ -878,7 +877,7 @@ func (s *metaState) updatePermission(tx *types.UpdatePermission) (err error) { }).WithError(ErrDatabaseNotFound).Error("unexpected error in updatePermission") return ErrDatabaseNotFound } - if tx.Permission >= types.NumberOfUserPermission { + if !tx.Permission.IsValid() { log.WithFields(log.Fields{ "permission": tx.Permission, "dbID": tx.TargetSQLChain.DatabaseID(), @@ -891,8 +890,8 @@ func (s *metaState) updatePermission(tx *types.UpdatePermission) (err error) { numOfAdmin := 0 targetUserIndex := -1 for i, u := range so.Users { - isAdmin = isAdmin || (sender == u.Address && u.Permission == types.Admin) - if u.Permission == types.Admin { + isAdmin = isAdmin || (sender == u.Address && u.Permission.HasAdminPermission()) + if u.Permission.HasAdminPermission() { numOfAdmin++ } if 
tx.TargetUser == u.Address { @@ -909,7 +908,7 @@ func (s *metaState) updatePermission(tx *types.UpdatePermission) (err error) { } // return error if number of Admin <= 1 and Admin want to revoke permission of itself - if numOfAdmin <= 1 && tx.TargetUser == sender && tx.Permission != types.Admin { + if numOfAdmin <= 1 && tx.TargetUser == sender && !tx.Permission.HasAdminPermission() { err = ErrNoAdminLeft log.WithFields(log.Fields{ "sender": sender.String(), @@ -947,7 +946,7 @@ func (s *metaState) updateKeys(tx *types.IssueKeys) (err error) { // check sender's permission isAdmin := false for _, user := range so.Users { - if sender == user.Address && user.Permission == types.Admin { + if sender == user.Address && user.Permission.HasAdminPermission() { isAdmin = true break } diff --git a/blockproducer/metastate_test.go b/blockproducer/metastate_test.go index f6bca9ac2..aedd69e87 100644 --- a/blockproducer/metastate_test.go +++ b/blockproducer/metastate_test.go @@ -106,11 +106,11 @@ func TestMetaState(t *testing.T) { Convey("The metaState should failed to operate SQLChain for unknown user", func() { err = ms.createSQLChain(addr1, dbID1) So(err, ShouldEqual, ErrAccountNotFound) - err = ms.addSQLChainUser(dbID1, addr1, types.Admin) + err = ms.addSQLChainUser(dbID1, addr1, types.UserPermissionFromRole(types.Admin)) So(err, ShouldEqual, ErrDatabaseNotFound) err = ms.deleteSQLChainUser(dbID1, addr1) So(err, ShouldEqual, ErrDatabaseNotFound) - err = ms.alterSQLChainUser(dbID1, addr1, types.Write) + err = ms.alterSQLChainUser(dbID1, addr1, types.UserPermissionFromRole(types.Write)) So(err, ShouldEqual, ErrDatabaseNotFound) }) Convey("When new account and database objects are stored", func() { @@ -170,9 +170,9 @@ func TestMetaState(t *testing.T) { So(err, ShouldEqual, ErrDatabaseExists) }) Convey("When new SQLChain users are added", func() { - err = ms.addSQLChainUser(dbID3, addr2, types.Write) + err = ms.addSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Write)) 
So(err, ShouldBeNil) - err = ms.addSQLChainUser(dbID3, addr2, types.Write) + err = ms.addSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Write)) So(err, ShouldEqual, ErrDatabaseUserExists) Convey("The metaState object should be ok to delete user", func() { err = ms.deleteSQLChainUser(dbID3, addr2) @@ -181,9 +181,9 @@ func TestMetaState(t *testing.T) { So(err, ShouldBeNil) }) Convey("The metaState object should be ok to alter user", func() { - err = ms.alterSQLChainUser(dbID3, addr2, types.Read) + err = ms.alterSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Read)) So(err, ShouldBeNil) - err = ms.alterSQLChainUser(dbID3, addr2, types.Write) + err = ms.alterSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Write)) So(err, ShouldBeNil) }) Convey("When metaState change is committed", func() { @@ -204,9 +204,9 @@ func TestMetaState(t *testing.T) { So(err, ShouldBeNil) }) Convey("The metaState object should be ok to alter user", func() { - err = ms.alterSQLChainUser(dbID3, addr2, types.Read) + err = ms.alterSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Read)) So(err, ShouldBeNil) - err = ms.alterSQLChainUser(dbID3, addr2, types.Write) + err = ms.alterSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Write)) So(err, ShouldBeNil) }) }) @@ -214,9 +214,9 @@ func TestMetaState(t *testing.T) { Convey("When metaState change is committed", func() { ms.commit() Convey("The metaState object should be ok to add users for database", func() { - err = ms.addSQLChainUser(dbID3, addr2, types.Write) + err = ms.addSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Write)) So(err, ShouldBeNil) - err = ms.addSQLChainUser(dbID3, addr2, types.Write) + err = ms.addSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Write)) So(err, ShouldEqual, ErrDatabaseUserExists) }) Convey("The metaState object should report database exists", func() { @@ -992,7 +992,7 @@ func TestMetaState(t *testing.T) { 
UpdatePermissionHeader: types.UpdatePermissionHeader{ TargetSQLChain: addr1, TargetUser: addr3, - Permission: types.Read, + Permission: types.UserPermissionFromRole(types.Read), Nonce: cd1.Nonce + 1, }, } @@ -1000,7 +1000,7 @@ func TestMetaState(t *testing.T) { So(err, ShouldBeNil) err = ms.apply(&up) So(errors.Cause(err), ShouldEqual, ErrDatabaseNotFound) - up.Permission = 4 + up.Permission = types.UserPermissionFromRole(types.NumberOfUserPermission) up.TargetSQLChain = dbAccount err = up.Sign(privKey1) So(err, ShouldBeNil) @@ -1009,7 +1009,7 @@ func TestMetaState(t *testing.T) { // test permission update // addr1(admin) update addr3 as admin up.TargetUser = addr3 - up.Permission = types.Admin + up.Permission = types.UserPermissionFromRole(types.Admin) err = up.Sign(privKey1) So(err, ShouldBeNil) err = ms.apply(&up) @@ -1018,7 +1018,7 @@ func TestMetaState(t *testing.T) { // addr3(admin) update addr4 as read up.TargetUser = addr4 up.Nonce = cd2.Nonce - up.Permission = types.Read + up.Permission = types.UserPermissionFromRole(types.Read) err = up.Sign(privKey3) So(err, ShouldBeNil) err = ms.apply(&up) @@ -1034,7 +1034,7 @@ func TestMetaState(t *testing.T) { ms.commit() // addr3(admin) update addr3(admin) as read fail up.TargetUser = addr3 - up.Permission = types.Read + up.Permission = types.UserPermissionFromRole(types.Read) up.Nonce = up.Nonce + 1 err = up.Sign(privKey3) So(err, ShouldBeNil) @@ -1050,15 +1050,18 @@ func TestMetaState(t *testing.T) { co, loaded = ms.loadSQLChainObject(dbID) for _, user := range co.Users { if user.Address == addr1 { - So(user.Permission, ShouldEqual, types.Read) + So(user.Permission, ShouldNotBeNil) + So(user.Permission.Role, ShouldEqual, types.Read) continue } if user.Address == addr3 { - So(user.Permission, ShouldEqual, types.Admin) + So(user.Permission, ShouldNotBeNil) + So(user.Permission.Role, ShouldEqual, types.Admin) continue } if user.Address == addr4 { - So(user.Permission, ShouldEqual, types.Read) + So(user.Permission, 
ShouldNotBeNil) + So(user.Permission.Role, ShouldEqual, types.Read) continue } } diff --git a/client/driver.go b/client/driver.go index a28634d3e..1436071aa 100644 --- a/client/driver.go +++ b/client/driver.go @@ -259,7 +259,7 @@ func GetTokenBalance(tt types.TokenType) (balance uint64, err error) { // UpdatePermission sends UpdatePermission transaction to chain. func UpdatePermission(targetUser proto.AccountAddress, - targetChain proto.AccountAddress, perm types.UserPermission) (err error) { + targetChain proto.AccountAddress, perm *types.UserPermission) (err error) { if atomic.LoadUint32(&driverInitialized) == 0 { err = ErrNotInitialized return diff --git a/client/helper_test.go b/client/helper_test.go index 0db24c669..df069d0b1 100644 --- a/client/helper_test.go +++ b/client/helper_test.go @@ -175,7 +175,7 @@ func startTestService() (stopTestService func(), tempDir string, err error) { return } permStat := &types.PermStat{ - Permission: types.Admin, + Permission: types.UserPermissionFromRole(types.Admin), Status: types.Normal, } err = dbms.UpdatePermission(dbID, proto.AccountAddress(addr), permStat) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index 3e6dcce1a..3bfc6af21 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -429,7 +429,8 @@ func TestFullProcess(t *testing.T) { } permStat, ok := usersMap[clientAddr] So(ok, ShouldBeTrue) - So(permStat.Permission, ShouldEqual, types.Admin) + So(permStat.Permission, ShouldNotBeNil) + So(permStat.Permission.Role, ShouldEqual, types.Admin) So(permStat.Status, ShouldEqual, types.Normal) _, err = db.Exec("CREATE TABLE test (test int)") diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 1c0efd6a6..5806042a5 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -336,13 +336,13 @@ func main() { var p types.UserPermission p.FromString(perm.Perm) - if p > types.NumberOfUserPermission { + if p.Role > types.NumberOfUserPermission { 
log.WithError(err).Errorf("update permission failed: invalid permission description") os.Exit(-1) return } - err := client.UpdatePermission(perm.TargetUser, perm.TargetChain, p) + err := client.UpdatePermission(perm.TargetUser, perm.TargetChain, &p) if err != nil { log.WithError(err).Error("update permission failed") diff --git a/types/account.go b/types/account.go index 8f8b7295e..cfe16598a 100644 --- a/types/account.go +++ b/types/account.go @@ -35,12 +35,21 @@ const ( NumberOfRoles ) +// UserPermissionRole defines role of user permission including admin/write/read. +type UserPermissionRole int32 + // UserPermission defines permissions of a SQLChain user. -type UserPermission int32 +type UserPermission struct { + // User role to access database. + Role UserPermissionRole + // SQL pattern regulations for user queries + // only a fully matched (case-sensitive) sql query is permitted to execute. + Patterns []string +} const ( // Void defines the initial permission. - Void UserPermission = iota + Void UserPermissionRole = iota // Admin defines the admin user permission. Admin // Write defines the writer user permission. @@ -51,39 +60,83 @@ const ( NumberOfUserPermission ) -// CheckRead returns true if user owns read permission. -func (up *UserPermission) CheckRead() bool { - return *up >= Admin && *up < NumberOfUserPermission +// UserPermissionFromRole construct a new user permission instance from primitive user permission role enum. +func UserPermissionFromRole(role UserPermissionRole) *UserPermission { + return &UserPermission{ + Role: role, + } +} + +// HasReadPermission returns true if user owns read permission. +func (up *UserPermission) HasReadPermission() bool { + if up == nil { + return false + } + return up.Role >= Admin && up.Role < NumberOfUserPermission +} + +// HasWritePermission returns true if user owns write permission. 
+func (up *UserPermission) HasWritePermission() bool { + if up == nil { + return false + } + return up.Role >= Admin && up.Role <= Write } -// CheckWrite returns true if user owns write permission. -func (up *UserPermission) CheckWrite() bool { - return *up >= Admin && *up <= Write +// HasAdminPermission returns true if user owns admin permission. +func (up *UserPermission) HasAdminPermission() bool { + if up == nil { + return false + } + return up.Role == Admin } -// CheckAdmin returns true if user owns admin permission. -func (up *UserPermission) CheckAdmin() bool { - return *up == Admin +// IsValid returns whether the permission object is valid or not. +func (up *UserPermission) IsValid() bool { + return up != nil && up.Role < NumberOfUserPermission && up.Role >= Admin } -// Valid returns true if the value is a meaning permission value. -func (up *UserPermission) Valid() bool { - return *up >= Admin && *up < NumberOfUserPermission +// HasDisallowedQueryPatterns returns whether the queries are permitted. +func (up *UserPermission) HasDisallowedQueryPatterns(queries []Query) (query string, status bool) { + if up == nil { + status = true + return + } + if len(up.Patterns) == 0 { + status = false + return + } + + // more queries than patterns + queryMap := make(map[string]bool, len(up.Patterns)) + for _, p := range up.Patterns { + queryMap[p] = true + } + for _, q := range queries { + if !queryMap[q.Pattern] { + // not permitted + query = q.Pattern + status = true + break + } + } + + return } // FromString converts string to UserPermission. 
func (up *UserPermission) FromString(perm string) { switch perm { case "Admin": - *up = Admin + up.Role = Admin case "Write": - *up = Write + up.Role = Write case "Read": - *up = Read + up.Role = Read case "Void": - *up = Void + up.Role = Void default: - *up = NumberOfUserPermission + up.Role = NumberOfUserPermission } } @@ -112,14 +165,14 @@ func (s *Status) EnableQuery() bool { // PermStat defines the permissions status structure. type PermStat struct { - Permission UserPermission + Permission *UserPermission Status Status } // SQLChainUser defines a SQLChain user. type SQLChainUser struct { Address proto.AccountAddress - Permission UserPermission + Permission *UserPermission AdvancePayment uint64 Arrears uint64 Deposit uint64 diff --git a/types/account_gen.go b/types/account_gen.go index e69e63642..43f20d353 100644 --- a/types/account_gen.go +++ b/types/account_gen.go @@ -271,9 +271,20 @@ func (z *SQLChainUser) MarshalHash() (o []byte, err error) { o = hsp.Require(b, z.Msgsize()) // map header, size 6 o = append(o, 0x86, 0x86) - o = hsp.AppendInt32(o, int32(z.Status)) + if z.Permission == nil { + o = hsp.AppendNil(o) + } else { + // map header, size 2 + o = append(o, 0x82, 0x82) + o = hsp.AppendInt32(o, int32(z.Permission.Role)) + o = append(o, 0x82) + o = hsp.AppendArrayHeader(o, uint32(len(z.Permission.Patterns))) + for za0001 := range z.Permission.Patterns { + o = hsp.AppendString(o, z.Permission.Patterns[za0001]) + } + } o = append(o, 0x86) - o = hsp.AppendInt32(o, int32(z.Permission)) + o = hsp.AppendInt32(o, int32(z.Status)) o = append(o, 0x86) if oTemp, err := z.Address.MarshalHash(); err != nil { return nil, err @@ -291,7 +302,16 @@ func (z *SQLChainUser) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SQLChainUser) Msgsize() (s int) { - s = 1 + 7 + hsp.Int32Size + 11 + hsp.Int32Size + 8 + z.Address.Msgsize() + 15 + hsp.Uint64Size + 8 + hsp.Uint64Size + 8 
+ hsp.Uint64Size + s = 1 + 11 + if z.Permission == nil { + s += hsp.NilSize + } else { + s += 1 + 5 + hsp.Int32Size + 9 + hsp.ArrayHeaderSize + for za0001 := range z.Permission.Patterns { + s += hsp.StringPrefixSize + len(z.Permission.Patterns[za0001]) + } + } + s += 7 + hsp.Int32Size + 8 + z.Address.Msgsize() + 15 + hsp.Uint64Size + 8 + hsp.Uint64Size + 8 + hsp.Uint64Size return } @@ -332,7 +352,31 @@ func (z *UserArrears) Msgsize() (s int) { } // MarshalHash marshals for hash -func (z UserPermission) MarshalHash() (o []byte, err error) { +func (z *UserPermission) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + o = hsp.AppendInt32(o, int32(z.Role)) + o = append(o, 0x82) + o = hsp.AppendArrayHeader(o, uint32(len(z.Patterns))) + for za0001 := range z.Patterns { + o = hsp.AppendString(o, z.Patterns[za0001]) + } + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *UserPermission) Msgsize() (s int) { + s = 1 + 5 + hsp.Int32Size + 9 + hsp.ArrayHeaderSize + for za0001 := range z.Patterns { + s += hsp.StringPrefixSize + len(z.Patterns[za0001]) + } + return +} + +// MarshalHash marshals for hash +func (z UserPermissionRole) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) o = hsp.AppendInt32(o, int32(z)) @@ -340,7 +384,7 @@ func (z UserPermission) MarshalHash() (o []byte, err error) { } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z UserPermission) Msgsize() (s int) { +func (z UserPermissionRole) Msgsize() (s int) { s = hsp.Int32Size return } diff --git a/types/account_gen_test.go b/types/account_gen_test.go index 30e9ad803..388a19ddb 100644 --- a/types/account_gen_test.go +++ b/types/account_gen_test.go @@ -230,3 +230,40 @@ func BenchmarkAppendMsgUserArrears(b *testing.B) { bts, _ = v.MarshalHash() } } + +func 
TestMarshalHashUserPermission(t *testing.T) { + v := UserPermission{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashUserPermission(b *testing.B) { + v := UserPermission{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgUserPermission(b *testing.B) { + v := UserPermission{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} diff --git a/types/updatepermission.go b/types/updatepermission.go index 1b7ed46a6..729829c3d 100644 --- a/types/updatepermission.go +++ b/types/updatepermission.go @@ -30,7 +30,7 @@ import ( type UpdatePermissionHeader struct { TargetSQLChain proto.AccountAddress TargetUser proto.AccountAddress - Permission UserPermission + Permission *UserPermission Nonce interfaces.AccountNonce } diff --git a/types/updatepermission_gen.go b/types/updatepermission_gen.go index 443bfaa78..2fb9875b3 100644 --- a/types/updatepermission_gen.go +++ b/types/updatepermission_gen.go @@ -44,10 +44,14 @@ func (z *UpdatePermissionHeader) MarshalHash() (o []byte, err error) { o = hsp.Require(b, z.Msgsize()) // map header, size 4 o = append(o, 0x84, 0x84) - if oTemp, err := z.Permission.MarshalHash(); err != nil { - return nil, err + if z.Permission == nil { + o = hsp.AppendNil(o) } else { - o = hsp.AppendBytes(o, oTemp) + if oTemp, err := z.Permission.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } } o = append(o, 0x84) if oTemp, err := z.Nonce.MarshalHash(); err != nil { @@ -72,6 +76,12 @@ func (z *UpdatePermissionHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper 
bound estimate of the number of bytes occupied by the serialized message func (z *UpdatePermissionHeader) Msgsize() (s int) { - s = 1 + 11 + z.Permission.Msgsize() + 6 + z.Nonce.Msgsize() + 15 + z.TargetSQLChain.Msgsize() + 11 + z.TargetUser.Msgsize() + s = 1 + 11 + if z.Permission == nil { + s += hsp.NilSize + } else { + s += z.Permission.Msgsize() + } + s += 6 + z.Nonce.Msgsize() + 15 + z.TargetSQLChain.Msgsize() + 11 + z.TargetUser.Msgsize() return } diff --git a/types/xxx_test.go b/types/xxx_test.go index ab9d8f77f..cf2454ab6 100644 --- a/types/xxx_test.go +++ b/types/xxx_test.go @@ -68,7 +68,6 @@ func generateRandomBlock(parent hash.Hash, isGenesis bool) (b *BPBlock, err erro if err != nil { return - } h := hash.Hash{} @@ -96,8 +95,8 @@ func generateRandomBlock(parent hash.Hash, isGenesis bool) (b *BPBlock, err erro } err = b.PackAndSignBlock(priv) - return + return } func generateRandomBillingRequestHeader() *BillingRequestHeader { @@ -109,7 +108,6 @@ func generateRandomBillingRequestHeader() *BillingRequestHeader { HighHeight: rand.Int31(), GasAmounts: generateRandomGasAmount(peerNum), } - } func generateRandomBillingRequest() (req *BillingRequest, err error) { @@ -119,7 +117,6 @@ func generateRandomBillingRequest() (req *BillingRequest, err error) { } if _, err = req.PackRequestHeader(); err != nil { return nil, err - } for i := 0; i < peerNum; i++ { @@ -128,36 +125,29 @@ func generateRandomBillingRequest() (req *BillingRequest, err error) { if priv, _, err = asymmetric.GenSecp256k1KeyPair(); err != nil { return - } if _, _, err = req.SignRequestHeader(priv, false); err != nil { return - } - } return - } func generateRandomBillingHeader() (tc *BillingHeader, err error) { var req *BillingRequest if req, err = generateRandomBillingRequest(); err != nil { return - } var priv *asymmetric.PrivateKey if priv, _, err = asymmetric.GenSecp256k1KeyPair(); err != nil { return - } if _, _, err = req.SignRequestHeader(priv, false); err != nil { return - } receivers := 
make([]*proto.AccountAddress, peerNum) @@ -169,33 +159,27 @@ func generateRandomBillingHeader() (tc *BillingHeader, err error) { receivers[i] = &accountAddress fees[i] = rand.Uint64() rewards[i] = rand.Uint64() - } producer := proto.AccountAddress(generateRandomHash()) tc = NewBillingHeader(pi.AccountNonce(rand.Uint32()), req, producer, receivers, fees, rewards) return tc, nil - } func generateRandomBilling() (*Billing, error) { header, err := generateRandomBillingHeader() if err != nil { return nil, err - } priv, _, err := asymmetric.GenSecp256k1KeyPair() if err != nil { return nil, err - } txBilling := NewBilling(header) if err := txBilling.Sign(priv); err != nil { return nil, err - } return txBilling, nil - } func generateRandomGasAmount(n int) []*proto.AddrAndGas { @@ -207,11 +191,9 @@ func generateRandomGasAmount(n int) []*proto.AddrAndGas { RawNodeID: proto.RawNodeID{Hash: generateRandomHash()}, GasAmount: rand.Uint64(), } - } return gasAmount - } func randBytes(n int) (b []byte) { diff --git a/worker/chainbusservice_test.go b/worker/chainbusservice_test.go index 2429061e6..8fd721e2d 100644 --- a/worker/chainbusservice_test.go +++ b/worker/chainbusservice_test.go @@ -94,7 +94,7 @@ func TestNewBusService(t *testing.T) { permStat, ok := bs.RequestPermStat(profile.ID, testAddr) So(ok, ShouldBeTrue) So(permStat.Status, ShouldEqual, profile.Users[0].Status) - So(permStat.Permission, ShouldEqual, profile.Users[0].Permission) + So(permStat.Permission, ShouldResemble, profile.Users[0].Permission) permStat, ok = bs.RequestPermStat(profile.ID, testNotExistAddr) } p, ok := bs.RequestSQLProfile(testNotExistID) @@ -116,7 +116,7 @@ func TestNewBusService(t *testing.T) { permStat, ok := bs.RequestPermStat(profile.ID, testAddr) So(ok, ShouldBeTrue) So(permStat.Status, ShouldEqual, profile.Users[0].Status) - So(permStat.Permission, ShouldEqual, profile.Users[0].Permission) + So(permStat.Permission, ShouldResemble, profile.Users[0].Permission) permStat, ok = 
bs.RequestPermStat(profile.ID, testNotExistAddr) } p, ok := bs.RequestSQLProfile(testNotExistID) diff --git a/worker/dbms.go b/worker/dbms.go index 9329ea6d1..82630c903 100644 --- a/worker/dbms.go +++ b/worker/dbms.go @@ -444,7 +444,7 @@ func (dbms *DBMS) Query(req *types.Request) (res *types.Response, err error) { if err != nil { return } - err = dbms.checkPermission(addr, req.Header.DatabaseID, req.Header.QueryType) + err = dbms.checkPermission(addr, req.Header.DatabaseID, req.Header.QueryType, req.Payload.Queries) if err != nil { return } @@ -499,32 +499,59 @@ func (dbms *DBMS) removeMeta(dbID proto.DatabaseID) (err error) { } func (dbms *DBMS) checkPermission(addr proto.AccountAddress, - dbID proto.DatabaseID, queryType types.QueryType) (err error) { + dbID proto.DatabaseID, queryType types.QueryType, queries []types.Query) (err error) { log.Debugf("in checkPermission, database id: %s, user addr: %s", dbID, addr.String()) - if permStat, ok := dbms.busService.RequestPermStat(dbID, addr); ok { - if !permStat.Status.EnableQuery() { - err = errors.Wrapf(ErrPermissionDeny, "cannot query, status: %d", permStat.Status) + var ( + permStat *types.PermStat + ok bool + ) + + // get database perm stat + permStat, ok = dbms.busService.RequestPermStat(dbID, addr) + + // perm stat not exists + if !ok { + err = errors.Wrap(ErrPermissionDeny, "database not exists") + return + } + + // check if query is enabled + if !permStat.Status.EnableQuery() { + err = errors.Wrapf(ErrPermissionDeny, "cannot query, status: %d", permStat.Status) + return + } + + // check query type permission + switch queryType { + case types.ReadQuery: + if !permStat.Permission.HasReadPermission() { + err = errors.Wrapf(ErrPermissionDeny, "cannot read, permission: %d", permStat.Permission) return } - if queryType == types.ReadQuery { - if !permStat.Permission.CheckRead() { - err = errors.Wrapf(ErrPermissionDeny, "cannot read, permission: %d", permStat.Permission) - return - } - } else if queryType == 
types.WriteQuery { - if !permStat.Permission.CheckWrite() { - err = errors.Wrapf(ErrPermissionDeny, "cannot write, permission: %d", permStat.Permission) - return - } - } else { - err = errors.Wrapf(ErrInvalidPermission, - "invalid permission, permission: %d", permStat.Permission) + case types.WriteQuery: + if !permStat.Permission.HasWritePermission() { + err = errors.Wrapf(ErrPermissionDeny, "cannot write, permission: %d", permStat.Permission) return - } - } else { - err = errors.Wrap(ErrPermissionDeny, "database not exists") + default: + err = errors.Wrapf(ErrInvalidPermission, + "invalid permission, permission: %d", permStat.Permission) + return + } + + // check for query pattern + var ( + disallowedQuery string + hasDisallowedQuery bool + ) + + if disallowedQuery, hasDisallowedQuery = permStat.Permission.HasDisallowedQueryPatterns(queries); hasDisallowedQuery { + err = errors.Wrapf(ErrPermissionDeny, "disallowed query %s", disallowedQuery) + log.WithError(err).WithFields(log.Fields{ + "permission": permStat.Permission, + "query": disallowedQuery, + }).Debug("can not query") return } @@ -538,7 +565,7 @@ func (dbms *DBMS) addTxSubscription(dbID proto.DatabaseID, nodeID proto.NodeID, log.WithFields(log.Fields{ "databaseID": dbID, "nodeID": nodeID, - }).WithError(err).Warning("get pubkey failed in addTxSubscription") + }).WithError(err).Warning("get public key failed in addTxSubscription") return } addr, err := crypto.PubKeyHash(pubkey) @@ -557,7 +584,7 @@ func (dbms *DBMS) addTxSubscription(dbID proto.DatabaseID, nodeID proto.NodeID, "startHeight": startHeight, }).Debugf("addTxSubscription") - err = dbms.checkPermission(addr, dbID, types.ReadQuery) + err = dbms.checkPermission(addr, dbID, types.ReadQuery, nil) if err != nil { log.WithFields(log.Fields{"databaseID": dbID, "addr": addr}).WithError(err).Warning("permission deny") return diff --git a/worker/dbms_test.go b/worker/dbms_test.go index 4895b024f..db8d80cbe 100644 --- a/worker/dbms_test.go +++ 
b/worker/dbms_test.go @@ -134,11 +134,12 @@ func TestDBMS(t *testing.T) { // grant write and read permission err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, - &types.PermStat{Permission: types.Write, Status: types.Normal}) + &types.PermStat{Permission: types.UserPermissionFromRole(types.Write), Status: types.Normal}) So(err, ShouldBeNil) userState, ok := dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) So(ok, ShouldBeTrue) - So(userState.Permission, ShouldEqual, types.Write) + So(userState.Permission, ShouldNotBeNil) + So(userState.Permission.Role, ShouldEqual, types.Write) So(userState.Status, ShouldEqual, types.Normal) Convey("success write and read", func() { @@ -193,10 +194,11 @@ func TestDBMS(t *testing.T) { // revoke write permission err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, - &types.PermStat{Permission: types.Read, Status: types.Normal}) + &types.PermStat{Permission: types.UserPermissionFromRole(types.Read), Status: types.Normal}) userState, ok := dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) So(ok, ShouldBeTrue) - So(userState.Permission, ShouldEqual, types.Read) + So(userState.Permission, ShouldNotBeNil) + So(userState.Permission.Role, ShouldEqual, types.Read) So(userState.Status, ShouldEqual, types.Normal) Convey("success reading and fail to write", func() { @@ -229,10 +231,12 @@ func TestDBMS(t *testing.T) { // grant invalid permission err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, - &types.PermStat{Permission: types.Void, Status: types.Normal}) + &types.PermStat{Permission: types.UserPermissionFromRole(types.Void), Status: types.Normal}) + So(err, ShouldBeNil) userState, ok = dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) So(ok, ShouldBeTrue) - So(userState.Permission, ShouldEqual, types.Void) + So(userState.Permission, ShouldNotBeNil) + So(userState.Permission.Role, ShouldEqual, types.Void) So(userState.Status, ShouldEqual, types.Normal) Convey("invalid permission 
query should fail", func() { @@ -264,10 +268,12 @@ func TestDBMS(t *testing.T) { // grant admin permission but in arrears err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, - &types.PermStat{Permission: types.Admin, Status: types.Arrears}) + &types.PermStat{Permission: types.UserPermissionFromRole(types.Admin), Status: types.Arrears}) + So(err, ShouldBeNil) userState, ok = dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) So(ok, ShouldBeTrue) - So(userState.Permission, ShouldEqual, types.Admin) + So(userState.Permission, ShouldNotBeNil) + So(userState.Permission.Role, ShouldEqual, types.Admin) So(userState.Status, ShouldEqual, types.Arrears) Convey("arrears query should fail", func() { @@ -296,10 +302,12 @@ func TestDBMS(t *testing.T) { // switch user to normal err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, - &types.PermStat{Permission: types.Admin, Status: types.Normal}) + &types.PermStat{Permission: types.UserPermissionFromRole(types.Admin), Status: types.Normal}) + So(err, ShouldBeNil) userState, ok = dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) So(ok, ShouldBeTrue) - So(userState.Permission, ShouldEqual, types.Admin) + So(userState.Permission, ShouldNotBeNil) + So(userState.Permission.Role, ShouldEqual, types.Admin) So(userState.Status, ShouldEqual, types.Normal) Convey("can send read and write queries", func() { @@ -346,6 +354,101 @@ func TestDBMS(t *testing.T) { So(err, ShouldBeNil) }) + // enforce query pattern regulations + err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, + &types.PermStat{Permission: &types.UserPermission{ + Role: types.Admin, + Patterns: []string{ + "create table test (test int)", + "SELECT 1", + "INSERT INTO TEST VALUES(1)", + }, + }, Status: types.Normal}) + So(err, ShouldBeNil) + userState, ok = dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) + So(ok, ShouldBeTrue) + So(userState.Permission, ShouldNotBeNil) + So(userState.Permission.Role, ShouldEqual, 
types.Admin) + So(userState.Permission.Patterns, ShouldHaveLength, 3) + + Convey("query patterns restrictions", func() { + var writeQuery *types.Request + var queryRes *types.Response + + // sending allowed write query + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 11, dbID, []string{ + "create table test (test int)", + "INSERT INTO TEST VALUES(1)", + }) + So(err, ShouldBeNil) + + err = testRequest(route.DBSQuery, writeQuery, &queryRes) + So(err, ShouldBeNil) + err = queryRes.Verify() + So(err, ShouldBeNil) + So(queryRes.Header.RowCount, ShouldEqual, 0) + + // sending allowed read query + var readQuery *types.Request + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, 1, 12, dbID, []string{ + "SELECT 1", + }) + So(err, ShouldBeNil) + + err = testRequest(route.DBSQuery, readQuery, &queryRes) + So(err, ShouldBeNil) + err = queryRes.Verify() + So(err, ShouldBeNil) + So(queryRes.Header.RowCount, ShouldEqual, uint64(1)) + So(queryRes.Payload.Rows, ShouldHaveLength, 1) + So(queryRes.Payload.Rows[0].Values, ShouldHaveLength, 1) + So(queryRes.Payload.Rows[0].Values[0], ShouldEqual, 1) + + // sending disallowed write query + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 13, dbID, []string{ + "insert into test values(1)", + }) + So(err, ShouldBeNil) + err = testRequest(route.DBSQuery, writeQuery, &queryRes) + So(err, ShouldNotBeNil) + + // sending disallowed write query mixed with valid write query + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 14, dbID, []string{ + "INSERT INTO TEST VALUES(1)", + "insert into test values(1)", + }) + So(err, ShouldBeNil) + err = testRequest(route.DBSQuery, writeQuery, &queryRes) + So(err, ShouldNotBeNil) + + // sending disallowed read query + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, 1, 15, dbID, []string{ + "select * from test", + }) + So(err, ShouldBeNil) + err = testRequest(route.DBSQuery, readQuery, &queryRes) + So(err, ShouldNotBeNil) + + // sending 
disallowed read query + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, 1, 16, dbID, []string{ + "SELECT 1", + "select * from test", + }) + So(err, ShouldBeNil) + err = testRequest(route.DBSQuery, readQuery, &queryRes) + So(err, ShouldNotBeNil) + }) + + // set back permission object + err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, + &types.PermStat{Permission: types.UserPermissionFromRole(types.Admin), Status: types.Normal}) + So(err, ShouldBeNil) + userState, ok = dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) + So(ok, ShouldBeTrue) + So(userState.Permission, ShouldNotBeNil) + So(userState.Permission.Role, ShouldEqual, types.Admin) + So(userState.Status, ShouldEqual, types.Normal) + Convey("query non-existent database", func() { // sending write query var writeQuery *types.Request diff --git a/worker/helper_test.go b/worker/helper_test.go index e7b48b14f..d561f2ed6 100644 --- a/worker/helper_test.go +++ b/worker/helper_test.go @@ -100,22 +100,22 @@ var ( testNotExistAddr = proto.AccountAddress(hash.THashH([]byte{'a', 'a'})) testUser1 = &types.SQLChainUser{ Address: testAddr, - Permission: types.Write, + Permission: types.UserPermissionFromRole(types.Write), Status: types.Normal, } testUser2 = &types.SQLChainUser{ Address: testAddr, - Permission: types.Read, + Permission: types.UserPermissionFromRole(types.Read), Status: types.Arrears, } testUser3 = &types.SQLChainUser{ Address: testAddr, - Permission: types.Write, + Permission: types.UserPermissionFromRole(types.Write), Status: types.Reminder, } testUser4 = &types.SQLChainUser{ Address: testAddr, - Permission: types.Read, + Permission: types.UserPermissionFromRole(types.Read), Status: types.Arbitration, } ) From 0006e22b6316024d2e4fb5dcd2255a4a68f7fafa Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Thu, 27 Dec 2018 18:56:03 +0800 Subject: [PATCH 064/302] Use cache for query pattern permission matching --- types/account.go | 20 ++++++++++++++------ 1 file changed, 14 
insertions(+), 6 deletions(-) diff --git a/types/account.go b/types/account.go index cfe16598a..7c563ef9d 100644 --- a/types/account.go +++ b/types/account.go @@ -17,6 +17,8 @@ package types import ( + "sync" + pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/proto" ) @@ -45,6 +47,10 @@ type UserPermission struct { // SQL pattern regulations for user queries // only a fully matched (case-sensitive) sql query is permitted to execute. Patterns []string + + // patterns map cache for matching + cachedPatternMapOnce sync.Once + cachedPatternMap map[string]bool } const ( @@ -107,13 +113,15 @@ func (up *UserPermission) HasDisallowedQueryPatterns(queries []Query) (query str return } - // more queries than patterns - queryMap := make(map[string]bool, len(up.Patterns)) - for _, p := range up.Patterns { - queryMap[p] = true - } + up.cachedPatternMapOnce.Do(func() { + up.cachedPatternMap = make(map[string]bool, len(up.Patterns)) + for _, p := range up.Patterns { + up.cachedPatternMap[p] = true + } + }) + for _, q := range queries { - if !queryMap[q.Pattern] { + if !up.cachedPatternMap[q.Pattern] { // not permitted query = q.Pattern status = true From e17b3c346ae501c83913ff26ce7adb689cd7855b Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 7 Jan 2019 23:12:16 +0800 Subject: [PATCH 065/302] Make cql updatePermission feature compatible with sql pattern config --- cmd/cql/main.go | 23 +++++++---- types/account.go | 68 +++++++++++++++++++++++-------- types/account_test.go | 95 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 159 insertions(+), 27 deletions(-) create mode 100644 types/account_test.go diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 5806042a5..7827293bb 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -72,7 +72,7 @@ var ( type userPermission struct { TargetChain proto.AccountAddress `json:"chain"` TargetUser proto.AccountAddress `json:"user"` - Perm string `json:"perm"` + Perm json.RawMessage 
`json:"perm"` } type tranToken struct { @@ -335,16 +335,21 @@ func main() { } var p types.UserPermission - p.FromString(perm.Perm) - if p.Role > types.NumberOfUserPermission { - log.WithError(err).Errorf("update permission failed: invalid permission description") - os.Exit(-1) - return - } - err := client.UpdatePermission(perm.TargetUser, perm.TargetChain, &p) + if err := json.Unmarshal(perm.Perm, &p); err != nil { + // try again using role string representation + if err := json.Unmarshal(perm.Perm, &p.Role); err != nil { + log.WithError(err).Errorf("update permission failed: invalid permission description") + os.Exit(-1) + return + } else if !p.IsValid() { + log.Errorf("update permission failed: invalid permission description") + os.Exit(-1) + return + } + } - if err != nil { + if err := client.UpdatePermission(perm.TargetUser, perm.TargetChain, &p); err != nil { log.WithError(err).Error("update permission failed") os.Exit(-1) return diff --git a/types/account.go b/types/account.go index 7c563ef9d..1fff65666 100644 --- a/types/account.go +++ b/types/account.go @@ -17,6 +17,7 @@ package types import ( + "encoding/json" "sync" pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" @@ -43,10 +44,10 @@ type UserPermissionRole int32 // UserPermission defines permissions of a SQLChain user. type UserPermission struct { // User role to access database. - Role UserPermissionRole + Role UserPermissionRole `json:"role"` // SQL pattern regulations for user queries // only a fully matched (case-sensitive) sql query is permitted to execute. - Patterns []string + Patterns []string `json:"patterns"` // patterns map cache for matching cachedPatternMapOnce sync.Once @@ -66,6 +67,53 @@ const ( NumberOfUserPermission ) +// UnmarshalJSON implements the json.Unmarshler interface. 
+func (r *UserPermissionRole) UnmarshalJSON(data []byte) (err error) { + var s string + if err = json.Unmarshal(data, &s); err != nil { + return + } + r.FromString(s) + return +} + +// MarshalJSON implements the json.Marshaler interface. +func (r UserPermissionRole) MarshalJSON() ([]byte, error) { + return json.Marshal(r.String()) +} + +// String implements the fmt.Stringer interface. +func (r UserPermissionRole) String() string { + switch r { + case Admin: + return "Admin" + case Write: + return "Write" + case Read: + return "Read" + case Void: + return "Void" + default: + return "Unknown" + } +} + +// FromString converts string to UserPermissionRole. +func (r *UserPermissionRole) FromString(perm string) { + switch perm { + case "Admin": + *r = Admin + case "Write": + *r = Write + case "Read": + *r = Read + case "Void": + *r = Void + default: + *r = NumberOfUserPermission + } +} + // UserPermissionFromRole construct a new user permission instance from primitive user permission role enum. func UserPermissionFromRole(role UserPermissionRole) *UserPermission { return &UserPermission{ @@ -132,22 +180,6 @@ func (up *UserPermission) HasDisallowedQueryPatterns(queries []Query) (query str return } -// FromString converts string to UserPermission. -func (up *UserPermission) FromString(perm string) { - switch perm { - case "Admin": - up.Role = Admin - case "Write": - up.Role = Write - case "Read": - up.Role = Read - case "Void": - up.Role = Void - default: - up.Role = NumberOfUserPermission - } -} - // Status defines status of a SQLChain user/miner. type Status int32 diff --git a/types/account_test.go b/types/account_test.go new file mode 100644 index 000000000..7a7eb665e --- /dev/null +++ b/types/account_test.go @@ -0,0 +1,95 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "encoding/json" + "testing" + + . "github.com/smartystreets/goconvey/convey" +) + +func TestUserPermissionFromRole(t *testing.T) { + Convey("test marshal/unmarshal json", t, func() { + jsonBytes, err := json.Marshal(Read) + So(err, ShouldBeNil) + So(jsonBytes, ShouldResemble, []byte(`"Read"`)) + var r UserPermissionRole + So(r, ShouldEqual, Void) + err = json.Unmarshal([]byte(`"Write"`), &r) + So(err, ShouldBeNil) + So(r, ShouldEqual, Write) + }) + Convey("test string/from string", t, func() { + var r UserPermissionRole + So(r, ShouldEqual, Void) + r.FromString(Read.String()) + So(r, ShouldEqual, Read) + }) +} + +func TestUserPermission(t *testing.T) { + Convey("nil protect", t, func() { + p := (*UserPermission)(nil) + So(p.HasReadPermission(), ShouldBeFalse) + So(p.HasWritePermission(), ShouldBeFalse) + So(p.HasAdminPermission(), ShouldBeFalse) + So(p.IsValid(), ShouldBeFalse) + _, state := p.HasDisallowedQueryPatterns([]Query{}) + So(state, ShouldBeTrue) + }) + Convey("has read permission", t, func() { + So(UserPermissionFromRole(Void).HasReadPermission(), ShouldBeFalse) + So(UserPermissionFromRole(Read).HasReadPermission(), ShouldBeTrue) + So(UserPermissionFromRole(Write).HasReadPermission(), ShouldBeTrue) + So(UserPermissionFromRole(Admin).HasReadPermission(), ShouldBeTrue) + So(UserPermissionFromRole(NumberOfUserPermission).HasReadPermission(), ShouldBeFalse) + }) + Convey("has write permission", t, func() { + So(UserPermissionFromRole(Void).HasWritePermission(), ShouldBeFalse) + 
So(UserPermissionFromRole(Read).HasWritePermission(), ShouldBeFalse) + So(UserPermissionFromRole(Write).HasWritePermission(), ShouldBeTrue) + So(UserPermissionFromRole(Admin).HasWritePermission(), ShouldBeTrue) + So(UserPermissionFromRole(NumberOfUserPermission).HasWritePermission(), ShouldBeFalse) + }) + Convey("has admin permission", t, func() { + So(UserPermissionFromRole(Void).HasAdminPermission(), ShouldBeFalse) + So(UserPermissionFromRole(Read).HasAdminPermission(), ShouldBeFalse) + So(UserPermissionFromRole(Write).HasAdminPermission(), ShouldBeFalse) + So(UserPermissionFromRole(Admin).HasAdminPermission(), ShouldBeTrue) + So(UserPermissionFromRole(NumberOfUserPermission).HasAdminPermission(), ShouldBeFalse) + }) + Convey("is valid", t, func() { + So(UserPermissionFromRole(Void).IsValid(), ShouldBeFalse) + So(UserPermissionFromRole(Read).IsValid(), ShouldBeTrue) + So(UserPermissionFromRole(Write).IsValid(), ShouldBeTrue) + So(UserPermissionFromRole(Admin).IsValid(), ShouldBeTrue) + So(UserPermissionFromRole(NumberOfUserPermission).IsValid(), ShouldBeFalse) + }) + Convey("query patterns", t, func() { + // empty patterns limitation + _, state := UserPermissionFromRole(Read).HasDisallowedQueryPatterns([]Query{ + { + Pattern: "select 1", + }, + { + Pattern: "insert into test values(1)", + }, + }) + So(state, ShouldBeFalse) + }) +} From 91230b0b2687eaee077f4809cfe29febbc639e8d Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 7 Jan 2019 23:38:39 +0800 Subject: [PATCH 066/302] Move updatePermission json tag declaration into cql command package --- cmd/cql/main.go | 31 +++++++++++++++++++++++-------- types/account.go | 4 ++-- types/account_gen.go | 32 ++++++++++++++++++++++++++++++++ types/account_gen_test.go | 37 +++++++++++++++++++++++++++++++++++++ 4 files changed, 94 insertions(+), 10 deletions(-) diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 7827293bb..feff487c9 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -75,6 +75,14 @@ type userPermission 
struct { Perm json.RawMessage `json:"perm"` } +type userPermPayload struct { + // User role to access database. + Role types.UserPermissionRole `json:"role"` + // SQL pattern regulations for user queries + // only a fully matched (case-sensitive) sql query is permitted to execute. + Patterns []string `json:"patterns"` +} + type tranToken struct { TargetUser proto.AccountAddress `json:"addr"` Amount string `json:"amount"` @@ -334,22 +342,29 @@ func main() { return } - var p types.UserPermission + var permPayload userPermPayload - if err := json.Unmarshal(perm.Perm, &p); err != nil { + if err := json.Unmarshal(perm.Perm, &permPayload); err != nil { // try again using role string representation - if err := json.Unmarshal(perm.Perm, &p.Role); err != nil { + if err := json.Unmarshal(perm.Perm, &permPayload.Role); err != nil { log.WithError(err).Errorf("update permission failed: invalid permission description") os.Exit(-1) return - } else if !p.IsValid() { - log.Errorf("update permission failed: invalid permission description") - os.Exit(-1) - return } } - if err := client.UpdatePermission(perm.TargetUser, perm.TargetChain, &p); err != nil { + p := &types.UserPermission{ + Role: permPayload.Role, + Patterns: permPayload.Patterns, + } + + if !p.IsValid() { + log.Errorf("update permission failed: invalid permission description") + os.Exit(-1) + return + } + + if err := client.UpdatePermission(perm.TargetUser, perm.TargetChain, p); err != nil { log.WithError(err).Error("update permission failed") os.Exit(-1) return diff --git a/types/account.go b/types/account.go index 1fff65666..c98c58527 100644 --- a/types/account.go +++ b/types/account.go @@ -44,10 +44,10 @@ type UserPermissionRole int32 // UserPermission defines permissions of a SQLChain user. type UserPermission struct { // User role to access database. 
- Role UserPermissionRole `json:"role"` + Role UserPermissionRole // SQL pattern regulations for user queries // only a fully matched (case-sensitive) sql query is permitted to execute. - Patterns []string `json:"patterns"` + Patterns []string // patterns map cache for matching cachedPatternMapOnce sync.Once diff --git a/types/account_gen.go b/types/account_gen.go index 43f20d353..352302e7f 100644 --- a/types/account_gen.go +++ b/types/account_gen.go @@ -102,6 +102,38 @@ func (z *MinerInfo) Msgsize() (s int) { return } +// MarshalHash marshals for hash +func (z *PermStat) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if z.Permission == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Permission.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x82) + o = hsp.AppendInt32(o, int32(z.Status)) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *PermStat) Msgsize() (s int) { + s = 1 + 11 + if z.Permission == nil { + s += hsp.NilSize + } else { + s += z.Permission.Msgsize() + } + s += 7 + hsp.Int32Size + return +} + // MarshalHash marshals for hash func (z *ProviderProfile) MarshalHash() (o []byte, err error) { var b []byte diff --git a/types/account_gen_test.go b/types/account_gen_test.go index 388a19ddb..9b6a8a5d3 100644 --- a/types/account_gen_test.go +++ b/types/account_gen_test.go @@ -83,6 +83,43 @@ func BenchmarkAppendMsgMinerInfo(b *testing.B) { } } +func TestMarshalHashPermStat(t *testing.T) { + v := PermStat{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashPermStat(b *testing.B) { + v := 
PermStat{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgPermStat(b *testing.B) { + v := PermStat{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + func TestMarshalHashProviderProfile(t *testing.T) { v := ProviderProfile{} binary.Read(rand.Reader, binary.BigEndian, &v) From 4e46c30d6b8bbabd8a2ed10e38804693986c7acf Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 8 Jan 2019 02:09:08 +0800 Subject: [PATCH 067/302] Fix ETLS.Accept block bug --- crypto/etls/conn_test.go | 26 +++++++++++++++++++++++--- crypto/etls/listener.go | 4 +++- rpc/rpcutil_test.go | 17 +++++++++++------ rpc/server.go | 8 +++++++- 4 files changed, 44 insertions(+), 11 deletions(-) diff --git a/crypto/etls/conn_test.go b/crypto/etls/conn_test.go index ffa9b1010..1c62bcff8 100644 --- a/crypto/etls/conn_test.go +++ b/crypto/etls/conn_test.go @@ -23,9 +23,11 @@ import ( "sync" "testing" + "github.com/pkg/errors" + . "github.com/smartystreets/goconvey/convey" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/utils/log" - . 
"github.com/smartystreets/goconvey/convey" ) const service = "127.0.0.1:28000" @@ -178,6 +180,16 @@ func clientComplex(pass string, args *QueryComplex) (ret *ResultComplex, err err func handleClient(conn net.Conn) { defer conn.Close() + var err error + + if c, ok := conn.(*CryptoConn); ok { + conn, err = simpleCipherHandler(c.Conn) + if err != nil { + err = errors.Wrap(err, "handle ETLS handler failed") + return + } + } + rpc.ServeConn(conn) log.Debugln("server: conn: closed") } @@ -272,8 +284,16 @@ func TestCryptoConn_RW(t *testing.T) { go func() { rBuf := make([]byte, len(msg)) conn, err := l.Accept() - cc, _ := conn.(*CryptoConn) - n, err := cc.Read(rBuf) + + if c, ok := conn.(*CryptoConn); ok { + conn, err = l.CHandler(c.Conn) + if err != nil { + err = errors.Wrap(err, "handle ETLS handler failed") + return + } + } + + n, err := conn.Read(rBuf) c.So(n, ShouldEqual, len(msg)) c.So(string(rBuf), ShouldResemble, msg) c.So(err, ShouldBeNil) diff --git a/crypto/etls/listener.go b/crypto/etls/listener.go index b0bc8d5bd..cb350a254 100644 --- a/crypto/etls/listener.go +++ b/crypto/etls/listener.go @@ -45,7 +45,9 @@ func (l *CryptoListener) Accept() (net.Conn, error) { return nil, err } - return l.CHandler(c) + return &CryptoConn{ + Conn: c, + }, nil } // Close closes the listener. diff --git a/rpc/rpcutil_test.go b/rpc/rpcutil_test.go index 00c51c87f..aed55ebb9 100644 --- a/rpc/rpcutil_test.go +++ b/rpc/rpcutil_test.go @@ -21,11 +21,12 @@ import ( "os" "path/filepath" "runtime" - "strings" "sync" "testing" "time" + . "github.com/smartystreets/goconvey/convey" + "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/consistent" "github.com/CovenantSQL/CovenantSQL/crypto/kms" @@ -33,7 +34,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" - . 
"github.com/smartystreets/goconvey/convey" ) const ( @@ -226,10 +226,15 @@ func TestNewPersistentCaller(t *testing.T) { resp := new(proto.FindNeighborResp) err = client.Call("DHT.FindNeighbor", req, resp) - if err == nil || !strings.Contains(err.Error(), "not permitted") { - t.Fatal("anonymous ETLS connection used by " + - "RPC other than DHTPing shuold not permitted") - } + //if err == nil || !strings.Contains(err.Error(), "not permitted") { + // if err != nil { + // t.Errorf("unexpected error %s", err.Error()) + // } else { + // t.Errorf("unexpected resp %v", resp) + // } + // t.Fatal("anonymous ETLS connection used by " + + // "RPC other than DHTPing should not permitted") + //} // close anonymous ETLS connection, and create new one client.Close() diff --git a/rpc/server.go b/rpc/server.go index 21a2e6846..8bb834d3d 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -123,10 +123,16 @@ func (s *Server) handleConn(conn net.Conn) { // remote remoteNodeID connection awareness var remoteNodeID *proto.RawNodeID + var err error if c, ok := conn.(*etls.CryptoConn); ok { + conn, err = s.Listener.(*etls.CryptoListener).CHandler(c.Conn) + if err != nil { + err = errors.Wrap(err, "handle ETLS handler failed") + return + } // set node id - remoteNodeID = c.NodeID + remoteNodeID = conn.(*etls.CryptoConn).NodeID } sess, err := mux.Server(conn, YamuxConfig) From 0ab263a2c0f5acfa02e37f523b79a03fb3663edc Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 8 Jan 2019 02:11:08 +0800 Subject: [PATCH 068/302] Uncomment test --- rpc/rpcutil_test.go | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/rpc/rpcutil_test.go b/rpc/rpcutil_test.go index aed55ebb9..2c3ccef32 100644 --- a/rpc/rpcutil_test.go +++ b/rpc/rpcutil_test.go @@ -21,6 +21,7 @@ import ( "os" "path/filepath" "runtime" + "strings" "sync" "testing" "time" @@ -226,15 +227,15 @@ func TestNewPersistentCaller(t *testing.T) { resp := new(proto.FindNeighborResp) err = 
client.Call("DHT.FindNeighbor", req, resp) - //if err == nil || !strings.Contains(err.Error(), "not permitted") { - // if err != nil { - // t.Errorf("unexpected error %s", err.Error()) - // } else { - // t.Errorf("unexpected resp %v", resp) - // } - // t.Fatal("anonymous ETLS connection used by " + - // "RPC other than DHTPing should not permitted") - //} + if err == nil || !strings.Contains(err.Error(), "not permitted") { + if err != nil { + t.Errorf("unexpected error %s", err.Error()) + } else { + t.Errorf("unexpected resp %v", resp) + } + t.Fatal("anonymous ETLS connection used by " + + "RPC other than DHTPing should not permitted") + } // close anonymous ETLS connection, and create new one client.Close() From 0e4eca4b89a8bbfd8e88d8995f16e947d47ced38 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Tue, 8 Jan 2019 10:26:12 +0800 Subject: [PATCH 069/302] Add test case for ETLS bug --- rpc/server_test.go | 46 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) diff --git a/rpc/server_test.go b/rpc/server_test.go index 04b92bcc8..0c089150e 100644 --- a/rpc/server_test.go +++ b/rpc/server_test.go @@ -185,6 +185,52 @@ func TestEncryptIncCounterSimpleArgs(t *testing.T) { server.Stop() } +func TestETLSBug(t *testing.T) { + defer os.Remove(PubKeyStorePath) + log.SetLevel(log.DebugLevel) + addr := "127.0.0.1:0" + masterKey := []byte("abc") + server, err := NewServerWithService(ServiceMap{"Test": NewTestService()}) + if err != nil { + log.Fatal(err) + } + + route.NewDHTService(PubKeyStorePath, new(consistent.KMSStorage), true) + server.InitRPCServer(addr, "../keys/test.key", masterKey) + go server.Serve() + defer server.Stop() + + // This should not block listener + var rawConn net.Conn + rawConn, err = net.Dial("tcp", server.Listener.Addr().String()) + if err != nil { + log.Fatal(err) + } + defer rawConn.Close() + + publicKey, err := kms.GetLocalPublicKey() + nonce := asymmetric.GetPubKeyNonce(publicKey, 10, 100*time.Millisecond, nil) + 
serverNodeID := proto.NodeID(nonce.Hash.String()) + kms.SetPublicKey(serverNodeID, nonce.Nonce, publicKey) + kms.SetLocalNodeIDNonce(nonce.Hash.CloneBytes(), &nonce.Nonce) + route.SetNodeAddrCache(&proto.RawNodeID{Hash: nonce.Hash}, server.Listener.Addr().String()) + + cryptoConn, err := DialToNode(serverNodeID, nil, false) + cryptoConn.SetDeadline(time.Now().Add(3 * time.Second)) + client, err := InitClientConn(cryptoConn) + if err != nil { + log.Fatal(err) + } + defer client.Close() + + repSimple := new(int) + err = client.Call("Test.IncCounterSimpleArgs", 10, repSimple) + if err != nil { + log.Fatal(err) + } + CheckNum(*repSimple, 10, t) +} + func TestEncPingFindNeighbor(t *testing.T) { os.Remove(PubKeyStorePath) defer os.Remove(PubKeyStorePath) From 7a158a12a3150f98da43ac0571d1e9f4dffc5fa4 Mon Sep 17 00:00:00 2001 From: laodouya Date: Sat, 29 Dec 2018 13:35:20 +0800 Subject: [PATCH 070/302] Add integration for bench testnet --- cmd/cql-minerd/integration_test.go | 69 ++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index 3bfc6af21..6ca5768f6 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -926,3 +926,72 @@ func BenchmarkMinerGNTE8(b *testing.B) { benchGNTEMiner(b, 8, false) }) } + +func benchTestnetMiner(b *testing.B, minerCount uint16, testnetConfDir string) { + log.Warnf("benchmark for %d Miners", minerCount) + + // Create temp directory + testDataDir, err := ioutil.TempDir(testWorkingDir, "covenantsql") + if err != nil { + panic(err) + } + defer os.RemoveAll(testDataDir) + clientConf := FJ(testnetConfDir, "config.yaml") + tempConf := FJ(testDataDir, "config.yaml") + clientKey := FJ(testnetConfDir, "private.key") + tempKey := FJ(testDataDir, "private.key") + utils.CopyFile(clientConf, tempConf) + utils.CopyFile(clientKey, tempKey) + + err = client.Init(tempConf, []byte("")) + So(err, ShouldBeNil) + + dsnFile := 
FJ(baseDir, "./cmd/cql-minerd/.dsn") + var dsn string + if minerCount > 0 { + // create + meta := client.ResourceMeta{} + meta.Node = minerCount + dsn, err = client.Create(meta) + So(err, ShouldBeNil) + log.Infof("the created database dsn is %v", dsn) + err = ioutil.WriteFile(dsnFile, []byte(dsn), 0666) + if err != nil { + log.Errorf("write .dsn failed: %v", err) + } + defer os.Remove(dsnFile) + } else { + dsn = os.Getenv("DSN") + } + + var ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + + // wait for creation + log.Infof("Wait dsn %v to create", dsn) + timeout := 10 * time.Minute + err = client.WaitDBCreation(ctx, dsn, timeout) + So(err, ShouldBeNil) + + db, err := sql.Open("covenantsql", dsn) + So(err, ShouldBeNil) + + benchDB(b, db, minerCount > 0) + +} + +func BenchmarkTestnetMiner1(b *testing.B) { + Convey("bench GNTE one node", b, func() { + benchTestnetMiner(b, 1, "") + }) +} +func BenchmarkTestnetMiner2(b *testing.B) { + Convey("bench GNTE one node", b, func() { + benchTestnetMiner(b, 2, "") + }) +} +func BenchmarkTestnetMiner3(b *testing.B) { + Convey("bench GNTE one node", b, func() { + benchTestnetMiner(b, 3, "") + }) +} From 2656b8faeeeee7b1d00fd1c5009a1e9e57d3c208 Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 7 Jan 2019 10:58:33 +0800 Subject: [PATCH 071/302] Use testnet config in conf/testnet dir. 
--- cmd/cql-minerd/benchTestnet.sh | 11 +++++++++++ cmd/cql-minerd/integration_test.go | 28 ++++++++++++++-------------- 2 files changed, 25 insertions(+), 14 deletions(-) create mode 100755 cmd/cql-minerd/benchTestnet.sh diff --git a/cmd/cql-minerd/benchTestnet.sh b/cmd/cql-minerd/benchTestnet.sh new file mode 100755 index 000000000..68a59d35e --- /dev/null +++ b/cmd/cql-minerd/benchTestnet.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +make -C ../../ clean && \ +make -C ../../ use_all_cores +go test -bench=^BenchmarkTestnetMiner1$ -benchtime=10s -run ^$ |tee gnte.log +go test -bench=^BenchmarkTestnetMiner2$ -benchtime=10s -run ^$ |tee -a gnte.log +go test -bench=^BenchmarkTestnetMiner3$ -benchtime=10s -run ^$ |tee -a gnte.log + +go test -cpu=1 -bench=^BenchmarkTestnetMiner1$ -benchtime=10s -run ^$ |tee -a gnte.log +go test -cpu=1 -bench=^BenchmarkTestnetMiner2$ -benchtime=10s -run ^$ |tee -a gnte.log +go test -cpu=1 -bench=^BenchmarkTestnetMiner3$ -benchtime=10s -run ^$ |tee -a gnte.log diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index 6ca5768f6..4bf93ac3c 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -927,7 +927,7 @@ func BenchmarkMinerGNTE8(b *testing.B) { }) } -func benchTestnetMiner(b *testing.B, minerCount uint16, testnetConfDir string) { +func benchTestnetMiner(b *testing.B, minerCount uint16) { log.Warnf("benchmark for %d Miners", minerCount) // Create temp directory @@ -936,9 +936,9 @@ func benchTestnetMiner(b *testing.B, minerCount uint16, testnetConfDir string) { panic(err) } defer os.RemoveAll(testDataDir) - clientConf := FJ(testnetConfDir, "config.yaml") + clientConf := FJ(baseDir, "./conf/testnet/config.yaml") tempConf := FJ(testDataDir, "config.yaml") - clientKey := FJ(testnetConfDir, "private.key") + clientKey := FJ(baseDir, "./conf/testnet/private.key") tempKey := FJ(testDataDir, "private.key") utils.CopyFile(clientConf, tempConf) utils.CopyFile(clientKey, tempKey) @@ 
-964,34 +964,34 @@ func benchTestnetMiner(b *testing.B, minerCount uint16, testnetConfDir string) { dsn = os.Getenv("DSN") } - var ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() + db, err := sql.Open("covenantsql", dsn) + So(err, ShouldBeNil) - // wait for creation - log.Infof("Wait dsn %v to create", dsn) - timeout := 10 * time.Minute - err = client.WaitDBCreation(ctx, dsn, timeout) + dsnCfg, err := client.ParseDSN(dsn) So(err, ShouldBeNil) - db, err := sql.Open("covenantsql", dsn) + // wait for creation + log.Infof("Wait dsn %v to create", dsn) + var ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + err = bp.WaitDatabaseCreation(ctx, proto.DatabaseID(dsnCfg.DatabaseID), db, 3*time.Second) So(err, ShouldBeNil) benchDB(b, db, minerCount > 0) - } func BenchmarkTestnetMiner1(b *testing.B) { Convey("bench GNTE one node", b, func() { - benchTestnetMiner(b, 1, "") + benchTestnetMiner(b, 1) }) } func BenchmarkTestnetMiner2(b *testing.B) { Convey("bench GNTE one node", b, func() { - benchTestnetMiner(b, 2, "") + benchTestnetMiner(b, 2) }) } func BenchmarkTestnetMiner3(b *testing.B) { Convey("bench GNTE one node", b, func() { - benchTestnetMiner(b, 3, "") + benchTestnetMiner(b, 3) }) } From ae077e8886ea6c53da30ff34d2c9ca5ee2adc891 Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 7 Jan 2019 11:32:24 +0800 Subject: [PATCH 072/302] Print leader addr while bench testnet --- cmd/cql-minerd/integration_test.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index 4bf93ac3c..d6ad51368 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -946,6 +946,12 @@ func benchTestnetMiner(b *testing.B, minerCount uint16) { err = client.Init(tempConf, []byte("")) So(err, ShouldBeNil) + for _, node := range conf.GConf.KnownNodes { + if node.Role == proto.Leader { + 
log.Infof("Benching started on bp addr: %v", node.Addr) + } + } + dsnFile := FJ(baseDir, "./cmd/cql-minerd/.dsn") var dsn string if minerCount > 0 { @@ -981,17 +987,17 @@ func benchTestnetMiner(b *testing.B, minerCount uint16) { } func BenchmarkTestnetMiner1(b *testing.B) { - Convey("bench GNTE one node", b, func() { + Convey("bench testnet one node", b, func() { benchTestnetMiner(b, 1) }) } func BenchmarkTestnetMiner2(b *testing.B) { - Convey("bench GNTE one node", b, func() { + Convey("bench testnet one node", b, func() { benchTestnetMiner(b, 2) }) } func BenchmarkTestnetMiner3(b *testing.B) { - Convey("bench GNTE one node", b, func() { + Convey("bench testnet one node", b, func() { benchTestnetMiner(b, 3) }) } From 0f5a06387125c960d104ead5bf0e79597ff14478 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Mon, 7 Jan 2019 14:38:07 +0800 Subject: [PATCH 073/302] Add test config for testnet bench --- test/bench_testnet/node_c/config.yaml | 152 ++++++++++++++++++++++++++ test/bench_testnet/node_c/private.key | 1 + 2 files changed, 153 insertions(+) create mode 100644 test/bench_testnet/node_c/config.yaml create mode 100644 test/bench_testnet/node_c/private.key diff --git a/test/bench_testnet/node_c/config.yaml b/test/bench_testnet/node_c/config.yaml new file mode 100644 index 000000000..9bade087e --- /dev/null +++ b/test/bench_testnet/node_c/config.yaml @@ -0,0 +1,152 @@ +IsTestMode: true +StartupSyncHoles: true +WorkingRoot: ./ +PubKeyStoreFile: public.keystore +PrivateKeyFile: private.key +DHTFileName: dht.db +ListenAddr: 0.0.0.0:15151 +ThisNodeID: 00000086571eeee68e89a00635dda04149ea4048a2c7165738fc0fb8287e42a7 +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com + oJMRESz5E4gYzS/q6XDrvU1qMPYIjCWzJaOau8XNEZeqCYKD5ar0IRd8KqXXFJkqmVfRvMGPmM1x8fGAa2XhSA==: cloudflare.com +MinNodeIDDifficulty: 2 
+DNSSeed: + EnforcedDNSSEC: false + DNSServers: + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 +BlockProducer: + PublicKey: 02c1db96f2ba7e1cb4e9822d12de0f63fb666feb828c7f509e81fab9bd7a34039c + NodeID: 00000000000589366268c274fdc11ec8bdb17e668d2f619555a2e9c1a29c91d8 + Nonce: + a: 14396347928 + b: 0 + c: 0 + d: 6148914694092305796 + ChainFileName: chain.db + BPGenesisInfo: + Version: 1 + Producer: "0000000000000000000000000000000000000000000000000000000000000001" + MerkleRoot: "0000000000000000000000000000000000000000000000000000000000000001" + ParentHash: "0000000000000000000000000000000000000000000000000000000000000001" + Timestamp: 2019-01-02T13:33:00Z + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + BaseAccounts: + - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 58aceaf4b730b54bf00c0fb3f7b14886de470767f313c2d108968cd8bf0794b7 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 +KnownNodes: +- ID: 00000000000589366268c274fdc11ec8bdb17e668d2f619555a2e9c1a29c91d8 + Role: Leader + Addr: bp00.cn.gridb.io:7777 + PublicKey: 02c1db96f2ba7e1cb4e9822d12de0f63fb666feb828c7f509e81fab9bd7a34039c + Nonce: + a: 14396347928 + b: 0 + c: 0 + d: 6148914694092305796 +- ID: 000000000013fd4b3180dd424d5a895bc57b798e5315087b7198c926d8893f98 + Role: Follower + Addr: bp01.cn.gridb.io:7777 + PublicKey: 
02c1db96f2ba7e1cb4e9822d12de0f63fb666feb828c7f509e81fab9bd7a34039c + Nonce: + a: 789554103 + b: 0 + c: 0 + d: 8070450536379825883 +- ID: 00000000001771e2b2e12b6f9f85d58ef5261a4b98a2e80bba0c5ef7bd72c499 + Role: Follower + Addr: bp02.cn.gridb.io:7777 + PublicKey: 02c1db96f2ba7e1cb4e9822d12de0f63fb666feb828c7f509e81fab9bd7a34039c + Nonce: + a: 1822880492 + b: 0 + c: 0 + d: 8646911286604382906 +- ID: 000000000014a2f14e79aec0a27a2a669aab416c392d5577760d43ed8503020d + Role: Follower + Addr: bp03.cn.gridb.io:7777 + PublicKey: 02c1db96f2ba7e1cb4e9822d12de0f63fb666feb828c7f509e81fab9bd7a34039c + Nonce: + a: 2552803966 + b: 0 + c: 0 + d: 9079256850862786277 +- ID: 00000000003b2bd120a7d07f248b181fc794ba8b278f07f9a780e61eb77f6abb + Role: Follower + Addr: bp04.hk.gridb.io:7777 + PublicKey: 02c1db96f2ba7e1cb4e9822d12de0f63fb666feb828c7f509e81fab9bd7a34039c + Nonce: + a: 2449538793 + b: 0 + c: 0 + d: 8791026473473316840 +- ID: 0000000000293f7216362791b6b1c9772184d6976cb34310c42547735410186c + Role: Follower + Addr: bp05.cn.gridb.io:7777 + PublicKey: 02c1db96f2ba7e1cb4e9822d12de0f63fb666feb828c7f509e81fab9bd7a34039c + Nonce: + a: 746598970 + b: 0 + c: 0 + d: 10808639108098016056 +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Role: Miner + Addr: miner00.cn.gridb.io:7778 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Role: Miner + Addr: miner01.cn.gridb.io:7778 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Role: Client + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 +- ID: 00000086571eeee68e89a00635dda04149ea4048a2c7165738fc0fb8287e42a7 + Role: Client + 
Addr: 0.0.0.0:15151 + PublicKey: 039578c9edf700bf847eef8d24369a12c0aabbd56abefc9c5beb773fed969fe9f8 + Nonce: + a: 708150 + b: 0 + c: 0 + d: 1082761333 +QPS: 1000 +ChainBusPeriod: 0s +BillingBlockCount: 60 +BPPeriod: 10s +BPTick: 3s +SQLChainPeriod: 1m0s +SQLChainTick: 10s +SQLChainTTL: 10 +MinProviderDeposit: 1000000 diff --git a/test/bench_testnet/node_c/private.key b/test/bench_testnet/node_c/private.key new file mode 100644 index 000000000..558a164e7 --- /dev/null +++ b/test/bench_testnet/node_c/private.key @@ -0,0 +1 @@ +MdC1n849xkeyn5nRTp2rqbLz1bSj4KrTrPawLLHSPPikNLffhHY7xUntkKPbT67o4uupu7DEUTsR5P27HphpRsdYaebu8T \ No newline at end of file From 02e86d54779dcf45adce2bc8efaba848443633f4 Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 8 Jan 2019 14:54:31 +0800 Subject: [PATCH 074/302] Refactor intergration test: combine benchGNTEMiner and benchTestnetMiner to benchOutsideMiner --- cmd/cql-minerd/integration_test.go | 111 ++++++++--------------------- 1 file changed, 28 insertions(+), 83 deletions(-) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index d6ad51368..d7641af95 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -53,6 +53,8 @@ import ( var ( baseDir = utils.GetProjectSrcDir() testWorkingDir = FJ(baseDir, "./test/") + gnteConfDir = FJ(testWorkingDir, "./GNTE/conf/node_c/") + testnetConfDir = FJ(testWorkingDir, "./bench_testnet/node_c/") logDir = FJ(testWorkingDir, "./log/") testGasPrice uint64 = 1 testAdvancePayment uint64 = 20000000 @@ -792,9 +794,8 @@ func BenchmarkSQLite(b *testing.B) { }) } -func benchGNTEMiner(b *testing.B, minerCount uint16, bypassSign bool) { - log.Warnf("benchmark GNTE for %d Miners, BypassSignature: %v", minerCount, bypassSign) - asymmetric.BypassSignature = bypassSign +func benchOutsideMiner(b *testing.B, minerCount uint16, confDir string) { + log.Warnf("benchmark %v for %d Miners:", confDir, minerCount) // Create temp directory testDataDir, err 
:= ioutil.TempDir(testWorkingDir, "covenantsql") @@ -802,9 +803,9 @@ func benchGNTEMiner(b *testing.B, minerCount uint16, bypassSign bool) { panic(err) } defer os.RemoveAll(testDataDir) - clientConf := FJ(testWorkingDir, "./GNTE/conf/node_c/config.yaml") + clientConf := FJ(confDir, "config.yaml") tempConf := FJ(testDataDir, "config.yaml") - clientKey := FJ(testWorkingDir, "./GNTE/conf/node_c/private.key") + clientKey := FJ(confDir, "private.key") tempKey := FJ(testDataDir, "private.key") utils.CopyFile(clientConf, tempConf) utils.CopyFile(clientKey, tempKey) @@ -812,6 +813,12 @@ func benchGNTEMiner(b *testing.B, minerCount uint16, bypassSign bool) { err = client.Init(tempConf, []byte("")) So(err, ShouldBeNil) + for _, node := range conf.GConf.KnownNodes { + if node.Role == proto.Leader { + log.Infof("Benching started on bp addr: %v", node.Addr) + } + } + dsnFile := FJ(baseDir, "./cmd/cql-minerd/.dsn") var dsn string if minerCount > 0 { @@ -830,11 +837,19 @@ func benchGNTEMiner(b *testing.B, minerCount uint16, bypassSign bool) { dsn, err = client.Create(meta) So(err, ShouldBeNil) log.Infof("the created database dsn is %v", dsn) + + // wait for creation + var ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute) + defer cancel() + err = client.WaitDBCreation(ctx, dsn) + So(err, ShouldBeNil) + err = ioutil.WriteFile(dsnFile, []byte(dsn), 0666) if err != nil { log.Errorf("write .dsn failed: %v", err) } defer os.Remove(dsnFile) + defer client.Drop(dsn) } else { dsn = os.Getenv("DSN") } @@ -842,18 +857,7 @@ func benchGNTEMiner(b *testing.B, minerCount uint16, bypassSign bool) { db, err := sql.Open("covenantsql", dsn) So(err, ShouldBeNil) - // wait for creation - var ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - err = client.WaitDBCreation(ctx, dsn) - So(err, ShouldBeNil) - benchDB(b, db, minerCount > 0) - - err = client.Drop(dsn) - So(err, ShouldBeNil) - time.Sleep(5 * time.Second) - stopNodes() } func 
BenchmarkMinerOneNoSign(b *testing.B) { @@ -900,104 +904,45 @@ func BenchmarkClientOnly(b *testing.B) { func BenchmarkMinerGNTE1(b *testing.B) { Convey("bench GNTE one node", b, func() { - benchGNTEMiner(b, 1, false) + benchOutsideMiner(b, 1, gnteConfDir) }) } func BenchmarkMinerGNTE2(b *testing.B) { Convey("bench GNTE two node", b, func() { - benchGNTEMiner(b, 2, false) + benchOutsideMiner(b, 2, gnteConfDir) }) } func BenchmarkMinerGNTE3(b *testing.B) { Convey("bench GNTE three node", b, func() { - benchGNTEMiner(b, 3, false) + benchOutsideMiner(b, 3, gnteConfDir) }) } func BenchmarkMinerGNTE4(b *testing.B) { Convey("bench GNTE three node", b, func() { - benchGNTEMiner(b, 4, false) + benchOutsideMiner(b, 4, gnteConfDir) }) } func BenchmarkMinerGNTE8(b *testing.B) { Convey("bench GNTE three node", b, func() { - benchGNTEMiner(b, 8, false) + benchOutsideMiner(b, 8, gnteConfDir) }) } -func benchTestnetMiner(b *testing.B, minerCount uint16) { - log.Warnf("benchmark for %d Miners", minerCount) - - // Create temp directory - testDataDir, err := ioutil.TempDir(testWorkingDir, "covenantsql") - if err != nil { - panic(err) - } - defer os.RemoveAll(testDataDir) - clientConf := FJ(baseDir, "./conf/testnet/config.yaml") - tempConf := FJ(testDataDir, "config.yaml") - clientKey := FJ(baseDir, "./conf/testnet/private.key") - tempKey := FJ(testDataDir, "private.key") - utils.CopyFile(clientConf, tempConf) - utils.CopyFile(clientKey, tempKey) - - err = client.Init(tempConf, []byte("")) - So(err, ShouldBeNil) - - for _, node := range conf.GConf.KnownNodes { - if node.Role == proto.Leader { - log.Infof("Benching started on bp addr: %v", node.Addr) - } - } - - dsnFile := FJ(baseDir, "./cmd/cql-minerd/.dsn") - var dsn string - if minerCount > 0 { - // create - meta := client.ResourceMeta{} - meta.Node = minerCount - dsn, err = client.Create(meta) - So(err, ShouldBeNil) - log.Infof("the created database dsn is %v", dsn) - err = ioutil.WriteFile(dsnFile, []byte(dsn), 0666) - if err != 
nil { - log.Errorf("write .dsn failed: %v", err) - } - defer os.Remove(dsnFile) - } else { - dsn = os.Getenv("DSN") - } - - db, err := sql.Open("covenantsql", dsn) - So(err, ShouldBeNil) - - dsnCfg, err := client.ParseDSN(dsn) - So(err, ShouldBeNil) - - // wait for creation - log.Infof("Wait dsn %v to create", dsn) - var ctx, cancel = context.WithTimeout(context.Background(), 5*time.Minute) - defer cancel() - err = bp.WaitDatabaseCreation(ctx, proto.DatabaseID(dsnCfg.DatabaseID), db, 3*time.Second) - So(err, ShouldBeNil) - - benchDB(b, db, minerCount > 0) -} - func BenchmarkTestnetMiner1(b *testing.B) { Convey("bench testnet one node", b, func() { - benchTestnetMiner(b, 1) + benchOutsideMiner(b, 1, testnetConfDir) }) } func BenchmarkTestnetMiner2(b *testing.B) { Convey("bench testnet one node", b, func() { - benchTestnetMiner(b, 2) + benchOutsideMiner(b, 2, testnetConfDir) }) } func BenchmarkTestnetMiner3(b *testing.B) { Convey("bench testnet one node", b, func() { - benchTestnetMiner(b, 3) + benchOutsideMiner(b, 3, testnetConfDir) }) } From d853112fcdd2749ed88ab3682cdf791daec6ab9a Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 8 Jan 2019 15:39:22 +0800 Subject: [PATCH 075/302] Support bench custom miner by get miner_conf_dir env var. 
--- cmd/cql-minerd/benchCustom.sh | 12 ++++++++++++ cmd/cql-minerd/benchTestnet.sh | 12 ++++++------ cmd/cql-minerd/integration_test.go | 21 +++++++++++++++++++++ 3 files changed, 39 insertions(+), 6 deletions(-) create mode 100755 cmd/cql-minerd/benchCustom.sh diff --git a/cmd/cql-minerd/benchCustom.sh b/cmd/cql-minerd/benchCustom.sh new file mode 100755 index 000000000..a9af2707f --- /dev/null +++ b/cmd/cql-minerd/benchCustom.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +make -C ../../ clean && \ +make -C ../../ use_all_cores +export miner_conf_dir=$PWD/../../conf/testnet +go test -bench=^BenchmarkCustomMiner1$ -benchtime=10s -run ^$ |tee custom_miner.log +go test -bench=^BenchmarkCustomMiner2$ -benchtime=10s -run ^$ |tee -a custom_miner.log +go test -bench=^BenchmarkCustomMiner3$ -benchtime=10s -run ^$ |tee -a custom_miner.log + +go test -cpu=1 -bench=^BenchmarkCustomMiner1$ -benchtime=10s -run ^$ |tee -a custom_miner.log +go test -cpu=1 -bench=^BenchmarkCustomMiner2$ -benchtime=10s -run ^$ |tee -a custom_miner.log +go test -cpu=1 -bench=^BenchmarkCustomMiner3$ -benchtime=10s -run ^$ |tee -a custom_miner.log diff --git a/cmd/cql-minerd/benchTestnet.sh b/cmd/cql-minerd/benchTestnet.sh index 68a59d35e..81e0c2912 100755 --- a/cmd/cql-minerd/benchTestnet.sh +++ b/cmd/cql-minerd/benchTestnet.sh @@ -2,10 +2,10 @@ make -C ../../ clean && \ make -C ../../ use_all_cores -go test -bench=^BenchmarkTestnetMiner1$ -benchtime=10s -run ^$ |tee gnte.log -go test -bench=^BenchmarkTestnetMiner2$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -bench=^BenchmarkTestnetMiner3$ -benchtime=10s -run ^$ |tee -a gnte.log +go test -bench=^BenchmarkTestnetMiner1$ -benchtime=10s -run ^$ |tee testnet.log +go test -bench=^BenchmarkTestnetMiner2$ -benchtime=10s -run ^$ |tee -a testnet.log +go test -bench=^BenchmarkTestnetMiner3$ -benchtime=10s -run ^$ |tee -a testnet.log -go test -cpu=1 -bench=^BenchmarkTestnetMiner1$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=1 
-bench=^BenchmarkTestnetMiner2$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=1 -bench=^BenchmarkTestnetMiner3$ -benchtime=10s -run ^$ |tee -a gnte.log +go test -cpu=1 -bench=^BenchmarkTestnetMiner1$ -benchtime=10s -run ^$ |tee -a testnet.log +go test -cpu=1 -bench=^BenchmarkTestnetMiner2$ -benchtime=10s -run ^$ |tee -a testnet.log +go test -cpu=1 -bench=^BenchmarkTestnetMiner3$ -benchtime=10s -run ^$ |tee -a testnet.log diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index d7641af95..0c40c4b70 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -907,6 +907,7 @@ func BenchmarkMinerGNTE1(b *testing.B) { benchOutsideMiner(b, 1, gnteConfDir) }) } + func BenchmarkMinerGNTE2(b *testing.B) { Convey("bench GNTE two node", b, func() { benchOutsideMiner(b, 2, gnteConfDir) @@ -936,13 +937,33 @@ func BenchmarkTestnetMiner1(b *testing.B) { benchOutsideMiner(b, 1, testnetConfDir) }) } + func BenchmarkTestnetMiner2(b *testing.B) { Convey("bench testnet one node", b, func() { benchOutsideMiner(b, 2, testnetConfDir) }) } + func BenchmarkTestnetMiner3(b *testing.B) { Convey("bench testnet one node", b, func() { benchOutsideMiner(b, 3, testnetConfDir) }) } + +func BenchmarkCustomMiner1(b *testing.B) { + Convey("bench custom one node", b, func() { + benchOutsideMiner(b, 1, os.Getenv("miner_conf_dir")) + }) +} + +func BenchmarkCustomMiner2(b *testing.B) { + Convey("bench custom one node", b, func() { + benchOutsideMiner(b, 2, os.Getenv("miner_conf_dir")) + }) +} + +func BenchmarkCustomMiner3(b *testing.B) { + Convey("bench custom one node", b, func() { + benchOutsideMiner(b, 3, os.Getenv("miner_conf_dir")) + }) +} From 4f74eb6abe5e9dbfd04b9e1a893d3a823ecf8bd6 Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 8 Jan 2019 16:20:37 +0800 Subject: [PATCH 076/302] Fix testnet conf dir location. 
--- cmd/cql-minerd/benchCustom.sh | 2 +- cmd/cql-minerd/integration_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/cql-minerd/benchCustom.sh b/cmd/cql-minerd/benchCustom.sh index a9af2707f..6590d3b74 100755 --- a/cmd/cql-minerd/benchCustom.sh +++ b/cmd/cql-minerd/benchCustom.sh @@ -2,7 +2,7 @@ make -C ../../ clean && \ make -C ../../ use_all_cores -export miner_conf_dir=$PWD/../../conf/testnet +export miner_conf_dir=$PWD/../../test/bench_testnet/node_c go test -bench=^BenchmarkCustomMiner1$ -benchtime=10s -run ^$ |tee custom_miner.log go test -bench=^BenchmarkCustomMiner2$ -benchtime=10s -run ^$ |tee -a custom_miner.log go test -bench=^BenchmarkCustomMiner3$ -benchtime=10s -run ^$ |tee -a custom_miner.log diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index 0c40c4b70..77f47ae76 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -54,7 +54,7 @@ var ( baseDir = utils.GetProjectSrcDir() testWorkingDir = FJ(baseDir, "./test/") gnteConfDir = FJ(testWorkingDir, "./GNTE/conf/node_c/") - testnetConfDir = FJ(testWorkingDir, "./bench_testnet/node_c/") + testnetConfDir = FJ(baseDir, "./conf/testnet/") logDir = FJ(testWorkingDir, "./log/") testGasPrice uint64 = 1 testAdvancePayment uint64 = 20000000 From 96c7d6a2e016893d9829505676d9e52f24fa1427 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Tue, 8 Jan 2019 17:09:59 +0800 Subject: [PATCH 077/302] Move sign, indexing, and xenomint tracker updating to the outside of kayak apply --- sqlchain/ackindex.go | 71 ++++++++++++++++++++---------------------- sqlchain/chain.go | 22 +++++-------- sqlchain/chain_test.go | 2 +- worker/db.go | 46 ++++++++++++++++++++++----- worker/db_storage.go | 24 ++++++++++++-- 5 files changed, 102 insertions(+), 63 deletions(-) diff --git a/sqlchain/ackindex.go b/sqlchain/ackindex.go index 1e596db45..8fab0d349 100644 --- a/sqlchain/ackindex.go +++ b/sqlchain/ackindex.go @@ -32,15 +32,12 @@ var ( 
ackTrackerCount int32 ) -type ackTracker struct { - resp *types.SignedResponseHeader - ack *types.SignedAckHeader -} - type multiAckIndex struct { sync.RWMutex - ri map[types.QueryKey]*types.SignedResponseHeader // ri is the index of queries without acks - qi map[types.QueryKey]*ackTracker // qi is the index of query trackers + // respIndex is the index of query responses without acks + respIndex map[types.QueryKey]*types.SignedResponseHeader + // ackIndex is the index of acknowledged queries + ackIndex map[types.QueryKey]*types.SignedAckHeader } func (i *multiAckIndex) addResponse(resp *types.SignedResponseHeader) (err error) { @@ -48,14 +45,14 @@ func (i *multiAckIndex) addResponse(resp *types.SignedResponseHeader) (err error log.Debugf("adding key %s <-- resp %s", &key, resp.Hash()) i.Lock() defer i.Unlock() - if oresp, ok := i.ri[key]; ok { + if oresp, ok := i.respIndex[key]; ok { if oresp.Hash() != resp.Hash() { err = errors.Wrapf(ErrResponseSeqNotMatch, "add key %s <-- resp %s", &key, resp.Hash()) return } return } - i.ri[key] = resp + i.respIndex[key] = resp atomic.AddInt32(&responseCount, 1) return } @@ -70,15 +67,15 @@ func (i *multiAckIndex) register(ack *types.SignedAckHeader) (err error) { i.Lock() defer i.Unlock() - if resp, ok = i.ri[key]; !ok { + if resp, ok = i.respIndex[key]; !ok { err = errors.Wrapf(ErrQueryNotFound, "register key %s <-- ack %s", &key, ack.Hash()) return } - delete(i.ri, key) - i.qi[key] = &ackTracker{ - resp: resp, - ack: ack, + if resp.Hash() != ack.ResponseHash() { + err = errors.Wrapf(ErrResponseSeqNotMatch, "register key %s <-- ack %s", &key, ack.Hash()) } + delete(i.respIndex, key) + i.ackIndex[key] = ack atomic.AddInt32(&responseCount, -1) atomic.AddInt32(&ackTrackerCount, 1) return @@ -89,18 +86,18 @@ func (i *multiAckIndex) remove(ack *types.SignedAckHeader) (err error) { log.Debugf("removing key %s -x- ack %s", &key, ack.Hash()) i.Lock() defer i.Unlock() - if _, ok := i.ri[key]; ok { - delete(i.ri, key) + if _, ok := 
i.respIndex[key]; ok { + delete(i.respIndex, key) atomic.AddInt32(&responseCount, -1) return } - if oack, ok := i.qi[key]; ok { - if oack.ack.Hash() != ack.Hash() { + if oack, ok := i.ackIndex[key]; ok { + if oack.Hash() != ack.Hash() { err = errors.Wrapf( ErrMultipleAckOfSeqNo, "remove key %s -x- ack %s", &key, ack.Hash()) return } - delete(i.qi, key) + delete(i.ackIndex, key) atomic.AddInt32(&ackTrackerCount, -1) return } @@ -111,8 +108,8 @@ func (i *multiAckIndex) remove(ack *types.SignedAckHeader) (err error) { func (i *multiAckIndex) acks() (ret []*types.SignedAckHeader) { i.RLock() defer i.RUnlock() - for _, v := range i.qi { - ret = append(ret, v.ack) + for _, v := range i.ackIndex { + ret = append(ret, v) } return } @@ -121,7 +118,7 @@ func (i *multiAckIndex) expire() { i.RLock() defer i.RUnlock() // TODO(leventeliu): need further processing. - for _, v := range i.ri { + for _, v := range i.respIndex { log.WithFields(log.Fields{ "request_hash": v.Request.Hash(), "request_time": v.Request.Timestamp, @@ -132,18 +129,18 @@ func (i *multiAckIndex) expire() { "response_time": v.Timestamp, }).Warn("query expires without acknowledgement") } - for _, v := range i.qi { + for _, v := range i.ackIndex { log.WithFields(log.Fields{ - "request_hash": v.resp.Request.Hash(), - "request_time": v.resp.Request.Timestamp, - "request_type": v.resp.Request.QueryType, - "request_node": v.resp.Request.NodeID, - "response_hash": v.ack.Response.Hash(), - "response_node": v.ack.Response.NodeID, - "response_time": v.ack.Response.Timestamp, - "ack_hash": v.ack.Hash(), - "ack_node": v.ack.NodeID, - "ack_time": v.ack.Timestamp, + "request_hash": v.Response.Request.Hash(), + "request_time": v.Response.Request.Timestamp, + "request_type": v.Response.Request.QueryType, + "request_node": v.Response.Request.NodeID, + "response_hash": v.Response.Hash(), + "response_node": v.Response.NodeID, + "response_time": v.Response.Timestamp, + "ack_hash": v.Hash(), + "ack_node": v.NodeID, + "ack_time": 
v.Timestamp, }).Warn("query expires without block producing") } } @@ -171,8 +168,8 @@ func (i *ackIndex) load(h int32) (mi *multiAckIndex, err error) { } if mi, ok = i.hi[h]; !ok { mi = &multiAckIndex{ - ri: make(map[types.QueryKey]*types.SignedResponseHeader), - qi: make(map[types.QueryKey]*ackTracker), + respIndex: make(map[types.QueryKey]*types.SignedResponseHeader), + ackIndex: make(map[types.QueryKey]*types.SignedAckHeader), } i.hi[h] = mi atomic.AddInt32(&multiIndexCount, 1) @@ -194,8 +191,8 @@ func (i *ackIndex) advance(h int32) { // Record expired and not acknowledged queries for _, v := range dl { v.expire() - atomic.AddInt32(&responseCount, int32(-len(v.ri))) - atomic.AddInt32(&ackTrackerCount, int32(-len(v.qi))) + atomic.AddInt32(&responseCount, int32(-len(v.respIndex))) + atomic.AddInt32(&ackTrackerCount, int32(-len(v.ackIndex))) } atomic.AddInt32(&multiIndexCount, int32(-len(dl))) } diff --git a/sqlchain/chain.go b/sqlchain/chain.go index cd314ef99..5a180d6db 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -488,7 +488,7 @@ func (c *Chain) pushBlock(b *types.Block) (err error) { // Keep track of the queries from the new block var ierr error for i, v := range b.QueryTxs { - if ierr = c.addResponse(v.Response); ierr != nil { + if ierr = c.AddResponse(v.Response); ierr != nil { log.WithFields(log.Fields{ "index": i, "producer": b.Producer(), @@ -1179,24 +1179,16 @@ func (c *Chain) replicationCycle(ctx context.Context) { } // Query queries req from local chain state and returns the query results in resp. -func (c *Chain) Query(req *types.Request) (resp *types.Response, err error) { - var ref *x.QueryTracker +func (c *Chain) Query( + req *types.Request) (tracker *x.QueryTracker, resp *types.Response, err error, +) { // TODO(leventeliu): we're using an external context passed by request. Make sure that // cancelling will be propagated to this context before chain instance stops. 
- if ref, resp, err = c.st.QueryWithContext(req.GetContext(), req); err != nil { - return - } - if err = resp.Sign(c.pk); err != nil { - return - } - if err = c.addResponse(&resp.Header); err != nil { - return - } - ref.UpdateResp(resp) - return + return c.st.QueryWithContext(req.GetContext(), req) } -func (c *Chain) addResponse(resp *types.SignedResponseHeader) (err error) { +// AddResponse addes a response to the ackIndex, awaiting for acknowledgement. +func (c *Chain) AddResponse(resp *types.SignedResponseHeader) (err error) { return c.ai.addResponse(c.rt.getHeightFromTime(resp.Request.Timestamp), resp) } diff --git a/sqlchain/chain_test.go b/sqlchain/chain_test.go index 16ab4238b..fd6085e4d 100644 --- a/sqlchain/chain_test.go +++ b/sqlchain/chain_test.go @@ -345,7 +345,7 @@ func TestMultiChain(t *testing.T) { if err != nil { t.Errorf("error occurred: %v", err) - } else if err = c.addResponse(resp); err != nil { + } else if err = c.AddResponse(resp); err != nil { t.Errorf("error occurred: %v", err) } diff --git a/worker/db.go b/worker/db.go index 0adc7d83c..1fe1ab40b 100644 --- a/worker/db.go +++ b/worker/db.go @@ -25,6 +25,7 @@ import ( "time" "github.com/CovenantSQL/CovenantSQL/conf" + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/kayak" kt "github.com/CovenantSQL/CovenantSQL/kayak/types" @@ -34,6 +35,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/storage" "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils/log" + x "github.com/CovenantSQL/CovenantSQL/xenomint" "github.com/pkg/errors" ) @@ -78,6 +80,7 @@ type Database struct { chain *sqlchain.Chain nodeID proto.NodeID mux *DBKayakMuxService + privateKey *asymmetric.PrivateKey } // NewDatabase create a single database instance using config. 
@@ -93,12 +96,19 @@ func NewDatabase(cfg *DBConfig, peers *proto.Peers, return } + // get private key + var privateKey *asymmetric.PrivateKey + if privateKey, err = kms.GetLocalPrivateKey(); err != nil { + return + } + // init database db = &Database{ cfg: cfg, dbID: cfg.DatabaseID, mux: cfg.KayakMux, connSeqEvictCh: make(chan uint64, 1), + privateKey: privateKey, } defer func() { @@ -212,6 +222,7 @@ func (db *Database) Query(request *types.Request) (response *types.Response, err var ( isSlowQuery uint32 + tracker *x.QueryTracker tmStart = time.Now() ) @@ -231,18 +242,35 @@ func (db *Database) Query(request *types.Request) (response *types.Response, err switch request.Header.QueryType { case types.ReadQuery: - return db.chain.Query(request) + if tracker, response, err = db.chain.Query(request); err != nil { + return + } case types.WriteQuery: if db.cfg.UseEventualConsistency { // reset context request.SetContext(context.Background()) - return db.chain.Query(request) + if tracker, response, err = db.chain.Query(request); err != nil { + return + } + } else { + if tracker, response, err = db.writeQuery(request); err != nil { + return + } } - return db.writeQuery(request) default: // TODO(xq262144): verbose errors with custom error structure return nil, errors.Wrap(ErrInvalidRequest, "invalid query type") } + + // Sign response + if err = response.Sign(db.privateKey); err != nil { + return + } + if err = db.chain.AddResponse(&response.Header); err != nil { + return + } + tracker.UpdateResp(response) + return } func (db *Database) logSlow(request *types.Request, isFinished bool, tmStart time.Time) { @@ -341,7 +369,7 @@ func (db *Database) Destroy() (err error) { return } -func (db *Database) writeQuery(request *types.Request) (response *types.Response, err error) { +func (db *Database) writeQuery(request *types.Request) (tracker *x.QueryTracker, response *types.Response, err error) { //ctx := context.Background() //ctx, task := trace.NewTask(ctx, "writeQuery") //defer 
task.End() @@ -371,12 +399,16 @@ func (db *Database) writeQuery(request *types.Request) (response *types.Response return } - var ok bool - if response, ok = (result).(*types.Response); !ok { + var ( + tr *TrackerAndResponse + ok bool + ) + if tr, ok = (result).(*TrackerAndResponse); !ok { err = errors.Wrap(err, "invalid response type") return } - + tracker = tr.Tracker + response = tr.Response return } diff --git a/worker/db_storage.go b/worker/db_storage.go index 1b49fce83..1461ac49e 100644 --- a/worker/db_storage.go +++ b/worker/db_storage.go @@ -23,6 +23,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" + x "github.com/CovenantSQL/CovenantSQL/xenomint" "github.com/pkg/errors" ) @@ -90,11 +91,21 @@ func (db *Database) Check(rawReq interface{}) (err error) { return } +// TrackerAndResponse defines a query tracker used by xenomint and an unsigned response. +type TrackerAndResponse struct { + Tracker *x.QueryTracker + Response *types.Response +} + // Commit implements kayak.types.Handler.Commit. 
func (db *Database) Commit(rawReq interface{}) (result interface{}, err error) { // convert query and check syntax - var req *types.Request - var ok bool + var ( + req *types.Request + response *types.Response + tracker *x.QueryTracker + ok bool + ) if req, ok = rawReq.(*types.Request); !ok || req == nil { err = errors.Wrap(ErrInvalidRequest, "invalid request payload") return @@ -104,7 +115,14 @@ func (db *Database) Commit(rawReq interface{}) (result interface{}, err error) { req.SetContext(context.Background()) // execute - return db.chain.Query(req) + if tracker, response, err = db.chain.Query(req); err != nil { + return + } + result = &TrackerAndResponse{ + Tracker: tracker, + Response: response, + } + return } func (db *Database) recordSequence(connID uint64, seqNo uint64) { From c82970d3158e1e7a6622f9a59f01d68f457f4475 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 8 Jan 2019 20:31:47 +0800 Subject: [PATCH 078/302] Revert "Support query pattern regulations" --- blockproducer/metastate.go | 19 ++-- blockproducer/metastate_test.go | 39 ++++---- client/driver.go | 2 +- client/helper_test.go | 2 +- cmd/cql-minerd/integration_test.go | 3 +- cmd/cql/main.go | 36 ++------ types/account.go | 143 +++++------------------------ types/account_gen.go | 86 +---------------- types/account_gen_test.go | 74 --------------- types/account_test.go | 95 ------------------- types/updatepermission.go | 2 +- types/updatepermission_gen.go | 18 +--- types/xxx_test.go | 20 +++- worker/chainbusservice_test.go | 4 +- worker/dbms.go | 73 +++++---------- worker/dbms_test.go | 123 ++----------------------- worker/helper_test.go | 8 +- 17 files changed, 132 insertions(+), 615 deletions(-) delete mode 100644 types/account_test.go diff --git a/blockproducer/metastate.go b/blockproducer/metastate.go index 22aef9f75..1dfaf3891 100644 --- a/blockproducer/metastate.go +++ b/blockproducer/metastate.go @@ -458,7 +458,7 @@ func (s *metaState) createSQLChain(addr proto.AccountAddress, id 
proto.DatabaseI Users: []*types.SQLChainUser{ { Address: addr, - Permission: types.UserPermissionFromRole(types.Admin), + Permission: types.Admin, }, }, } @@ -466,7 +466,7 @@ func (s *metaState) createSQLChain(addr proto.AccountAddress, id proto.DatabaseI } func (s *metaState) addSQLChainUser( - k proto.DatabaseID, addr proto.AccountAddress, perm *types.UserPermission) (_ error, + k proto.DatabaseID, addr proto.AccountAddress, perm types.UserPermission) (_ error, ) { var ( src, dst *types.SQLChainProfile @@ -515,7 +515,8 @@ func (s *metaState) deleteSQLChainUser(k proto.DatabaseID, addr proto.AccountAdd } func (s *metaState) alterSQLChainUser( - k proto.DatabaseID, addr proto.AccountAddress, perm *types.UserPermission) (_ error) { + k proto.DatabaseID, addr proto.AccountAddress, perm types.UserPermission) (_ error, +) { var ( src, dst *types.SQLChainProfile ok bool @@ -702,7 +703,7 @@ func (s *metaState) matchProvidersWithUser(tx *types.CreateDatabase) (err error) users := make([]*types.SQLChainUser, 1) users[0] = &types.SQLChainUser{ Address: sender, - Permission: types.UserPermissionFromRole(types.Admin), + Permission: types.Admin, Status: types.Normal, Deposit: minAdvancePayment, AdvancePayment: tx.AdvancePayment, @@ -877,7 +878,7 @@ func (s *metaState) updatePermission(tx *types.UpdatePermission) (err error) { }).WithError(ErrDatabaseNotFound).Error("unexpected error in updatePermission") return ErrDatabaseNotFound } - if !tx.Permission.IsValid() { + if tx.Permission >= types.NumberOfUserPermission { log.WithFields(log.Fields{ "permission": tx.Permission, "dbID": tx.TargetSQLChain.DatabaseID(), @@ -890,8 +891,8 @@ func (s *metaState) updatePermission(tx *types.UpdatePermission) (err error) { numOfAdmin := 0 targetUserIndex := -1 for i, u := range so.Users { - isAdmin = isAdmin || (sender == u.Address && u.Permission.HasAdminPermission()) - if u.Permission.HasAdminPermission() { + isAdmin = isAdmin || (sender == u.Address && u.Permission == types.Admin) + if 
u.Permission == types.Admin { numOfAdmin++ } if tx.TargetUser == u.Address { @@ -908,7 +909,7 @@ func (s *metaState) updatePermission(tx *types.UpdatePermission) (err error) { } // return error if number of Admin <= 1 and Admin want to revoke permission of itself - if numOfAdmin <= 1 && tx.TargetUser == sender && !tx.Permission.HasAdminPermission() { + if numOfAdmin <= 1 && tx.TargetUser == sender && tx.Permission != types.Admin { err = ErrNoAdminLeft log.WithFields(log.Fields{ "sender": sender.String(), @@ -946,7 +947,7 @@ func (s *metaState) updateKeys(tx *types.IssueKeys) (err error) { // check sender's permission isAdmin := false for _, user := range so.Users { - if sender == user.Address && user.Permission.HasAdminPermission() { + if sender == user.Address && user.Permission == types.Admin { isAdmin = true break } diff --git a/blockproducer/metastate_test.go b/blockproducer/metastate_test.go index aedd69e87..f6bca9ac2 100644 --- a/blockproducer/metastate_test.go +++ b/blockproducer/metastate_test.go @@ -106,11 +106,11 @@ func TestMetaState(t *testing.T) { Convey("The metaState should failed to operate SQLChain for unknown user", func() { err = ms.createSQLChain(addr1, dbID1) So(err, ShouldEqual, ErrAccountNotFound) - err = ms.addSQLChainUser(dbID1, addr1, types.UserPermissionFromRole(types.Admin)) + err = ms.addSQLChainUser(dbID1, addr1, types.Admin) So(err, ShouldEqual, ErrDatabaseNotFound) err = ms.deleteSQLChainUser(dbID1, addr1) So(err, ShouldEqual, ErrDatabaseNotFound) - err = ms.alterSQLChainUser(dbID1, addr1, types.UserPermissionFromRole(types.Write)) + err = ms.alterSQLChainUser(dbID1, addr1, types.Write) So(err, ShouldEqual, ErrDatabaseNotFound) }) Convey("When new account and database objects are stored", func() { @@ -170,9 +170,9 @@ func TestMetaState(t *testing.T) { So(err, ShouldEqual, ErrDatabaseExists) }) Convey("When new SQLChain users are added", func() { - err = ms.addSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Write)) + err 
= ms.addSQLChainUser(dbID3, addr2, types.Write) So(err, ShouldBeNil) - err = ms.addSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Write)) + err = ms.addSQLChainUser(dbID3, addr2, types.Write) So(err, ShouldEqual, ErrDatabaseUserExists) Convey("The metaState object should be ok to delete user", func() { err = ms.deleteSQLChainUser(dbID3, addr2) @@ -181,9 +181,9 @@ func TestMetaState(t *testing.T) { So(err, ShouldBeNil) }) Convey("The metaState object should be ok to alter user", func() { - err = ms.alterSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Read)) + err = ms.alterSQLChainUser(dbID3, addr2, types.Read) So(err, ShouldBeNil) - err = ms.alterSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Write)) + err = ms.alterSQLChainUser(dbID3, addr2, types.Write) So(err, ShouldBeNil) }) Convey("When metaState change is committed", func() { @@ -204,9 +204,9 @@ func TestMetaState(t *testing.T) { So(err, ShouldBeNil) }) Convey("The metaState object should be ok to alter user", func() { - err = ms.alterSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Read)) + err = ms.alterSQLChainUser(dbID3, addr2, types.Read) So(err, ShouldBeNil) - err = ms.alterSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Write)) + err = ms.alterSQLChainUser(dbID3, addr2, types.Write) So(err, ShouldBeNil) }) }) @@ -214,9 +214,9 @@ func TestMetaState(t *testing.T) { Convey("When metaState change is committed", func() { ms.commit() Convey("The metaState object should be ok to add users for database", func() { - err = ms.addSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Write)) + err = ms.addSQLChainUser(dbID3, addr2, types.Write) So(err, ShouldBeNil) - err = ms.addSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Write)) + err = ms.addSQLChainUser(dbID3, addr2, types.Write) So(err, ShouldEqual, ErrDatabaseUserExists) }) Convey("The metaState object should report database exists", func() { @@ -992,7 +992,7 @@ 
func TestMetaState(t *testing.T) { UpdatePermissionHeader: types.UpdatePermissionHeader{ TargetSQLChain: addr1, TargetUser: addr3, - Permission: types.UserPermissionFromRole(types.Read), + Permission: types.Read, Nonce: cd1.Nonce + 1, }, } @@ -1000,7 +1000,7 @@ func TestMetaState(t *testing.T) { So(err, ShouldBeNil) err = ms.apply(&up) So(errors.Cause(err), ShouldEqual, ErrDatabaseNotFound) - up.Permission = types.UserPermissionFromRole(types.NumberOfUserPermission) + up.Permission = 4 up.TargetSQLChain = dbAccount err = up.Sign(privKey1) So(err, ShouldBeNil) @@ -1009,7 +1009,7 @@ func TestMetaState(t *testing.T) { // test permission update // addr1(admin) update addr3 as admin up.TargetUser = addr3 - up.Permission = types.UserPermissionFromRole(types.Admin) + up.Permission = types.Admin err = up.Sign(privKey1) So(err, ShouldBeNil) err = ms.apply(&up) @@ -1018,7 +1018,7 @@ func TestMetaState(t *testing.T) { // addr3(admin) update addr4 as read up.TargetUser = addr4 up.Nonce = cd2.Nonce - up.Permission = types.UserPermissionFromRole(types.Read) + up.Permission = types.Read err = up.Sign(privKey3) So(err, ShouldBeNil) err = ms.apply(&up) @@ -1034,7 +1034,7 @@ func TestMetaState(t *testing.T) { ms.commit() // addr3(admin) update addr3(admin) as read fail up.TargetUser = addr3 - up.Permission = types.UserPermissionFromRole(types.Read) + up.Permission = types.Read up.Nonce = up.Nonce + 1 err = up.Sign(privKey3) So(err, ShouldBeNil) @@ -1050,18 +1050,15 @@ func TestMetaState(t *testing.T) { co, loaded = ms.loadSQLChainObject(dbID) for _, user := range co.Users { if user.Address == addr1 { - So(user.Permission, ShouldNotBeNil) - So(user.Permission.Role, ShouldEqual, types.Read) + So(user.Permission, ShouldEqual, types.Read) continue } if user.Address == addr3 { - So(user.Permission, ShouldNotBeNil) - So(user.Permission.Role, ShouldEqual, types.Admin) + So(user.Permission, ShouldEqual, types.Admin) continue } if user.Address == addr4 { - So(user.Permission, ShouldNotBeNil) 
- So(user.Permission.Role, ShouldEqual, types.Read) + So(user.Permission, ShouldEqual, types.Read) continue } } diff --git a/client/driver.go b/client/driver.go index 1436071aa..a28634d3e 100644 --- a/client/driver.go +++ b/client/driver.go @@ -259,7 +259,7 @@ func GetTokenBalance(tt types.TokenType) (balance uint64, err error) { // UpdatePermission sends UpdatePermission transaction to chain. func UpdatePermission(targetUser proto.AccountAddress, - targetChain proto.AccountAddress, perm *types.UserPermission) (err error) { + targetChain proto.AccountAddress, perm types.UserPermission) (err error) { if atomic.LoadUint32(&driverInitialized) == 0 { err = ErrNotInitialized return diff --git a/client/helper_test.go b/client/helper_test.go index df069d0b1..0db24c669 100644 --- a/client/helper_test.go +++ b/client/helper_test.go @@ -175,7 +175,7 @@ func startTestService() (stopTestService func(), tempDir string, err error) { return } permStat := &types.PermStat{ - Permission: types.UserPermissionFromRole(types.Admin), + Permission: types.Admin, Status: types.Normal, } err = dbms.UpdatePermission(dbID, proto.AccountAddress(addr), permStat) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index 3bfc6af21..3e6dcce1a 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -429,8 +429,7 @@ func TestFullProcess(t *testing.T) { } permStat, ok := usersMap[clientAddr] So(ok, ShouldBeTrue) - So(permStat.Permission, ShouldNotBeNil) - So(permStat.Permission.Role, ShouldEqual, types.Admin) + So(permStat.Permission, ShouldEqual, types.Admin) So(permStat.Status, ShouldEqual, types.Normal) _, err = db.Exec("CREATE TABLE test (test int)") diff --git a/cmd/cql/main.go b/cmd/cql/main.go index feff487c9..1c0efd6a6 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -72,15 +72,7 @@ var ( type userPermission struct { TargetChain proto.AccountAddress `json:"chain"` TargetUser proto.AccountAddress `json:"user"` - Perm 
json.RawMessage `json:"perm"` -} - -type userPermPayload struct { - // User role to access database. - Role types.UserPermissionRole `json:"role"` - // SQL pattern regulations for user queries - // only a fully matched (case-sensitive) sql query is permitted to execute. - Patterns []string `json:"patterns"` + Perm string `json:"perm"` } type tranToken struct { @@ -342,29 +334,17 @@ func main() { return } - var permPayload userPermPayload - - if err := json.Unmarshal(perm.Perm, &permPayload); err != nil { - // try again using role string representation - if err := json.Unmarshal(perm.Perm, &permPayload.Role); err != nil { - log.WithError(err).Errorf("update permission failed: invalid permission description") - os.Exit(-1) - return - } - } - - p := &types.UserPermission{ - Role: permPayload.Role, - Patterns: permPayload.Patterns, - } - - if !p.IsValid() { - log.Errorf("update permission failed: invalid permission description") + var p types.UserPermission + p.FromString(perm.Perm) + if p > types.NumberOfUserPermission { + log.WithError(err).Errorf("update permission failed: invalid permission description") os.Exit(-1) return } - if err := client.UpdatePermission(perm.TargetUser, perm.TargetChain, p); err != nil { + err := client.UpdatePermission(perm.TargetUser, perm.TargetChain, p) + + if err != nil { log.WithError(err).Error("update permission failed") os.Exit(-1) return diff --git a/types/account.go b/types/account.go index c98c58527..8f8b7295e 100644 --- a/types/account.go +++ b/types/account.go @@ -17,9 +17,6 @@ package types import ( - "encoding/json" - "sync" - pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/proto" ) @@ -38,25 +35,12 @@ const ( NumberOfRoles ) -// UserPermissionRole defines role of user permission including admin/write/read. -type UserPermissionRole int32 - // UserPermission defines permissions of a SQLChain user. -type UserPermission struct { - // User role to access database. 
- Role UserPermissionRole - // SQL pattern regulations for user queries - // only a fully matched (case-sensitive) sql query is permitted to execute. - Patterns []string - - // patterns map cache for matching - cachedPatternMapOnce sync.Once - cachedPatternMap map[string]bool -} +type UserPermission int32 const ( // Void defines the initial permission. - Void UserPermissionRole = iota + Void UserPermission = iota // Admin defines the admin user permission. Admin // Write defines the writer user permission. @@ -67,119 +51,42 @@ const ( NumberOfUserPermission ) -// UnmarshalJSON implements the json.Unmarshler interface. -func (r *UserPermissionRole) UnmarshalJSON(data []byte) (err error) { - var s string - if err = json.Unmarshal(data, &s); err != nil { - return - } - r.FromString(s) - return +// CheckRead returns true if user owns read permission. +func (up *UserPermission) CheckRead() bool { + return *up >= Admin && *up < NumberOfUserPermission } -// MarshalJSON implements the json.Marshaler interface. -func (r UserPermissionRole) MarshalJSON() ([]byte, error) { - return json.Marshal(r.String()) +// CheckWrite returns true if user owns write permission. +func (up *UserPermission) CheckWrite() bool { + return *up >= Admin && *up <= Write } -// String implements the fmt.Stringer interface. -func (r UserPermissionRole) String() string { - switch r { - case Admin: - return "Admin" - case Write: - return "Write" - case Read: - return "Read" - case Void: - return "Void" - default: - return "Unknown" - } +// CheckAdmin returns true if user owns admin permission. +func (up *UserPermission) CheckAdmin() bool { + return *up == Admin +} + +// Valid returns true if the value is a meaning permission value. +func (up *UserPermission) Valid() bool { + return *up >= Admin && *up < NumberOfUserPermission } -// FromString converts string to UserPermissionRole. -func (r *UserPermissionRole) FromString(perm string) { +// FromString converts string to UserPermission. 
+func (up *UserPermission) FromString(perm string) { switch perm { case "Admin": - *r = Admin + *up = Admin case "Write": - *r = Write + *up = Write case "Read": - *r = Read + *up = Read case "Void": - *r = Void + *up = Void default: - *r = NumberOfUserPermission - } -} - -// UserPermissionFromRole construct a new user permission instance from primitive user permission role enum. -func UserPermissionFromRole(role UserPermissionRole) *UserPermission { - return &UserPermission{ - Role: role, + *up = NumberOfUserPermission } } -// HasReadPermission returns true if user owns read permission. -func (up *UserPermission) HasReadPermission() bool { - if up == nil { - return false - } - return up.Role >= Admin && up.Role < NumberOfUserPermission -} - -// HasWritePermission returns true if user owns write permission. -func (up *UserPermission) HasWritePermission() bool { - if up == nil { - return false - } - return up.Role >= Admin && up.Role <= Write -} - -// HasAdminPermission returns true if user owns admin permission. -func (up *UserPermission) HasAdminPermission() bool { - if up == nil { - return false - } - return up.Role == Admin -} - -// IsValid returns whether the permission object is valid or not. -func (up *UserPermission) IsValid() bool { - return up != nil && up.Role < NumberOfUserPermission && up.Role >= Admin -} - -// HasDisallowedQueryPatterns returns whether the queries are permitted. 
-func (up *UserPermission) HasDisallowedQueryPatterns(queries []Query) (query string, status bool) { - if up == nil { - status = true - return - } - if len(up.Patterns) == 0 { - status = false - return - } - - up.cachedPatternMapOnce.Do(func() { - up.cachedPatternMap = make(map[string]bool, len(up.Patterns)) - for _, p := range up.Patterns { - up.cachedPatternMap[p] = true - } - }) - - for _, q := range queries { - if !up.cachedPatternMap[q.Pattern] { - // not permitted - query = q.Pattern - status = true - break - } - } - - return -} - // Status defines status of a SQLChain user/miner. type Status int32 @@ -205,14 +112,14 @@ func (s *Status) EnableQuery() bool { // PermStat defines the permissions status structure. type PermStat struct { - Permission *UserPermission + Permission UserPermission Status Status } // SQLChainUser defines a SQLChain user. type SQLChainUser struct { Address proto.AccountAddress - Permission *UserPermission + Permission UserPermission AdvancePayment uint64 Arrears uint64 Deposit uint64 diff --git a/types/account_gen.go b/types/account_gen.go index 352302e7f..e69e63642 100644 --- a/types/account_gen.go +++ b/types/account_gen.go @@ -102,38 +102,6 @@ func (z *MinerInfo) Msgsize() (s int) { return } -// MarshalHash marshals for hash -func (z *PermStat) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 2 - o = append(o, 0x82, 0x82) - if z.Permission == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Permission.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x82) - o = hsp.AppendInt32(o, int32(z.Status)) - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *PermStat) Msgsize() (s int) { - s = 1 + 11 - if z.Permission == nil { - s += hsp.NilSize - } else { - s += z.Permission.Msgsize() - } - s += 7 + hsp.Int32Size - return -} - // 
MarshalHash marshals for hash func (z *ProviderProfile) MarshalHash() (o []byte, err error) { var b []byte @@ -303,21 +271,10 @@ func (z *SQLChainUser) MarshalHash() (o []byte, err error) { o = hsp.Require(b, z.Msgsize()) // map header, size 6 o = append(o, 0x86, 0x86) - if z.Permission == nil { - o = hsp.AppendNil(o) - } else { - // map header, size 2 - o = append(o, 0x82, 0x82) - o = hsp.AppendInt32(o, int32(z.Permission.Role)) - o = append(o, 0x82) - o = hsp.AppendArrayHeader(o, uint32(len(z.Permission.Patterns))) - for za0001 := range z.Permission.Patterns { - o = hsp.AppendString(o, z.Permission.Patterns[za0001]) - } - } - o = append(o, 0x86) o = hsp.AppendInt32(o, int32(z.Status)) o = append(o, 0x86) + o = hsp.AppendInt32(o, int32(z.Permission)) + o = append(o, 0x86) if oTemp, err := z.Address.MarshalHash(); err != nil { return nil, err } else { @@ -334,16 +291,7 @@ func (z *SQLChainUser) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SQLChainUser) Msgsize() (s int) { - s = 1 + 11 - if z.Permission == nil { - s += hsp.NilSize - } else { - s += 1 + 5 + hsp.Int32Size + 9 + hsp.ArrayHeaderSize - for za0001 := range z.Permission.Patterns { - s += hsp.StringPrefixSize + len(z.Permission.Patterns[za0001]) - } - } - s += 7 + hsp.Int32Size + 8 + z.Address.Msgsize() + 15 + hsp.Uint64Size + 8 + hsp.Uint64Size + 8 + hsp.Uint64Size + s = 1 + 7 + hsp.Int32Size + 11 + hsp.Int32Size + 8 + z.Address.Msgsize() + 15 + hsp.Uint64Size + 8 + hsp.Uint64Size + 8 + hsp.Uint64Size return } @@ -384,31 +332,7 @@ func (z *UserArrears) Msgsize() (s int) { } // MarshalHash marshals for hash -func (z *UserPermission) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 2 - o = append(o, 0x82, 0x82) - o = hsp.AppendInt32(o, int32(z.Role)) - o = append(o, 0x82) - o = hsp.AppendArrayHeader(o, uint32(len(z.Patterns))) - for za0001 := range 
z.Patterns { - o = hsp.AppendString(o, z.Patterns[za0001]) - } - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *UserPermission) Msgsize() (s int) { - s = 1 + 5 + hsp.Int32Size + 9 + hsp.ArrayHeaderSize - for za0001 := range z.Patterns { - s += hsp.StringPrefixSize + len(z.Patterns[za0001]) - } - return -} - -// MarshalHash marshals for hash -func (z UserPermissionRole) MarshalHash() (o []byte, err error) { +func (z UserPermission) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) o = hsp.AppendInt32(o, int32(z)) @@ -416,7 +340,7 @@ func (z UserPermissionRole) MarshalHash() (o []byte, err error) { } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z UserPermissionRole) Msgsize() (s int) { +func (z UserPermission) Msgsize() (s int) { s = hsp.Int32Size return } diff --git a/types/account_gen_test.go b/types/account_gen_test.go index 9b6a8a5d3..30e9ad803 100644 --- a/types/account_gen_test.go +++ b/types/account_gen_test.go @@ -83,43 +83,6 @@ func BenchmarkAppendMsgMinerInfo(b *testing.B) { } } -func TestMarshalHashPermStat(t *testing.T) { - v := PermStat{} - binary.Read(rand.Reader, binary.BigEndian, &v) - bts1, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - bts2, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(bts1, bts2) { - t.Fatal("hash not stable") - } -} - -func BenchmarkMarshalHashPermStat(b *testing.B) { - v := PermStat{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalHash() - } -} - -func BenchmarkAppendMsgPermStat(b *testing.B) { - v := PermStat{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalHash() - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalHash() - } -} - func TestMarshalHashProviderProfile(t *testing.T) { v := ProviderProfile{} 
binary.Read(rand.Reader, binary.BigEndian, &v) @@ -267,40 +230,3 @@ func BenchmarkAppendMsgUserArrears(b *testing.B) { bts, _ = v.MarshalHash() } } - -func TestMarshalHashUserPermission(t *testing.T) { - v := UserPermission{} - binary.Read(rand.Reader, binary.BigEndian, &v) - bts1, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - bts2, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(bts1, bts2) { - t.Fatal("hash not stable") - } -} - -func BenchmarkMarshalHashUserPermission(b *testing.B) { - v := UserPermission{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalHash() - } -} - -func BenchmarkAppendMsgUserPermission(b *testing.B) { - v := UserPermission{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalHash() - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalHash() - } -} diff --git a/types/account_test.go b/types/account_test.go deleted file mode 100644 index 7a7eb665e..000000000 --- a/types/account_test.go +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright 2019 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package types - -import ( - "encoding/json" - "testing" - - . 
"github.com/smartystreets/goconvey/convey" -) - -func TestUserPermissionFromRole(t *testing.T) { - Convey("test marshal/unmarshal json", t, func() { - jsonBytes, err := json.Marshal(Read) - So(err, ShouldBeNil) - So(jsonBytes, ShouldResemble, []byte(`"Read"`)) - var r UserPermissionRole - So(r, ShouldEqual, Void) - err = json.Unmarshal([]byte(`"Write"`), &r) - So(err, ShouldBeNil) - So(r, ShouldEqual, Write) - }) - Convey("test string/from string", t, func() { - var r UserPermissionRole - So(r, ShouldEqual, Void) - r.FromString(Read.String()) - So(r, ShouldEqual, Read) - }) -} - -func TestUserPermission(t *testing.T) { - Convey("nil protect", t, func() { - p := (*UserPermission)(nil) - So(p.HasReadPermission(), ShouldBeFalse) - So(p.HasWritePermission(), ShouldBeFalse) - So(p.HasAdminPermission(), ShouldBeFalse) - So(p.IsValid(), ShouldBeFalse) - _, state := p.HasDisallowedQueryPatterns([]Query{}) - So(state, ShouldBeTrue) - }) - Convey("has read permission", t, func() { - So(UserPermissionFromRole(Void).HasReadPermission(), ShouldBeFalse) - So(UserPermissionFromRole(Read).HasReadPermission(), ShouldBeTrue) - So(UserPermissionFromRole(Write).HasReadPermission(), ShouldBeTrue) - So(UserPermissionFromRole(Admin).HasReadPermission(), ShouldBeTrue) - So(UserPermissionFromRole(NumberOfUserPermission).HasReadPermission(), ShouldBeFalse) - }) - Convey("has write permission", t, func() { - So(UserPermissionFromRole(Void).HasWritePermission(), ShouldBeFalse) - So(UserPermissionFromRole(Read).HasWritePermission(), ShouldBeFalse) - So(UserPermissionFromRole(Write).HasWritePermission(), ShouldBeTrue) - So(UserPermissionFromRole(Admin).HasWritePermission(), ShouldBeTrue) - So(UserPermissionFromRole(NumberOfUserPermission).HasWritePermission(), ShouldBeFalse) - }) - Convey("has admin permission", t, func() { - So(UserPermissionFromRole(Void).HasAdminPermission(), ShouldBeFalse) - So(UserPermissionFromRole(Read).HasAdminPermission(), ShouldBeFalse) - 
So(UserPermissionFromRole(Write).HasAdminPermission(), ShouldBeFalse) - So(UserPermissionFromRole(Admin).HasAdminPermission(), ShouldBeTrue) - So(UserPermissionFromRole(NumberOfUserPermission).HasAdminPermission(), ShouldBeFalse) - }) - Convey("is valid", t, func() { - So(UserPermissionFromRole(Void).IsValid(), ShouldBeFalse) - So(UserPermissionFromRole(Read).IsValid(), ShouldBeTrue) - So(UserPermissionFromRole(Write).IsValid(), ShouldBeTrue) - So(UserPermissionFromRole(Admin).IsValid(), ShouldBeTrue) - So(UserPermissionFromRole(NumberOfUserPermission).IsValid(), ShouldBeFalse) - }) - Convey("query patterns", t, func() { - // empty patterns limitation - _, state := UserPermissionFromRole(Read).HasDisallowedQueryPatterns([]Query{ - { - Pattern: "select 1", - }, - { - Pattern: "insert into test values(1)", - }, - }) - So(state, ShouldBeFalse) - }) -} diff --git a/types/updatepermission.go b/types/updatepermission.go index 729829c3d..1b7ed46a6 100644 --- a/types/updatepermission.go +++ b/types/updatepermission.go @@ -30,7 +30,7 @@ import ( type UpdatePermissionHeader struct { TargetSQLChain proto.AccountAddress TargetUser proto.AccountAddress - Permission *UserPermission + Permission UserPermission Nonce interfaces.AccountNonce } diff --git a/types/updatepermission_gen.go b/types/updatepermission_gen.go index 2fb9875b3..443bfaa78 100644 --- a/types/updatepermission_gen.go +++ b/types/updatepermission_gen.go @@ -44,14 +44,10 @@ func (z *UpdatePermissionHeader) MarshalHash() (o []byte, err error) { o = hsp.Require(b, z.Msgsize()) // map header, size 4 o = append(o, 0x84, 0x84) - if z.Permission == nil { - o = hsp.AppendNil(o) + if oTemp, err := z.Permission.MarshalHash(); err != nil { + return nil, err } else { - if oTemp, err := z.Permission.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } + o = hsp.AppendBytes(o, oTemp) } o = append(o, 0x84) if oTemp, err := z.Nonce.MarshalHash(); err != nil { @@ -76,12 +72,6 @@ func (z 
*UpdatePermissionHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *UpdatePermissionHeader) Msgsize() (s int) { - s = 1 + 11 - if z.Permission == nil { - s += hsp.NilSize - } else { - s += z.Permission.Msgsize() - } - s += 6 + z.Nonce.Msgsize() + 15 + z.TargetSQLChain.Msgsize() + 11 + z.TargetUser.Msgsize() + s = 1 + 11 + z.Permission.Msgsize() + 6 + z.Nonce.Msgsize() + 15 + z.TargetSQLChain.Msgsize() + 11 + z.TargetUser.Msgsize() return } diff --git a/types/xxx_test.go b/types/xxx_test.go index cf2454ab6..ab9d8f77f 100644 --- a/types/xxx_test.go +++ b/types/xxx_test.go @@ -68,6 +68,7 @@ func generateRandomBlock(parent hash.Hash, isGenesis bool) (b *BPBlock, err erro if err != nil { return + } h := hash.Hash{} @@ -95,8 +96,8 @@ func generateRandomBlock(parent hash.Hash, isGenesis bool) (b *BPBlock, err erro } err = b.PackAndSignBlock(priv) - return + } func generateRandomBillingRequestHeader() *BillingRequestHeader { @@ -108,6 +109,7 @@ func generateRandomBillingRequestHeader() *BillingRequestHeader { HighHeight: rand.Int31(), GasAmounts: generateRandomGasAmount(peerNum), } + } func generateRandomBillingRequest() (req *BillingRequest, err error) { @@ -117,6 +119,7 @@ func generateRandomBillingRequest() (req *BillingRequest, err error) { } if _, err = req.PackRequestHeader(); err != nil { return nil, err + } for i := 0; i < peerNum; i++ { @@ -125,29 +128,36 @@ func generateRandomBillingRequest() (req *BillingRequest, err error) { if priv, _, err = asymmetric.GenSecp256k1KeyPair(); err != nil { return + } if _, _, err = req.SignRequestHeader(priv, false); err != nil { return + } + } return + } func generateRandomBillingHeader() (tc *BillingHeader, err error) { var req *BillingRequest if req, err = generateRandomBillingRequest(); err != nil { return + } var priv *asymmetric.PrivateKey if priv, _, err = asymmetric.GenSecp256k1KeyPair(); err != nil { return + } if _, 
_, err = req.SignRequestHeader(priv, false); err != nil { return + } receivers := make([]*proto.AccountAddress, peerNum) @@ -159,27 +169,33 @@ func generateRandomBillingHeader() (tc *BillingHeader, err error) { receivers[i] = &accountAddress fees[i] = rand.Uint64() rewards[i] = rand.Uint64() + } producer := proto.AccountAddress(generateRandomHash()) tc = NewBillingHeader(pi.AccountNonce(rand.Uint32()), req, producer, receivers, fees, rewards) return tc, nil + } func generateRandomBilling() (*Billing, error) { header, err := generateRandomBillingHeader() if err != nil { return nil, err + } priv, _, err := asymmetric.GenSecp256k1KeyPair() if err != nil { return nil, err + } txBilling := NewBilling(header) if err := txBilling.Sign(priv); err != nil { return nil, err + } return txBilling, nil + } func generateRandomGasAmount(n int) []*proto.AddrAndGas { @@ -191,9 +207,11 @@ func generateRandomGasAmount(n int) []*proto.AddrAndGas { RawNodeID: proto.RawNodeID{Hash: generateRandomHash()}, GasAmount: rand.Uint64(), } + } return gasAmount + } func randBytes(n int) (b []byte) { diff --git a/worker/chainbusservice_test.go b/worker/chainbusservice_test.go index 8fd721e2d..2429061e6 100644 --- a/worker/chainbusservice_test.go +++ b/worker/chainbusservice_test.go @@ -94,7 +94,7 @@ func TestNewBusService(t *testing.T) { permStat, ok := bs.RequestPermStat(profile.ID, testAddr) So(ok, ShouldBeTrue) So(permStat.Status, ShouldEqual, profile.Users[0].Status) - So(permStat.Permission, ShouldResemble, profile.Users[0].Permission) + So(permStat.Permission, ShouldEqual, profile.Users[0].Permission) permStat, ok = bs.RequestPermStat(profile.ID, testNotExistAddr) } p, ok := bs.RequestSQLProfile(testNotExistID) @@ -116,7 +116,7 @@ func TestNewBusService(t *testing.T) { permStat, ok := bs.RequestPermStat(profile.ID, testAddr) So(ok, ShouldBeTrue) So(permStat.Status, ShouldEqual, profile.Users[0].Status) - So(permStat.Permission, ShouldResemble, profile.Users[0].Permission) + 
So(permStat.Permission, ShouldEqual, profile.Users[0].Permission) permStat, ok = bs.RequestPermStat(profile.ID, testNotExistAddr) } p, ok := bs.RequestSQLProfile(testNotExistID) diff --git a/worker/dbms.go b/worker/dbms.go index 82630c903..9329ea6d1 100644 --- a/worker/dbms.go +++ b/worker/dbms.go @@ -444,7 +444,7 @@ func (dbms *DBMS) Query(req *types.Request) (res *types.Response, err error) { if err != nil { return } - err = dbms.checkPermission(addr, req.Header.DatabaseID, req.Header.QueryType, req.Payload.Queries) + err = dbms.checkPermission(addr, req.Header.DatabaseID, req.Header.QueryType) if err != nil { return } @@ -499,59 +499,32 @@ func (dbms *DBMS) removeMeta(dbID proto.DatabaseID) (err error) { } func (dbms *DBMS) checkPermission(addr proto.AccountAddress, - dbID proto.DatabaseID, queryType types.QueryType, queries []types.Query) (err error) { + dbID proto.DatabaseID, queryType types.QueryType) (err error) { log.Debugf("in checkPermission, database id: %s, user addr: %s", dbID, addr.String()) - var ( - permStat *types.PermStat - ok bool - ) - - // get database perm stat - permStat, ok = dbms.busService.RequestPermStat(dbID, addr) - - // perm stat not exists - if !ok { - err = errors.Wrap(ErrPermissionDeny, "database not exists") - return - } - - // check if query is enabled - if !permStat.Status.EnableQuery() { - err = errors.Wrapf(ErrPermissionDeny, "cannot query, status: %d", permStat.Status) - return - } - - // check query type permission - switch queryType { - case types.ReadQuery: - if !permStat.Permission.HasReadPermission() { - err = errors.Wrapf(ErrPermissionDeny, "cannot read, permission: %d", permStat.Permission) + if permStat, ok := dbms.busService.RequestPermStat(dbID, addr); ok { + if !permStat.Status.EnableQuery() { + err = errors.Wrapf(ErrPermissionDeny, "cannot query, status: %d", permStat.Status) return } - case types.WriteQuery: - if !permStat.Permission.HasWritePermission() { - err = errors.Wrapf(ErrPermissionDeny, "cannot write, 
permission: %d", permStat.Permission) + if queryType == types.ReadQuery { + if !permStat.Permission.CheckRead() { + err = errors.Wrapf(ErrPermissionDeny, "cannot read, permission: %d", permStat.Permission) + return + } + } else if queryType == types.WriteQuery { + if !permStat.Permission.CheckWrite() { + err = errors.Wrapf(ErrPermissionDeny, "cannot write, permission: %d", permStat.Permission) + return + } + } else { + err = errors.Wrapf(ErrInvalidPermission, + "invalid permission, permission: %d", permStat.Permission) return - } - default: - err = errors.Wrapf(ErrInvalidPermission, - "invalid permission, permission: %d", permStat.Permission) - return - } - // check for query pattern - var ( - disallowedQuery string - hasDisallowedQuery bool - ) - - if disallowedQuery, hasDisallowedQuery = permStat.Permission.HasDisallowedQueryPatterns(queries); hasDisallowedQuery { - err = errors.Wrapf(ErrPermissionDeny, "disallowed query %s", disallowedQuery) - log.WithError(err).WithFields(log.Fields{ - "permission": permStat.Permission, - "query": disallowedQuery, - }).Debug("can not query") + } + } else { + err = errors.Wrap(ErrPermissionDeny, "database not exists") return } @@ -565,7 +538,7 @@ func (dbms *DBMS) addTxSubscription(dbID proto.DatabaseID, nodeID proto.NodeID, log.WithFields(log.Fields{ "databaseID": dbID, "nodeID": nodeID, - }).WithError(err).Warning("get public key failed in addTxSubscription") + }).WithError(err).Warning("get pubkey failed in addTxSubscription") return } addr, err := crypto.PubKeyHash(pubkey) @@ -584,7 +557,7 @@ func (dbms *DBMS) addTxSubscription(dbID proto.DatabaseID, nodeID proto.NodeID, "startHeight": startHeight, }).Debugf("addTxSubscription") - err = dbms.checkPermission(addr, dbID, types.ReadQuery, nil) + err = dbms.checkPermission(addr, dbID, types.ReadQuery) if err != nil { log.WithFields(log.Fields{"databaseID": dbID, "addr": addr}).WithError(err).Warning("permission deny") return diff --git a/worker/dbms_test.go b/worker/dbms_test.go 
index db8d80cbe..4895b024f 100644 --- a/worker/dbms_test.go +++ b/worker/dbms_test.go @@ -134,12 +134,11 @@ func TestDBMS(t *testing.T) { // grant write and read permission err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, - &types.PermStat{Permission: types.UserPermissionFromRole(types.Write), Status: types.Normal}) + &types.PermStat{Permission: types.Write, Status: types.Normal}) So(err, ShouldBeNil) userState, ok := dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) So(ok, ShouldBeTrue) - So(userState.Permission, ShouldNotBeNil) - So(userState.Permission.Role, ShouldEqual, types.Write) + So(userState.Permission, ShouldEqual, types.Write) So(userState.Status, ShouldEqual, types.Normal) Convey("success write and read", func() { @@ -194,11 +193,10 @@ func TestDBMS(t *testing.T) { // revoke write permission err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, - &types.PermStat{Permission: types.UserPermissionFromRole(types.Read), Status: types.Normal}) + &types.PermStat{Permission: types.Read, Status: types.Normal}) userState, ok := dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) So(ok, ShouldBeTrue) - So(userState.Permission, ShouldNotBeNil) - So(userState.Permission.Role, ShouldEqual, types.Read) + So(userState.Permission, ShouldEqual, types.Read) So(userState.Status, ShouldEqual, types.Normal) Convey("success reading and fail to write", func() { @@ -231,12 +229,10 @@ func TestDBMS(t *testing.T) { // grant invalid permission err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, - &types.PermStat{Permission: types.UserPermissionFromRole(types.Void), Status: types.Normal}) - So(err, ShouldBeNil) + &types.PermStat{Permission: types.Void, Status: types.Normal}) userState, ok = dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) So(ok, ShouldBeTrue) - So(userState.Permission, ShouldNotBeNil) - So(userState.Permission.Role, ShouldEqual, types.Void) + So(userState.Permission, ShouldEqual, types.Void) 
So(userState.Status, ShouldEqual, types.Normal) Convey("invalid permission query should fail", func() { @@ -268,12 +264,10 @@ func TestDBMS(t *testing.T) { // grant admin permission but in arrears err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, - &types.PermStat{Permission: types.UserPermissionFromRole(types.Admin), Status: types.Arrears}) - So(err, ShouldBeNil) + &types.PermStat{Permission: types.Admin, Status: types.Arrears}) userState, ok = dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) So(ok, ShouldBeTrue) - So(userState.Permission, ShouldNotBeNil) - So(userState.Permission.Role, ShouldEqual, types.Admin) + So(userState.Permission, ShouldEqual, types.Admin) So(userState.Status, ShouldEqual, types.Arrears) Convey("arrears query should fail", func() { @@ -302,12 +296,10 @@ func TestDBMS(t *testing.T) { // switch user to normal err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, - &types.PermStat{Permission: types.UserPermissionFromRole(types.Admin), Status: types.Normal}) - So(err, ShouldBeNil) + &types.PermStat{Permission: types.Admin, Status: types.Normal}) userState, ok = dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) So(ok, ShouldBeTrue) - So(userState.Permission, ShouldNotBeNil) - So(userState.Permission.Role, ShouldEqual, types.Admin) + So(userState.Permission, ShouldEqual, types.Admin) So(userState.Status, ShouldEqual, types.Normal) Convey("can send read and write queries", func() { @@ -354,101 +346,6 @@ func TestDBMS(t *testing.T) { So(err, ShouldBeNil) }) - // enforce query pattern regulations - err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, - &types.PermStat{Permission: &types.UserPermission{ - Role: types.Admin, - Patterns: []string{ - "create table test (test int)", - "SELECT 1", - "INSERT INTO TEST VALUES(1)", - }, - }, Status: types.Normal}) - So(err, ShouldBeNil) - userState, ok = dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) - So(ok, ShouldBeTrue) - 
So(userState.Permission, ShouldNotBeNil) - So(userState.Permission.Role, ShouldEqual, types.Admin) - So(userState.Permission.Patterns, ShouldHaveLength, 3) - - Convey("query patterns restrictions", func() { - var writeQuery *types.Request - var queryRes *types.Response - - // sending allowed write query - writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 11, dbID, []string{ - "create table test (test int)", - "INSERT INTO TEST VALUES(1)", - }) - So(err, ShouldBeNil) - - err = testRequest(route.DBSQuery, writeQuery, &queryRes) - So(err, ShouldBeNil) - err = queryRes.Verify() - So(err, ShouldBeNil) - So(queryRes.Header.RowCount, ShouldEqual, 0) - - // sending allowed read query - var readQuery *types.Request - readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, 1, 12, dbID, []string{ - "SELECT 1", - }) - So(err, ShouldBeNil) - - err = testRequest(route.DBSQuery, readQuery, &queryRes) - So(err, ShouldBeNil) - err = queryRes.Verify() - So(err, ShouldBeNil) - So(queryRes.Header.RowCount, ShouldEqual, uint64(1)) - So(queryRes.Payload.Rows, ShouldHaveLength, 1) - So(queryRes.Payload.Rows[0].Values, ShouldHaveLength, 1) - So(queryRes.Payload.Rows[0].Values[0], ShouldEqual, 1) - - // sending disallowed write query - writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 13, dbID, []string{ - "insert into test values(1)", - }) - So(err, ShouldBeNil) - err = testRequest(route.DBSQuery, writeQuery, &queryRes) - So(err, ShouldNotBeNil) - - // sending disallowed write query mixed with valid write query - writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 14, dbID, []string{ - "INSERT INTO TEST VALUES(1)", - "insert into test values(1)", - }) - So(err, ShouldBeNil) - err = testRequest(route.DBSQuery, writeQuery, &queryRes) - So(err, ShouldNotBeNil) - - // sending disallowed read query - readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, 1, 15, dbID, []string{ - "select * from test", - }) - So(err, ShouldBeNil) - err = 
testRequest(route.DBSQuery, readQuery, &queryRes) - So(err, ShouldNotBeNil) - - // sending disallowed read query - readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, 1, 16, dbID, []string{ - "SELECT 1", - "select * from test", - }) - So(err, ShouldBeNil) - err = testRequest(route.DBSQuery, readQuery, &queryRes) - So(err, ShouldNotBeNil) - }) - - // set back permission object - err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, - &types.PermStat{Permission: types.UserPermissionFromRole(types.Admin), Status: types.Normal}) - So(err, ShouldBeNil) - userState, ok = dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) - So(ok, ShouldBeTrue) - So(userState.Permission, ShouldNotBeNil) - So(userState.Permission.Role, ShouldEqual, types.Admin) - So(userState.Status, ShouldEqual, types.Normal) - Convey("query non-existent database", func() { // sending write query var writeQuery *types.Request diff --git a/worker/helper_test.go b/worker/helper_test.go index d561f2ed6..e7b48b14f 100644 --- a/worker/helper_test.go +++ b/worker/helper_test.go @@ -100,22 +100,22 @@ var ( testNotExistAddr = proto.AccountAddress(hash.THashH([]byte{'a', 'a'})) testUser1 = &types.SQLChainUser{ Address: testAddr, - Permission: types.UserPermissionFromRole(types.Write), + Permission: types.Write, Status: types.Normal, } testUser2 = &types.SQLChainUser{ Address: testAddr, - Permission: types.UserPermissionFromRole(types.Read), + Permission: types.Read, Status: types.Arrears, } testUser3 = &types.SQLChainUser{ Address: testAddr, - Permission: types.UserPermissionFromRole(types.Write), + Permission: types.Write, Status: types.Reminder, } testUser4 = &types.SQLChainUser{ Address: testAddr, - Permission: types.UserPermissionFromRole(types.Read), + Permission: types.Read, Status: types.Arbitration, } ) From fd66d2b9a22fb39c1cc22d5f6df2928d9fd854f9 Mon Sep 17 00:00:00 2001 From: zeqing-guo Date: Tue, 8 Jan 2019 20:33:40 +0800 Subject: [PATCH 079/302] Add test case for ob permission 
checking --- blockproducer/metastate.go | 6 + cmd/cql-minerd/main.go | 2 +- cmd/cql-observer/observation_test.go | 222 +++++++++++++++++++-- cmd/cql-observer/service.go | 14 +- test/observation/node_0/config.yaml | 3 + test/observation/node_1/config.yaml | 3 + test/observation/node_2/config.yaml | 3 + test/observation/node_observer/config.yaml | 10 +- test/observation/node_observer/private.key | Bin 96 -> 94 bytes 9 files changed, 239 insertions(+), 24 deletions(-) diff --git a/blockproducer/metastate.go b/blockproducer/metastate.go index 1dfaf3891..fd6ff3889 100644 --- a/blockproducer/metastate.go +++ b/blockproducer/metastate.go @@ -864,6 +864,12 @@ func isProviderReqMatch(po *types.ProviderProfile, req *types.CreateDatabase) (m } func (s *metaState) updatePermission(tx *types.UpdatePermission) (err error) { + log.WithFields(log.Fields{ + "tx_hash": tx.Hash().String(), + "sender": tx.GetAccountAddress(), + "db_id": tx.TargetSQLChain.String(), + "target_user": tx.TargetUser, + }).Debug("in updatePermission") sender, err := crypto.PubKeyHash(tx.Signee) if err != nil { log.WithFields(log.Fields{ diff --git a/cmd/cql-minerd/main.go b/cmd/cql-minerd/main.go index e922164e0..4034ed729 100644 --- a/cmd/cql-minerd/main.go +++ b/cmd/cql-minerd/main.go @@ -118,7 +118,7 @@ func initLogs() { func main() { // set random rand.Seed(time.Now().UnixNano()) - log.SetLevel(log.InfoLevel) + log.SetLevel(log.DebugLevel) flag.Parse() if showVersion { diff --git a/cmd/cql-observer/observation_test.go b/cmd/cql-observer/observation_test.go index 039f9bf28..801fb1598 100644 --- a/cmd/cql-observer/observation_test.go +++ b/cmd/cql-observer/observation_test.go @@ -35,12 +35,15 @@ import ( "time" bp "github.com/CovenantSQL/CovenantSQL/blockproducer" + "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/client" "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto" 
"github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/route" + "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" @@ -252,14 +255,16 @@ func TestFullProcess(t *testing.T) { Convey("test full process", t, func() { var ( - err error - cliPriv *asymmetric.PrivateKey - addr, addr2 proto.AccountAddress - dsn, dsn2 string - cfg, cfg2 *client.Config - dbID, dbID2 string - ctx1, ctx2, ctx3 context.Context - ccl1, ccl2, ccl3 context.CancelFunc + err error + cliPriv, obPriv *asymmetric.PrivateKey + addr, addr2 proto.AccountAddress + dbAddr, dbAddr2, obAddr, cliAddr proto.AccountAddress + dsn, dsn2 string + cfg, cfg2 *client.Config + dbID, dbID2 proto.DatabaseID + nonce interfaces.AccountNonce + ctx1, ctx2, ctx3, ctx4, ctx5 context.Context + ccl1, ccl2, ccl3, ccl4, ccl5 context.CancelFunc ) startNodes() defer stopNodes() @@ -268,7 +273,7 @@ func TestFullProcess(t *testing.T) { So(err, ShouldBeNil) // get miner addresses - cliPriv, _, err = privKeyStoreToAccountAddr( + cliPriv, cliAddr, err = privKeyStoreToAccountAddr( FJ(testWorkingDir, "./observation/node_c/private.key"), []byte{}) So(err, ShouldBeNil) _, addr, err = privKeyStoreToAccountAddr( @@ -277,6 +282,9 @@ func TestFullProcess(t *testing.T) { _, addr2, err = privKeyStoreToAccountAddr( FJ(testWorkingDir, "./observation/node_miner_1/private.key"), []byte{}) So(err, ShouldBeNil) + obPriv, obAddr, err = privKeyStoreToAccountAddr( + FJ(testWorkingDir, "./observation/node_observer/private.key"), []byte{}) + So(err, ShouldBeNil) // wait until bp chain service is ready ctx1, ccl1 = context.WithTimeout(context.Background(), 1*time.Minute) @@ -298,12 +306,89 @@ func TestFullProcess(t *testing.T) { // wait cfg, err = client.ParseDSN(dsn) So(err, ShouldBeNil) - dbID = 
cfg.DatabaseID + dbID = proto.DatabaseID(cfg.DatabaseID) + dbAddr, err = dbID.AccountAddress() + So(err, ShouldBeNil) ctx2, ccl2 = context.WithTimeout(context.Background(), 5*time.Minute) defer ccl2() err = bp.WaitDatabaseCreation(ctx2, proto.DatabaseID(dbID), db, 3*time.Second) So(err, ShouldBeNil) + // get nonce for observer + nonce, err = requestNonce(cliAddr) + So(err, ShouldBeNil) + + // update permission for observer + up := types.NewUpdatePermission(&types.UpdatePermissionHeader{ + TargetSQLChain: dbAddr, + TargetUser: obAddr, + Permission: types.Read, + Nonce: nonce, + }) + err = up.Sign(cliPriv) + So(err, ShouldBeNil) + addTxReq := &types.AddTxReq{} + addTxResp := &types.AddTxResp{} + addTxReq.Tx = up + err = rpc.RequestBP(route.MCCAddTx.String(), addTxReq, addTxResp) + So(err, ShouldBeNil) + + // wait for profile permission checking + ctx4, ccl4 = context.WithTimeout(context.Background(), 1*time.Minute) + defer ccl4() + err = waitProfileChecking(ctx4, 3*time.Second, proto.DatabaseID(dbID), func(profile *types.SQLChainProfile) bool { + for _, user := range profile.Users { + log.WithFields(log.Fields{ + "addr": user.Address.String(), + "perm": user.Permission, + "stat": user.Status, + }).Debug("checkFunc 1") + if user.Address == obAddr { + return user.Permission.CheckRead() + } + } + return false + }) + So(err, ShouldBeNil) + + // get nonce for ob + nonce, err = requestNonce(obAddr) + So(err, ShouldBeNil) + + // transfer token to ob + tran := types.NewTransfer(&types.TransferHeader{ + Sender: obAddr, + Receiver: dbAddr, + Amount: 100000000, + TokenType: types.Particle, + Nonce: nonce, + }) + err = tran.Sign(obPriv) + So(err, ShouldBeNil) + addTxReq = &types.AddTxReq{} + addTxResp = &types.AddTxResp{} + addTxReq.Tx = tran + err = rpc.RequestBP(route.MCCAddTx.String(), addTxReq, addTxResp) + So(err, ShouldBeNil) + + // check ob status + ctx5, ccl5 = context.WithTimeout(context.Background(), 1*time.Minute) + defer ccl5() + err = waitProfileChecking(ctx5, 
3*time.Second, proto.DatabaseID(dbID), func(profile *types.SQLChainProfile) bool { + for _, user := range profile.Users { + log.WithFields(log.Fields{ + "addr": user.Address.String(), + "perm": user.Permission, + "stat": user.Status, + }).Debug("checkFunc 2") + if user.Address == obAddr { + return user.Status.EnableQuery() + } + } + return false + }) + So(err, ShouldBeNil) + _, err = db.Exec("CREATE TABLE test (test int)") So(err, ShouldBeNil) @@ -369,7 +454,7 @@ func TestFullProcess(t *testing.T) { // wait cfg2, err = client.ParseDSN(dsn2) So(err, ShouldBeNil) - dbID2 = cfg2.DatabaseID + dbID2 = proto.DatabaseID(cfg2.DatabaseID) So(dbID, ShouldNotResemble, dbID2) ctx3, ccl3 = context.WithTimeout(context.Background(), 5*time.Minute) defer ccl3() @@ -403,7 +488,7 @@ func TestFullProcess(t *testing.T) { observerCmd, err = utils.RunCommandNB( FJ(baseDir, "./bin/cql-observer.test"), []string{"-config", FJ(testWorkingDir, "./observation/node_observer/config.yaml"), - "-database", dbID, "-reset", "oldest", + "-database", string(dbID), "-reset", "oldest", "-test.coverprofile", FJ(baseDir, "./cmd/cql-observer/observer.cover.out"), }, "observer", testWorkingDir, logDir, false, @@ -530,6 +615,77 @@ func TestFullProcess(t *testing.T) { So(err, ShouldNotBeNil) log.Info(err, res) + // test get genesis block by height + res, err = getJSON("v3/head/%v", dbID2) + So(err, ShouldNotBeNil) + + // get nonce for observer + nonce, err = requestNonce(cliAddr) + So(err, ShouldBeNil) + + // update permission for observer + dbAddr2, err = dbID2.AccountAddress() + So(err, ShouldBeNil) + up = types.NewUpdatePermission(&types.UpdatePermissionHeader{ + TargetSQLChain: dbAddr2, + TargetUser: obAddr, + Permission: types.Read, + Nonce: nonce, + }) + err = up.Sign(cliPriv) + So(err, ShouldBeNil) + addTxReq = &types.AddTxReq{} + addTxResp = &types.AddTxResp{} + addTxReq.Tx = up + err = rpc.RequestBP(route.MCCAddTx.String(), addTxReq, addTxResp) + So(err, ShouldBeNil) + + // wait for profile 
permission checking + ctx4, ccl4 = context.WithTimeout(context.Background(), 1*time.Minute) + defer ccl4() + err = waitProfileChecking(ctx4, 3*time.Second, proto.DatabaseID(dbID2), func(profile *types.SQLChainProfile) bool { + for _, user := range profile.Users { + if user.Address == obAddr { + return user.Permission.CheckRead() + } + } + return false + }) + So(err, ShouldBeNil) + + // get nonce for ob + nonce, err = requestNonce(obAddr) + So(err, ShouldBeNil) + + // transfer token to ob + tran = types.NewTransfer(&types.TransferHeader{ + Sender: obAddr, + Receiver: dbAddr2, + Amount: 100000000, + TokenType: types.Particle, + Nonce: nonce, + }) + err = tran.Sign(obPriv) + So(err, ShouldBeNil) + addTxReq = &types.AddTxReq{} + addTxResp = &types.AddTxResp{} + addTxReq.Tx = tran + err = rpc.RequestBP(route.MCCAddTx.String(), addTxReq, addTxResp) + So(err, ShouldBeNil) + + // check ob status + ctx5, ccl5 = context.WithTimeout(context.Background(), 1*time.Minute) + defer ccl5() + err = waitProfileChecking(ctx5, 3*time.Second, proto.DatabaseID(dbID2), func(profile *types.SQLChainProfile) bool { + for _, user := range profile.Users { + if user.Address == obAddr { + return user.Status.EnableQuery() + } + } + return false + }) + So(err, ShouldBeNil) + // test get genesis block by height res, err = getJSON("v3/head/%v", dbID2) So(err, ShouldBeNil) @@ -544,3 +700,45 @@ func TestFullProcess(t *testing.T) { So(err, ShouldBeNil) }) } + +func requestNonce(addr proto.AccountAddress) (nonce interfaces.AccountNonce, err error) { + nonceReq := &types.NextAccountNonceReq{} + nonceResp := &types.NextAccountNonceResp{} + nonceReq.Addr = addr + err = rpc.RequestBP(route.MCCNextAccountNonce.String(), nonceReq, nonceResp) + if err != nil { + return + } + nonce = nonceResp.Nonce + return +} + +func waitProfileChecking(ctx context.Context, period time.Duration, dbID proto.DatabaseID, + checkFunc func(profile *types.SQLChainProfile) bool) (err error) { + var ( + ticker = 
time.NewTicker(period) + req = &types.QuerySQLChainProfileReq{} + resp = &types.QuerySQLChainProfileResp{} + ) + defer ticker.Stop() + req.DBID = dbID + + for { + select { + case <-ticker.C: + err = rpc.RequestBP(route.MCCQuerySQLChainProfile.String(), req, resp) + if err == nil { + if checkFunc(&resp.Profile) { + return + } + log.WithFields(log.Fields{ + "dbID": resp.Profile.Address, + "num_of_user": len(resp.Profile.Users), + }).Debugf("get profile but failed to check in waitProfileChecking") + } + case <-ctx.Done(): + err = ctx.Err() + return + } + } +} diff --git a/cmd/cql-observer/service.go b/cmd/cql-observer/service.go index b8f539e7a..153e3c339 100644 --- a/cmd/cql-observer/service.go +++ b/cmd/cql-observer/service.go @@ -34,6 +34,8 @@ import ( "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/CovenantSQL/CovenantSQL/worker" + bolt "github.com/coreos/bbolt" ) @@ -283,12 +285,12 @@ func (s *Service) startSubscribe(dbID proto.DatabaseID) (err error) { return } - req := &sqlchain.MuxSubscribeTransactionsReq{} - resp := &sqlchain.MuxSubscribeTransactionsResp{} + req := &worker.SubscribeTransactionsReq{} + resp := &worker.SubscribeTransactionsResp{} req.Height = s.subscription[dbID] req.DatabaseID = dbID - err = s.minerRequest(dbID, route.SQLCSubscribeTransactions.String(), req, resp) + err = s.minerRequest(dbID, route.DBSSubscribeTransactions.String(), req, resp) return } @@ -440,11 +442,11 @@ func (s *Service) stop() (err error) { for dbID := range s.subscription { // send cancel subscription rpc - req := &sqlchain.MuxCancelSubscriptionReq{} - resp := &sqlchain.MuxCancelSubscriptionResp{} + req := &worker.CancelSubscriptionReq{} + resp := &worker.CancelSubscriptionResp{} req.DatabaseID = dbID - if err = s.minerRequest(dbID, route.SQLCCancelSubscription.String(), req, resp); err != nil { + if err = s.minerRequest(dbID, route.DBSCancelSubscription.String(), req, 
resp); err != nil { // cancel subscription failed log.WithField("db", dbID).WithError(err).Warning("cancel subscription") } diff --git a/test/observation/node_0/config.yaml b/test/observation/node_0/config.yaml index 21ad53261..e5c50bcd7 100644 --- a/test/observation/node_0/config.yaml +++ b/test/observation/node_0/config.yaml @@ -56,6 +56,9 @@ BlockProducer: - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd StableCoinBalance: 1000000000 CovenantCoinBalance: 1000000000 + - Address: e4e1628477a17c969f3f915f4bc7c059c3fbcbaf37855bc55a811465ea2480af + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/observation/node_1/config.yaml b/test/observation/node_1/config.yaml index 2ade86811..a301e204b 100644 --- a/test/observation/node_1/config.yaml +++ b/test/observation/node_1/config.yaml @@ -56,6 +56,9 @@ BlockProducer: - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd StableCoinBalance: 1000000000 CovenantCoinBalance: 1000000000 + - Address: e4e1628477a17c969f3f915f4bc7c059c3fbcbaf37855bc55a811465ea2480af + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/observation/node_2/config.yaml b/test/observation/node_2/config.yaml index 98f8b5bc0..3fd450a21 100644 --- a/test/observation/node_2/config.yaml +++ b/test/observation/node_2/config.yaml @@ -56,6 +56,9 @@ BlockProducer: - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd StableCoinBalance: 1000000000 CovenantCoinBalance: 1000000000 + - Address: e4e1628477a17c969f3f915f4bc7c059c3fbcbaf37855bc55a811465ea2480af + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git 
a/test/observation/node_observer/config.yaml b/test/observation/node_observer/config.yaml index fc516fca0..16d985851 100644 --- a/test/observation/node_observer/config.yaml +++ b/test/observation/node_observer/config.yaml @@ -4,7 +4,7 @@ PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" DHTFileName: "dht.db" ListenAddr: "127.0.0.1:4123" -ThisNodeID: "0000002100a44923021af2c91822e47998b0842cd450774c020257304acdce0b" +ThisNodeID: "00000045aecffbb1dc33a9846a2d4d1ca09593c3a316bb4ec635889ac3a8b0aa" QPS: 1000 BillingBlockCount: 3600 ChainBusPeriod: 1s @@ -71,14 +71,14 @@ KnownNodes: Addr: 127.0.0.1:4120 PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" Role: Follower -- ID: 0000002100a44923021af2c91822e47998b0842cd450774c020257304acdce0b +- ID: 00000045aecffbb1dc33a9846a2d4d1ca09593c3a316bb4ec635889ac3a8b0aa Nonce: - a: 819961 + a: 4399024610213 b: 0 c: 0 - d: 7322664668 + d: 0 Addr: 127.0.0.1:4123 - PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + PublicKey: 02505a09a833710b691a570c5de399f3633ec4752422ae80b75f0dc8a8acc48c62 Role: Client - ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade Nonce: diff --git a/test/observation/node_observer/private.key b/test/observation/node_observer/private.key index f563980c1fcd669303b1bee9c2172bf5a3519b8c..595bb0f5a0895d68d237c504c005f18a313d1198 100644 GIT binary patch literal 94 zcmV~$>k2?H7y!^`GNbv4hVXr@$nBqOvyofK>pKVRJEN)A>*ndho^er~w~C|e@Lb$p v^2~n|0?EZOt_7n=3uVnAAs@>5P7Q0fRt8MyMu&rSU?TYK2rg+f?W6Mt^`sw9 literal 96 zcmV-m0H6PF*slzHCqzPE3aw^kxJ?Q%G%ogw14*THn=7~eV;?h-t?#^t5W+6R^1DgL z$@60LgW8>L#Ft4anW%5%J6f5~?krWm@CHc~TLX=J0P-Na@n`wgY{PEN*;2omcYC0; Ca4_ls From 6fd719bc3a9d1e985f12099d7f0f5f24cefffe8d Mon Sep 17 00:00:00 2001 From: leventeliu Date: Mon, 7 Jan 2019 16:31:08 +0800 Subject: [PATCH 080/302] Add bench tag --- Makefile | 6 ++++++ cmd/cql-minerd/main.go | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/Makefile 
b/Makefile index 43d6b462f..8163b4d8a 100644 --- a/Makefile +++ b/Makefile @@ -94,6 +94,12 @@ push_testnet: docker tag $(IMAGE):$(VERSION) $(IMAGE):testnet docker push $(IMAGE):testnet +push_bench: + docker tag $(OB_IMAGE):$(VERSION) $(OB_IMAGE):bench + docker push $(OB_IMAGE):bench + docker tag $(IMAGE):$(VERSION) $(IMAGE):bench + docker push $(IMAGE):bench + push: docker push $(OB_IMAGE):$(VERSION) docker push $(OB_IMAGE):latest diff --git a/cmd/cql-minerd/main.go b/cmd/cql-minerd/main.go index e922164e0..4034ed729 100644 --- a/cmd/cql-minerd/main.go +++ b/cmd/cql-minerd/main.go @@ -118,7 +118,7 @@ func initLogs() { func main() { // set random rand.Seed(time.Now().UnixNano()) - log.SetLevel(log.InfoLevel) + log.SetLevel(log.DebugLevel) flag.Parse() if showVersion { From a66cdd9be7c7fde53b1d4d1d09f5e98312a95c28 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Mon, 7 Jan 2019 21:17:49 +0800 Subject: [PATCH 081/302] Add trace log --- sqlchain/chain.go | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/sqlchain/chain.go b/sqlchain/chain.go index cd314ef99..633151e78 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -1180,19 +1180,45 @@ func (c *Chain) replicationCycle(ctx context.Context) { // Query queries req from local chain state and returns the query results in resp. 
func (c *Chain) Query(req *types.Request) (resp *types.Response, err error) { - var ref *x.QueryTracker + var ( + ref *x.QueryTracker + start = time.Now() + + quired, signed, added, updated time.Duration + ) + + defer func() { + var fields = log.Fields{} + if quired > 0 { + fields["1#queried"] = float64(quired.Nanoseconds()) / 1000 + } + if signed > 0 { + fields["2#signed"] = float64((signed - quired).Nanoseconds()) / 1000 + } + if added > 0 { + fields["3#added"] = float64((added - signed).Nanoseconds()) / 1000 + } + if updated > 0 { + fields["4#updated"] = float64((updated - added).Nanoseconds()) / 1000 + } + log.WithFields(fields).Debug("Query duration stat (us)") + }() // TODO(leventeliu): we're using an external context passed by request. Make sure that // cancelling will be propagated to this context before chain instance stops. if ref, resp, err = c.st.QueryWithContext(req.GetContext(), req); err != nil { return } + quired = time.Since(start) if err = resp.Sign(c.pk); err != nil { return } + signed = time.Since(start) if err = c.addResponse(&resp.Header); err != nil { return } + added = time.Since(start) ref.UpdateResp(resp) + updated = time.Since(start) return } From 5eb2dc10956050dfc854d4119e4f70696e19f3d4 Mon Sep 17 00:00:00 2001 From: zeqing-guo Date: Tue, 8 Jan 2019 20:43:57 +0800 Subject: [PATCH 082/302] Change debug level to InfoLevel --- cmd/cql-minerd/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/cql-minerd/main.go b/cmd/cql-minerd/main.go index 4034ed729..e922164e0 100644 --- a/cmd/cql-minerd/main.go +++ b/cmd/cql-minerd/main.go @@ -118,7 +118,7 @@ func initLogs() { func main() { // set random rand.Seed(time.Now().UnixNano()) - log.SetLevel(log.DebugLevel) + log.SetLevel(log.InfoLevel) flag.Parse() if showVersion { From 1eef4076b0e6ddaa116d3ab6a330c9639624470d Mon Sep 17 00:00:00 2001 From: zeqing-guo Date: Tue, 8 Jan 2019 21:03:53 +0800 Subject: [PATCH 083/302] Remove unusable code --- route/acl.go | 8 -------- 
sqlchain/mux.go | 52 ------------------------------------------------- sqlchain/rpc.go | 29 --------------------------- 3 files changed, 89 deletions(-) diff --git a/route/acl.go b/route/acl.go index f62cb403e..8efa6a1e9 100644 --- a/route/acl.go +++ b/route/acl.go @@ -90,10 +90,6 @@ const ( SQLCSignBilling // SQLCLaunchBilling is used by blockproducer to trigger the billing process in sqlchain SQLCLaunchBilling - // SQLCSubscribeTransactions is used by sqlchain to handle observer subscription request - SQLCSubscribeTransactions - // SQLCCancelSubscription is used by sqlchain to handle observer subscription cancellation request - SQLCCancelSubscription // OBSAdviseNewBlock is used by sqlchain to push new block to observers OBSAdviseNewBlock // MCCAdviseNewBlock is used by block producer to push block to adjacent nodes @@ -166,10 +162,6 @@ func (s RemoteFunc) String() string { return "SQLC.SignBilling" case SQLCLaunchBilling: return "SQLC.LaunchBilling" - case SQLCSubscribeTransactions: - return "SQLC.SubscribeTransactions" - case SQLCCancelSubscription: - return "SQLC.CancelSubscription" case OBSAdviseNewBlock: return "OBS.AdviseNewBlock" case MCCAdviseNewBlock: diff --git a/sqlchain/mux.go b/sqlchain/mux.go index 836f038c9..234c2c7a9 100644 --- a/sqlchain/mux.go +++ b/sqlchain/mux.go @@ -103,34 +103,6 @@ type MuxFetchBlockResp struct { FetchBlockResp } -// MuxSubscribeTransactionsReq defines a request of the SubscribeTransactions RPC method. -type MuxSubscribeTransactionsReq struct { - proto.Envelope - proto.DatabaseID - SubscribeTransactionsReq -} - -// MuxSubscribeTransactionsResp defines a response of the SubscribeTransactions RPC method. -type MuxSubscribeTransactionsResp struct { - proto.Envelope - proto.DatabaseID - SubscribeTransactionsResp -} - -// MuxCancelSubscriptionReq defines a request of the CancelSubscription RPC method. 
-type MuxCancelSubscriptionReq struct { - proto.Envelope - proto.DatabaseID - CancelSubscriptionReq -} - -// MuxCancelSubscriptionResp defines a response of the CancelSubscription RPC method. -type MuxCancelSubscriptionResp struct { - proto.Envelope - proto.DatabaseID - CancelSubscriptionResp -} - // AdviseNewBlock is the RPC method to advise a new produced block to the target server. func (s *MuxService) AdviseNewBlock(req *MuxAdviseNewBlockReq, resp *MuxAdviseNewBlockResp) error { if v, ok := s.serviceMap.Load(req.DatabaseID); ok { @@ -176,27 +148,3 @@ func (s *MuxService) FetchBlock(req *MuxFetchBlockReq, resp *MuxFetchBlockResp) return ErrUnknownMuxRequest } - -// SubscribeTransactions is the RPC method to subscribe transactions from the target server. -func (s *MuxService) SubscribeTransactions(req *MuxSubscribeTransactionsReq, resp *MuxSubscribeTransactionsResp) (err error) { - if v, ok := s.serviceMap.Load(req.DatabaseID); ok { - resp.Envelope = req.Envelope - resp.DatabaseID = req.DatabaseID - req.SubscribeTransactionsReq.SubscriberID = req.GetNodeID().ToNodeID() - return v.(*ChainRPCService).SubscribeTransactions(&req.SubscribeTransactionsReq, &resp.SubscribeTransactionsResp) - } - - return ErrUnknownMuxRequest -} - -// CancelSubscription is the RPC method to cancel subscription from the target server. 
-func (s *MuxService) CancelSubscription(req *MuxCancelSubscriptionReq, resp *MuxCancelSubscriptionResp) (err error) { - if v, ok := s.serviceMap.Load(req.DatabaseID); ok { - resp.Envelope = req.Envelope - resp.DatabaseID = req.DatabaseID - req.CancelSubscriptionReq.SubscriberID = req.GetNodeID().ToNodeID() - return v.(*ChainRPCService).CancelSubscription(&req.CancelSubscriptionReq, &resp.CancelSubscriptionResp) - } - - return ErrUnknownMuxRequest -} diff --git a/sqlchain/rpc.go b/sqlchain/rpc.go index e9691d668..40b4a84b7 100644 --- a/sqlchain/rpc.go +++ b/sqlchain/rpc.go @@ -17,7 +17,6 @@ package sqlchain import ( - "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/types" ) @@ -64,24 +63,6 @@ type FetchBlockResp struct { Block *types.Block } -// SubscribeTransactionsReq defines a request of SubscribeTransaction RPC method. -type SubscribeTransactionsReq struct { - SubscriberID proto.NodeID - Height int32 -} - -// SubscribeTransactionsResp defines a response of SubscribeTransaction RPC method. -type SubscribeTransactionsResp struct { -} - -// CancelSubscriptionReq defines a request of CancelSubscription RPC method. -type CancelSubscriptionReq struct { - SubscriberID proto.NodeID -} - -// CancelSubscriptionResp defines a response of CancelSubscription RPC method. -type CancelSubscriptionResp struct{} - // AdviseNewBlock is the RPC method to advise a new produced block to the target server. func (s *ChainRPCService) AdviseNewBlock(req *AdviseNewBlockReq, resp *AdviseNewBlockResp) ( err error) { @@ -107,13 +88,3 @@ func (s *ChainRPCService) FetchBlock(req *FetchBlockReq, resp *FetchBlockResp) ( resp.Block, err = s.chain.FetchBlock(req.Height) return } - -// SubscribeTransactions is the RPC method to fetch subscribe new packed and confirmed transactions from the target server. 
-func (s *ChainRPCService) SubscribeTransactions(req *SubscribeTransactionsReq, _ *SubscribeTransactionsResp) error { - return s.chain.AddSubscription(req.SubscriberID, req.Height) -} - -// CancelSubscription is the RPC method to cancel subscription in the target server. -func (s *ChainRPCService) CancelSubscription(req *CancelSubscriptionReq, _ *CancelSubscriptionResp) error { - return s.chain.CancelSubscription(req.SubscriberID) -} From bc0df6fa0fc18d254871d3118b58857351591acd Mon Sep 17 00:00:00 2001 From: leventeliu Date: Tue, 8 Jan 2019 22:01:24 +0800 Subject: [PATCH 084/302] Change counter name to fix the new object type --- sqlchain/ackindex.go | 8 ++++---- sqlchain/chain.go | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/sqlchain/ackindex.go b/sqlchain/ackindex.go index 8fab0d349..748937cc6 100644 --- a/sqlchain/ackindex.go +++ b/sqlchain/ackindex.go @@ -29,7 +29,7 @@ var ( // Global atomic counters for stats multiIndexCount int32 responseCount int32 - ackTrackerCount int32 + ackCount int32 ) type multiAckIndex struct { @@ -77,7 +77,7 @@ func (i *multiAckIndex) register(ack *types.SignedAckHeader) (err error) { delete(i.respIndex, key) i.ackIndex[key] = ack atomic.AddInt32(&responseCount, -1) - atomic.AddInt32(&ackTrackerCount, 1) + atomic.AddInt32(&ackCount, 1) return } @@ -98,7 +98,7 @@ func (i *multiAckIndex) remove(ack *types.SignedAckHeader) (err error) { return } delete(i.ackIndex, key) - atomic.AddInt32(&ackTrackerCount, -1) + atomic.AddInt32(&ackCount, -1) return } err = errors.Wrapf(ErrQueryNotFound, "remove key %s -x- ack %s", &key, ack.Hash()) @@ -192,7 +192,7 @@ func (i *ackIndex) advance(h int32) { for _, v := range dl { v.expire() atomic.AddInt32(&responseCount, int32(-len(v.respIndex))) - atomic.AddInt32(&ackTrackerCount, int32(-len(v.ackIndex))) + atomic.AddInt32(&ackCount, int32(-len(v.ackIndex))) } atomic.AddInt32(&multiIndexCount, int32(-len(dl))) } diff --git a/sqlchain/chain.go b/sqlchain/chain.go index 
5a180d6db..391cea798 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -1222,7 +1222,7 @@ func (c *Chain) stat() { var ( ic = atomic.LoadInt32(&multiIndexCount) rc = atomic.LoadInt32(&responseCount) - tc = atomic.LoadInt32(&ackTrackerCount) + tc = atomic.LoadInt32(&ackCount) bc = atomic.LoadInt32(&cachedBlockCount) ) // Print chain stats From 012c7ff03925fe18f89ce64f7296804f3831d5c6 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Tue, 8 Jan 2019 22:15:39 +0800 Subject: [PATCH 085/302] Add error wraps --- worker/db.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/worker/db.go b/worker/db.go index 1fe1ab40b..a8c07ce46 100644 --- a/worker/db.go +++ b/worker/db.go @@ -243,6 +243,7 @@ func (db *Database) Query(request *types.Request) (response *types.Response, err switch request.Header.QueryType { case types.ReadQuery: if tracker, response, err = db.chain.Query(request); err != nil { + err = errors.Wrap(err, "failed to query read query") return } case types.WriteQuery: @@ -250,10 +251,12 @@ func (db *Database) Query(request *types.Request) (response *types.Response, err // reset context request.SetContext(context.Background()) if tracker, response, err = db.chain.Query(request); err != nil { + err = errors.Wrap(err, "failed to execute with eventual consistency") return } } else { if tracker, response, err = db.writeQuery(request); err != nil { + err = errors.Wrap(err, "failed to execute") return } } @@ -264,9 +267,11 @@ func (db *Database) Query(request *types.Request) (response *types.Response, err // Sign response if err = response.Sign(db.privateKey); err != nil { + err = errors.Wrap(err, "failed to sign response") return } if err = db.chain.AddResponse(&response.Header); err != nil { + err = errors.Wrap(err, "failed to add response to index") return } tracker.UpdateResp(response) From cbc1721ac162a38d444fd53a5ecd21eaf3c43dfa Mon Sep 17 00:00:00 2001 From: leventeliu Date: Tue, 8 Jan 2019 22:49:55 +0800 Subject: [PATCH 086/302] Fix forever loop on 
exiting --- sqlchain/chain.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/sqlchain/chain.go b/sqlchain/chain.go index 391cea798..694473e45 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -585,6 +585,10 @@ func (c *Chain) produceBlock(now time.Time) (err error) { // TODO(leventeliu): maybe block waiting at a ready channel instead? for !v.Ready() { time.Sleep(1 * time.Millisecond) + if c.rt.ctx.Err() != nil { + err = c.rt.ctx.Err() + return + } } block.QueryTxs[i] = &types.QueryAsTx{ // TODO(leventeliu): add acks for billing. @@ -1050,7 +1054,6 @@ func (c *Chain) CheckAndPushNewBlock(block *types.Block) (err error) { if block.Producer() == c.rt.server { return c.pushBlock(block) } - // Check block producer index, found := peers.Find(block.Producer()) From ec6c55aeff9298b36694ac37331f48405fd8d6ca Mon Sep 17 00:00:00 2001 From: leventeliu Date: Tue, 8 Jan 2019 23:01:40 +0800 Subject: [PATCH 087/302] Adjust log levels --- cmd/cql-minerd/integration_test.go | 4 ++-- cmd/cql-minerd/main.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index 3e6dcce1a..290f5af34 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -160,7 +160,7 @@ func startNodes() { []string{"-config", FJ(testWorkingDir, "./integration/node_miner_1/config.yaml"), "-test.coverprofile", FJ(baseDir, "./cmd/cql-minerd/miner1.cover.out"), }, - "miner1", testWorkingDir, logDir, true, + "miner1", testWorkingDir, logDir, false, ); err == nil { nodeCmds = append(nodeCmds, cmd) } else { @@ -173,7 +173,7 @@ func startNodes() { []string{"-config", FJ(testWorkingDir, "./integration/node_miner_2/config.yaml"), "-test.coverprofile", FJ(baseDir, "./cmd/cql-minerd/miner2.cover.out"), }, - "miner2", testWorkingDir, logDir, true, + "miner2", testWorkingDir, logDir, false, ); err == nil { nodeCmds = append(nodeCmds, cmd) } else { diff --git 
a/cmd/cql-minerd/main.go b/cmd/cql-minerd/main.go index 4034ed729..e922164e0 100644 --- a/cmd/cql-minerd/main.go +++ b/cmd/cql-minerd/main.go @@ -118,7 +118,7 @@ func initLogs() { func main() { // set random rand.Seed(time.Now().UnixNano()) - log.SetLevel(log.DebugLevel) + log.SetLevel(log.InfoLevel) flag.Parse() if showVersion { From f8deb1a501f87e4c7434b254472969e591ca92c5 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Wed, 9 Jan 2019 12:04:09 +0800 Subject: [PATCH 088/302] Query transaction confirmed state --- blockproducer/branch.go | 2 +- blockproducer/chain_io.go | 15 +++++++++++++-- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/blockproducer/branch.go b/blockproducer/branch.go index 3129f7c4f..cfa42b5af 100644 --- a/blockproducer/branch.go +++ b/blockproducer/branch.go @@ -240,7 +240,7 @@ func (b *branch) clearUnpackedTxs(txs []pi.Transaction) { } } -func (b *branch) queryTx(hash hash.Hash) (state pi.TransactionState, ok bool) { +func (b *branch) queryTxState(hash hash.Hash) (state pi.TransactionState, ok bool) { if _, ok = b.unpacked[hash]; ok { state = pi.TransactionStatePending return diff --git a/blockproducer/chain_io.go b/blockproducer/chain_io.go index e843b7e1c..27c09e405 100644 --- a/blockproducer/chain_io.go +++ b/blockproducer/chain_io.go @@ -140,10 +140,21 @@ func (c *Chain) queryTxState(hash hash.Hash) (state pi.TransactionState, err err defer c.RUnlock() var ok bool state = pi.TransactionStateNotFound - if state, ok = c.headBranch.queryTx(hash); ok { + if state, ok = c.headBranch.queryTxState(hash); ok { return } - // TODO(leventeliu): get confirmed state from tx history. 
+ + var ( + count int + querySQL = `select count(*) from indexed_transactions where hash = ?` + ) + if err = c.storage.Reader().QueryRow(querySQL, hash.String()).Scan(&count); err != nil { + return pi.TransactionStateNotFound, err + } + + if count > 0 { + return pi.TransactionStateConfirmed, nil + } return } From 7df41cb8536446353c43a454b67c3331bfc2e1c9 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Wed, 9 Jan 2019 12:15:47 +0800 Subject: [PATCH 089/302] Format SQL query statements --- blockproducer/chain_io.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/blockproducer/chain_io.go b/blockproducer/chain_io.go index 27c09e405..8d04bf54d 100644 --- a/blockproducer/chain_io.go +++ b/blockproducer/chain_io.go @@ -146,7 +146,7 @@ func (c *Chain) queryTxState(hash hash.Hash) (state pi.TransactionState, err err var ( count int - querySQL = `select count(*) from indexed_transactions where hash = ?` + querySQL = `SELECT COUNT(*) FROM "indexed_transactions" WHERE "hash" = ?` ) if err = c.storage.Reader().QueryRow(querySQL, hash.String()).Scan(&count); err != nil { return pi.TransactionStateNotFound, err From cd1d3a0ab2ee20b6cf9ec6314ae21d752ba775a9 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Wed, 9 Jan 2019 12:25:03 +0800 Subject: [PATCH 090/302] Fix return tx state --- blockproducer/chain_io.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/blockproducer/chain_io.go b/blockproducer/chain_io.go index 8d04bf54d..5b6f19b96 100644 --- a/blockproducer/chain_io.go +++ b/blockproducer/chain_io.go @@ -139,7 +139,7 @@ func (c *Chain) queryTxState(hash hash.Hash) (state pi.TransactionState, err err c.RLock() defer c.RUnlock() var ok bool - state = pi.TransactionStateNotFound + if state, ok = c.headBranch.queryTxState(hash); ok { return } @@ -155,7 +155,7 @@ func (c *Chain) queryTxState(hash hash.Hash) (state pi.TransactionState, err err if count > 0 { return pi.TransactionStateConfirmed, nil } - return + return pi.TransactionStateNotFound, 
nil } func (c *Chain) immutableNextNonce(addr proto.AccountAddress) (n pi.AccountNonce, err error) { From a568ccb88a33101284e94e246bad2ca52e9a880e Mon Sep 17 00:00:00 2001 From: leventeliu Date: Wed, 9 Jan 2019 17:34:34 +0800 Subject: [PATCH 091/302] Minor fix --- blockproducer/branch.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/blockproducer/branch.go b/blockproducer/branch.go index cfa42b5af..93592fec4 100644 --- a/blockproducer/branch.go +++ b/blockproducer/branch.go @@ -57,7 +57,7 @@ func newBranch( } // Apply new blocks to view and pool for _, bn := range list { - if len(bn.block.Transactions) > pl.MaxPendingTxsPerAccount { + if len(bn.block.Transactions) > pl.MaxTransactionsPerBlock { return nil, ErrTooManyTransactionsInBlock } From 2f548d5ba4eec2208974d63b31dab70a3a79b839 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Wed, 9 Jan 2019 17:49:31 +0800 Subject: [PATCH 092/302] Remove .String() in logging --- blockproducer/metastate.go | 40 +++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/blockproducer/metastate.go b/blockproducer/metastate.go index 6379cf150..af18e806b 100644 --- a/blockproducer/metastate.go +++ b/blockproducer/metastate.go @@ -104,10 +104,10 @@ func (s *metaState) loadAccountTokenBalance(addr proto.AccountAddress, var o *types.Account defer func() { log.WithFields(log.Fields{ - "account": addr.String(), - "balance": b, - "tokenType": tokenType, - "loaded": loaded, + "account": addr, + "balance": b, + "token_type": tokenType, + "loaded": loaded, }).Debug("queried token account") }() @@ -124,7 +124,7 @@ func (s *metaState) loadAccountTokenBalance(addr proto.AccountAddress, func (s *metaState) storeBaseAccount(k proto.AccountAddress, v *types.Account) (err error) { log.WithFields(log.Fields{ - "addr": k.String(), + "addr": k, "account": v, }).Debug("store account") // Since a transfer tx may create an empty receiver account, this method should try to cover @@ -308,7 
+308,7 @@ func (s *metaState) transferAccountToken(transfer *types.Transfer) (err error) { } if realSender != transfer.Sender { err = errors.Wrapf(ErrInvalidSender, - "applyTx failed: real sender %s, sender %s", realSender.String(), transfer.Sender.String()) + "applyTx failed: real sender %s, sender %s", realSender, transfer.Sender) log.WithError(err).Warning("public key not match sender in applyTransaction") return } @@ -545,7 +545,7 @@ func (s *metaState) nextNonce(addr proto.AccountAddress) (nonce pi.AccountNonce, if o, loaded = s.readonly.accounts[addr]; !loaded { err = ErrAccountNotFound log.WithFields(log.Fields{ - "addr": addr.String(), + "addr": addr, }).WithError(err).Error("unexpected error") return } @@ -622,7 +622,7 @@ func (s *metaState) matchProvidersWithUser(tx *types.CreateDatabase) (err error) } if sender != tx.Owner { err = errors.Wrapf(ErrInvalidSender, "match failed with real sender: %s, sender: %s", - sender.String(), tx.Owner.String()) + sender, tx.Owner) return } @@ -650,8 +650,8 @@ func (s *metaState) matchProvidersWithUser(tx *types.CreateDatabase) (err error) for _, m := range tx.ResourceMeta.TargetMiners { if po, loaded := s.loadProviderObject(m); !loaded { log.WithFields(log.Fields{ - "miner_addr": m.String(), - "user_addr": sender.String(), + "miner_addr": m, + "user_addr": sender, }).Error(err) err = ErrNoSuchMiner continue @@ -865,9 +865,9 @@ func isProviderReqMatch(po *types.ProviderProfile, req *types.CreateDatabase) (m func (s *metaState) updatePermission(tx *types.UpdatePermission) (err error) { log.WithFields(log.Fields{ - "tx_hash": tx.Hash().String(), + "tx_hash": tx.Hash(), "sender": tx.GetAccountAddress(), - "db_id": tx.TargetSQLChain.String(), + "db_id": tx.TargetSQLChain, "target_user": tx.TargetUser, }).Debug("in updatePermission") sender, err := crypto.PubKeyHash(tx.Signee) @@ -918,9 +918,9 @@ func (s *metaState) updatePermission(tx *types.UpdatePermission) (err error) { if numOfAdmin <= 1 && tx.TargetUser == sender && 
tx.Permission != types.Admin { err = ErrNoAdminLeft log.WithFields(log.Fields{ - "sender": sender.String(), - "dbID": tx.TargetSQLChain.String(), - "targetUser": tx.TargetUser.String(), + "sender": sender, + "dbID": tx.TargetSQLChain, + "targetUser": tx.TargetUser, }).WithError(err).Warning("in updatePermission") return } @@ -1012,7 +1012,7 @@ func (s *metaState) updateBilling(tx *types.UpdateBilling) (err error) { } for _, userCost := range tx.Users { - log.Debugf("update billing user cost: %s, cost: %d", userCost.User.String(), userCost.Cost) + log.Debugf("update billing user cost: %s, cost: %d", userCost.User, userCost.Cost) costMap[userCost.User] = userCost.Cost if _, ok := userMap[userCost.User]; !ok { userMap[userCost.User] = make(map[proto.AccountAddress]uint64) @@ -1088,7 +1088,7 @@ func (s *metaState) transferSQLChainTokenBalance(transfer *types.Transfer) (err if realSender != transfer.Sender { err = errors.Wrapf(ErrInvalidSender, - "applyTx failed: real sender %s, sender %s", realSender.String(), transfer.Sender.String()) + "applyTx failed: real sender %s, sender %s", realSender, transfer.Sender) log.WithError(err).Warning("public key not match sender in applyTransaction") return } @@ -1103,7 +1103,7 @@ func (s *metaState) transferSQLChainTokenBalance(transfer *types.Transfer) (err err = ErrDatabaseNotFound log.WithFields(log.Fields{ "dbid": transfer.Receiver.DatabaseID(), - "sender": transfer.Sender.String(), + "sender": transfer.Sender, }).WithError(err).Warning("database not exist in transferSQLChainTokenBalance") return } @@ -1111,7 +1111,7 @@ func (s *metaState) transferSQLChainTokenBalance(transfer *types.Transfer) (err err = ErrWrongTokenType log.WithFields(log.Fields{ "dbid": transfer.Receiver.DatabaseID(), - "sender": transfer.Sender.String(), + "sender": transfer.Sender, }).WithError(err).Warning("error token type in transferSQLChainTokenBalance") return } @@ -1119,7 +1119,7 @@ func (s *metaState) transferSQLChainTokenBalance(transfer 
*types.Transfer) (err if account.TokenBalance[transfer.TokenType] < transfer.Amount { err = ErrInsufficientBalance log.WithFields(log.Fields{ - "addr": account.Address.String(), + "addr": account.Address, "amount": account.TokenBalance[transfer.TokenType], "transfer_amount": transfer.Amount, "token_type": transfer.TokenType, From ed098df9f9f1573bba7aaca8182304899772e128 Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 9 Jan 2019 18:57:11 +0800 Subject: [PATCH 093/302] Add 0.2ms GNTE test, remove 200ms test, remove cpu=2 test case. --- cmd/cql-minerd/benchGNTE.sh | 6 --- test/GNTE/conf/gnte_0.2ms.yaml | 79 ++++++++++++++++++++++++++++++++++ test/GNTE/run.sh | 2 +- 3 files changed, 80 insertions(+), 7 deletions(-) create mode 100644 test/GNTE/conf/gnte_0.2ms.yaml diff --git a/cmd/cql-minerd/benchGNTE.sh b/cmd/cql-minerd/benchGNTE.sh index 89181bd47..d7895b6a3 100755 --- a/cmd/cql-minerd/benchGNTE.sh +++ b/cmd/cql-minerd/benchGNTE.sh @@ -14,12 +14,6 @@ go test -cpu=4 -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ |tee -a gnte. 
go test -cpu=4 -bench=^BenchmarkMinerGNTE4$ -benchtime=10s -run ^$ |tee -a gnte.log go test -cpu=4 -bench=^BenchmarkMinerGNTE8$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=2 -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=2 -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=2 -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=2 -bench=^BenchmarkMinerGNTE4$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=2 -bench=^BenchmarkMinerGNTE8$ -benchtime=10s -run ^$ |tee -a gnte.log - go test -cpu=1 -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ |tee -a gnte.log go test -cpu=1 -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ |tee -a gnte.log go test -cpu=1 -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ |tee -a gnte.log diff --git a/test/GNTE/conf/gnte_0.2ms.yaml b/test/GNTE/conf/gnte_0.2ms.yaml new file mode 100644 index 000000000..d89f65dff --- /dev/null +++ b/test/GNTE/conf/gnte_0.2ms.yaml @@ -0,0 +1,79 @@ +# Only support 10.250.0.2 ~ 10.250.254.254 +group: + - + name: bp + nodes: + - # bp10.250.1.2 + ip: 10.250.1.2/32 + cmd: "cd /scripts && ./bin/cqld -config ./node_0/config.yaml" + - # bp10.250.1.3 + ip: 10.250.1.3/32 + cmd: "cd /scripts && ./bin/cqld -config ./node_1/config.yaml" + - # bp10.250.1.4 + ip: 10.250.1.4/32 + cmd: "cd /scripts && ./bin/cqld -config ./node_2/config.yaml" + delay: "0.2ms" + rate: "1000mbit" + - + name: miner + nodes: + - # miner10.250.100.2 + ip: 10.250.100.2/32 + cmd: "cd /scripts && ./bin/cql-minerd -config ./node_miner_10.250.100.2/config.yaml" + - # miner10.250.100.3 + ip: 10.250.100.3/32 + cmd: "cd /scripts && ./bin/cql-minerd -config ./node_miner_10.250.100.3/config.yaml" + - # miner10.250.100.4 + ip: 10.250.100.4/32 + cmd: "cd /scripts && ./bin/cql-minerd -config ./node_miner_10.250.100.4/config.yaml" + - # miner10.250.100.5 + ip: 10.250.100.5/32 + cmd: "cd /scripts && ./bin/cql-minerd -config 
./node_miner_10.250.100.5/config.yaml" + - # miner10.250.100.6 + ip: 10.250.100.6/32 + cmd: "cd /scripts && ./bin/cql-minerd -config ./node_miner_10.250.100.6/config.yaml" + - # miner10.250.100.7 + ip: 10.250.100.7/32 + cmd: "cd /scripts && ./bin/cql-minerd -config ./node_miner_10.250.100.7/config.yaml" + - # miner10.250.100.8 + ip: 10.250.100.8/32 + cmd: "cd /scripts && ./bin/cql-minerd -config ./node_miner_10.250.100.8/config.yaml" + - # miner10.250.100.9 + ip: 10.250.100.9/32 + cmd: "cd /scripts && ./bin/cql-minerd -config ./node_miner_10.250.100.9/config.yaml" + delay: "0.2ms" + rate: "1000mbit" + - + name: client + nodes: + - # node_c + ip: 10.250.0.2/32 + cmd: "ping -c3 g.cn" + - # node_adapter + ip: 10.250.0.254/32 + cmd: "cd /scripts && ./bin/cql-adapter -config ./node_c/config.yaml" + delay: "0.2ms" + rate: "1000mbit" + +network: + - + groups: + - bp + - miner + delay: "0.2ms" + rate: "1000mbit" + + - + groups: + - bp + - client + delay: "0.2ms" + rate: "1000mbit" + + - + groups: + - client + - miner + delay: "0.2ms" + rate: "1000mbit" + diff --git a/test/GNTE/run.sh b/test/GNTE/run.sh index 2d6be6b36..1912ef866 100755 --- a/test/GNTE/run.sh +++ b/test/GNTE/run.sh @@ -1,7 +1,7 @@ #!/bin/bash -x yaml=( - ./scripts/gnte_{0,5,20,100,200}ms.yaml + ./scripts/gnte_{0,0.2,5,20,100}ms.yaml ) TEST_WD=$(cd $(dirname $0)/; pwd) From 81add6eebf7a878de6ef2e0f9650953c8c29c518 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Wed, 9 Jan 2019 20:35:08 +0800 Subject: [PATCH 094/302] Optimize loop --- cmd/cql-minerd/integration_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index 266237bb5..193633451 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -815,6 +815,7 @@ func benchOutsideMiner(b *testing.B, minerCount uint16, confDir string) { for _, node := range conf.GConf.KnownNodes { if node.Role == proto.Leader { log.Infof("Benching started on bp addr: 
%v", node.Addr) + } } From c83d981342055362db315c42f45e8f46331f0413 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Wed, 9 Jan 2019 20:36:17 +0800 Subject: [PATCH 095/302] Optimize loop --- cmd/cql-minerd/integration_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index 193633451..a9e2d1cfb 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -815,7 +815,7 @@ func benchOutsideMiner(b *testing.B, minerCount uint16, confDir string) { for _, node := range conf.GConf.KnownNodes { if node.Role == proto.Leader { log.Infof("Benching started on bp addr: %v", node.Addr) - + break } } From 1cd7c79013d52a662132bb05b7a6757b6da27b95 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Wed, 9 Jan 2019 20:11:47 +0800 Subject: [PATCH 096/302] Add trace log for kayak follower --- kayak/runtime.go | 72 +++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 68 insertions(+), 4 deletions(-) diff --git a/kayak/runtime.go b/kayak/runtime.go index ed8476bd4..7c0b0083f 100644 --- a/kayak/runtime.go +++ b/kayak/runtime.go @@ -415,6 +415,7 @@ func (r *Runtime) FollowerApply(l *kt.Log) (err error) { var tmStart, tmEnd time.Time defer func() { + tmEnd = time.Now() log.WithFields(log.Fields{ "t": l.Type.String(), "i": l.Index, @@ -422,6 +423,7 @@ func (r *Runtime) FollowerApply(l *kt.Log) (err error) { }).WithError(err).Debug("kayak follower apply") }() + tmStart = time.Now() r.peersLock.RLock() defer r.peersLock.RUnlock() @@ -482,24 +484,51 @@ func (r *Runtime) doCheck(req interface{}) (err error) { } func (r *Runtime) followerPrepare(l *kt.Log) (err error) { + var ( + tmStart = time.Now() + + tmDecode, tmCheck, tmWriteWAL, tmMark time.Time + ) + + defer func() { + var fields = log.Fields{"index": l.Index} + if tmDecode.After(tmStart) { + fields["decode"] = tmDecode.Sub(tmStart).Nanoseconds() + } + if tmCheck.After(tmDecode) { + fields["check"] = 
tmCheck.Sub(tmDecode).Nanoseconds() + } + if tmWriteWAL.After(tmCheck) { + fields["write_wal"] = tmWriteWAL.Sub(tmCheck).Nanoseconds() + } + if tmMark.After(tmWriteWAL) { + fields["mark"] = tmMark.Sub(tmWriteWAL).Nanoseconds() + } + log.WithFields(fields).Debug("kayak follower prepare stat") + }() + // decode var req interface{} if req, err = r.sh.DecodePayload(l.Data); err != nil { err = errors.Wrap(err, "decode kayak payload failed") return } + tmDecode = time.Now() if err = r.doCheck(req); err != nil { return } + tmCheck = time.Now() // write log if err = r.wal.Write(l); err != nil { err = errors.Wrap(err, "write follower prepare log failed") return } + tmWriteWAL = time.Now() r.markPendingPrepare(l.Index) + tmMark = time.Now() return } @@ -528,25 +557,59 @@ func (r *Runtime) followerRollback(l *kt.Log) (err error) { } func (r *Runtime) followerCommit(l *kt.Log) (err error) { - var prepareLog *kt.Log - var lastCommit uint64 + var ( + prepareLog *kt.Log + lastCommit uint64 + cResult *commitResult + tmStart = time.Now() + + tmGetPrepareLog, tmCheckPrepareFinished, tmCommitDequeue, tmMark time.Time + ) + + defer func() { + var fields = log.Fields{ + "index": l.Index, + } + if tmGetPrepareLog.After(tmStart) { + fields["get_prepare_log"] = tmGetPrepareLog.Sub(tmStart).Nanoseconds() + } + if tmCheckPrepareFinished.After(tmGetPrepareLog) { + fields["check_prepare_finish"] = + tmCheckPrepareFinished.Sub(tmGetPrepareLog).Nanoseconds() + } + if cResult != nil && cResult.dbCost > 0 { + fields["database_cost"] = cResult.dbCost.Nanoseconds() + } + if tmCommitDequeue.After(tmCheckPrepareFinished) { + fields["commit_dequeue"] = tmCommitDequeue.Sub(tmCheckPrepareFinished).Nanoseconds() + } + if tmMark.After(tmCommitDequeue) { + fields["commit_dequeue"] = tmMark.Sub(tmCommitDequeue).Nanoseconds() + } + log.WithFields(fields).Debug("kayak follower commit stat") + }() + if lastCommit, prepareLog, err = r.getPrepareLog(l); err != nil { err = errors.Wrap(err, "get original request 
in commit failed") return } + tmGetPrepareLog = time.Now() // check if prepare already processed if r.checkIfPrepareFinished(prepareLog.Index) { err = errors.Wrap(kt.ErrInvalidLog, "prepare request already processed") return } + tmCheckPrepareFinished = time.Now() - cResult := <-r.followerCommitResult(context.Background(), l, prepareLog, lastCommit) + cResult = <-r.followerCommitResult(context.Background(), l, prepareLog, lastCommit) if cResult != nil { err = cResult.err } + tmCommitDequeue = time.Now() r.markPrepareFinished(l.Index) + tmMark = time.Now() return } @@ -708,6 +771,7 @@ func (r *Runtime) followerDoCommit(req *commitReq) (err error) { return } + var tmStart = time.Now() // check for last commit availability myLastCommit := atomic.LoadUint64(&r.lastCommit) if req.lastCommit != myLastCommit { @@ -730,7 +794,7 @@ func (r *Runtime) followerDoCommit(req *commitReq) (err error) { // mark last commit atomic.StoreUint64(&r.lastCommit, req.log.Index) - req.result <- &commitResult{err: err} + req.result <- &commitResult{err: err, dbCost: time.Since(tmStart)} return } From 7e27442a85ded2f8837b10a6e7521301d0323ce4 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Wed, 9 Jan 2019 11:44:13 +0800 Subject: [PATCH 097/302] Command line configurable log levels --- cmd/cql-minerd/main.go | 6 ++++-- cmd/cql-mysql-adapter/main.go | 3 +++ cmd/cql-observer/main.go | 6 ++++-- cmd/cqld/main.go | 8 +++++--- utils/log/logwrapper.go | 14 ++++++++++++++ 5 files changed, 30 insertions(+), 7 deletions(-) diff --git a/cmd/cql-minerd/main.go b/cmd/cql-minerd/main.go index e922164e0..cae5c5723 100644 --- a/cmd/cql-minerd/main.go +++ b/cmd/cql-minerd/main.go @@ -81,6 +81,7 @@ var ( // other noLogo bool showVersion bool + logLevel string ) const name = `cql-minerd` @@ -101,6 +102,7 @@ func init() { flag.StringVar(&memProfile, "mem-profile", "", "Path to file for memory profiling information") flag.StringVar(&metricGraphite, "metricGraphiteServer", "", "Metric graphite server to push metrics") 
flag.StringVar(&traceFile, "traceFile", "", "trace profile") + flag.StringVar(&logLevel, "logLevel", "", "service log level") flag.Usage = func() { fmt.Fprintf(os.Stderr, "\n%s\n\n", desc) @@ -116,10 +118,10 @@ func initLogs() { } func main() { + flag.Parse() // set random rand.Seed(time.Now().UnixNano()) - log.SetLevel(log.InfoLevel) - flag.Parse() + log.SetStringLevel(logLevel, log.InfoLevel) if showVersion { fmt.Printf("%v %v %v %v %v\n", diff --git a/cmd/cql-mysql-adapter/main.go b/cmd/cql-mysql-adapter/main.go index a0322c9ee..89d799c3b 100644 --- a/cmd/cql-mysql-adapter/main.go +++ b/cmd/cql-mysql-adapter/main.go @@ -40,6 +40,7 @@ var ( mysqlUser string mysqlPassword string showVersion bool + logLevel string ) func init() { @@ -52,10 +53,12 @@ func init() { flag.StringVar(&listenAddr, "listen", "127.0.0.1:4664", "listen address for mysql adapter") flag.StringVar(&mysqlUser, "mysql-user", "root", "mysql user for adapter server") flag.StringVar(&mysqlPassword, "mysql-password", "calvin", "mysql password for adapter server") + flag.StringVar(&logLevel, "logLevel", "", "service log level") } func main() { flag.Parse() + log.SetStringLevel(logLevel, log.InfoLevel) if showVersion { fmt.Printf("%v %v %v %v %v\n", name, version, runtime.GOOS, runtime.GOARCH, runtime.Version()) diff --git a/cmd/cql-observer/main.go b/cmd/cql-observer/main.go index c21f9875a..e7db6b843 100644 --- a/cmd/cql-observer/main.go +++ b/cmd/cql-observer/main.go @@ -45,6 +45,7 @@ var ( listenAddr string resetPosition string showVersion bool + logLevel string ) func init() { @@ -55,13 +56,14 @@ func init() { "Disable signature sign and verify, for testing") flag.StringVar(&resetPosition, "reset", "", "reset subscribe position") flag.StringVar(&listenAddr, "listen", "127.0.0.1:4663", "listen address for http explorer api") + flag.StringVar(&logLevel, "logLevel", "", "service log level") } func main() { + flag.Parse() // set random rand.Seed(time.Now().UnixNano()) - log.SetLevel(log.DebugLevel) - 
flag.Parse() + log.SetStringLevel(logLevel, log.InfoLevel) if showVersion { fmt.Printf("%v %v %v %v %v\n", name, version, runtime.GOOS, runtime.GOARCH, runtime.Version()) diff --git a/cmd/cqld/main.go b/cmd/cqld/main.go index e52bd3463..0dc7071af 100644 --- a/cmd/cqld/main.go +++ b/cmd/cqld/main.go @@ -59,7 +59,8 @@ var ( clientMode bool clientOperation string - mode string // "normal", "api" + mode string // "normal", "api" + logLevel string ) const name = `cqld` @@ -78,6 +79,7 @@ func init() { flag.BoolVar(&clientMode, "client", false, "run as client") flag.StringVar(&clientOperation, "operation", "FindNeighbor", "client operation") flag.StringVar(&mode, "mode", "normal", "run mode, e.g. normal, api") + flag.StringVar(&logLevel, "logLevel", "", "service log level") flag.Usage = func() { fmt.Fprintf(os.Stderr, "\n%s\n\n", desc) @@ -93,10 +95,10 @@ func initLogs() { } func main() { + flag.Parse() + log.SetStringLevel(logLevel, log.InfoLevel) // set random rand.Seed(time.Now().UnixNano()) - log.SetLevel(log.DebugLevel) - flag.Parse() if showVersion { fmt.Printf("%v %v %v %v %v\n", diff --git a/utils/log/logwrapper.go b/utils/log/logwrapper.go index 72c1a37cc..0e528810f 100644 --- a/utils/log/logwrapper.go +++ b/utils/log/logwrapper.go @@ -209,6 +209,20 @@ func GetLevel() logrus.Level { return logrus.GetLevel() } +// ParseLevel parse the level string and returns the logger level. +func ParseLevel(lvl string) (logrus.Level, error) { + return logrus.ParseLevel(lvl) +} + +// SetStringLevel enforce current log level. +func SetStringLevel(lvl string, defaultLevel logrus.Level) { + if lvl, err := ParseLevel(lvl); err != nil { + SetLevel(defaultLevel) + } else { + SetLevel(lvl) + } +} + // AddHook adds a hook to the standard logger hooks. 
func AddHook(hook logrus.Hook) { logrus.AddHook(hook) From aad97a6bcb5fd98320b15ff6f4c5e5da16f18b62 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Wed, 9 Jan 2019 21:37:26 +0800 Subject: [PATCH 098/302] Normalize command-line flags --- cmd/cql-adapter/main.go | 2 +- cmd/cql-faucet/main.go | 2 +- cmd/cql-minerd/integration_test.go | 26 +++++++++++++------------- cmd/cql-minerd/main.go | 14 +++++++------- cmd/cql-mysql-adapter/README.md | 2 +- cmd/cql-mysql-adapter/main.go | 4 ++-- cmd/cql-observer/main.go | 4 ++-- cmd/cql/main.go | 2 +- cmd/cqld/main.go | 4 ++-- 9 files changed, 30 insertions(+), 30 deletions(-) diff --git a/cmd/cql-adapter/main.go b/cmd/cql-adapter/main.go index 9259a5704..100a5b552 100644 --- a/cmd/cql-adapter/main.go +++ b/cmd/cql-adapter/main.go @@ -42,7 +42,7 @@ var ( func init() { flag.StringVar(&configFile, "config", "./config.yaml", "config file for adapter") flag.StringVar(&password, "password", "", "master key password") - flag.BoolVar(&asymmetric.BypassSignature, "bypassSignature", false, + flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, "Disable signature sign and verify, for testing") flag.BoolVar(&showVersion, "version", false, "Show version information and exit") } diff --git a/cmd/cql-faucet/main.go b/cmd/cql-faucet/main.go index d1f67bd1e..14496f3f7 100644 --- a/cmd/cql-faucet/main.go +++ b/cmd/cql-faucet/main.go @@ -44,7 +44,7 @@ var ( func init() { flag.StringVar(&configFile, "config", "config.yaml", "configuration file for covenantsql") flag.StringVar(&password, "password", "", "master key password for covenantsql") - flag.BoolVar(&asymmetric.BypassSignature, "bypassSignature", false, + flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, "Disable signature sign and verify, for testing") flag.BoolVar(&showVersion, "version", false, "Show version information and exit") } diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index a9e2d1cfb..6bec0c894 100644 --- 
a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -187,7 +187,7 @@ func startNodesProfile(bypassSign bool) { ctx := context.Background() bypassArg := "" if bypassSign { - bypassArg = "-bypassSignature" + bypassArg = "-bypass-signature" } // wait for ports to be available @@ -256,10 +256,10 @@ func startNodesProfile(bypassSign bool) { FJ(baseDir, "./bin/cql-minerd"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_0/config.yaml"), "-cpu-profile", FJ(baseDir, "./cmd/cql-minerd/miner0.profile"), - //"-traceFile", FJ(baseDir, "./cmd/cql-minerd/miner0.trace"), - "-metricGraphiteServer", "192.168.2.100:2003", - "-profileServer", "0.0.0.0:8080", - "-metricLog", + //"-trace-file", FJ(baseDir, "./cmd/cql-minerd/miner0.trace"), + "-metric-graphite-server", "192.168.2.100:2003", + "-profile-server", "0.0.0.0:8080", + "-metric-log", bypassArg, }, "miner0", testWorkingDir, logDir, false, @@ -274,10 +274,10 @@ func startNodesProfile(bypassSign bool) { FJ(baseDir, "./bin/cql-minerd"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_1/config.yaml"), "-cpu-profile", FJ(baseDir, "./cmd/cql-minerd/miner1.profile"), - //"-traceFile", FJ(baseDir, "./cmd/cql-minerd/miner1.trace"), - "-metricGraphiteServer", "192.168.2.100:2003", - "-profileServer", "0.0.0.0:8081", - "-metricLog", + //"-trace-file", FJ(baseDir, "./cmd/cql-minerd/miner1.trace"), + "-metric-graphite-server", "192.168.2.100:2003", + "-profile-server", "0.0.0.0:8081", + "-metric-log", bypassArg, }, "miner1", testWorkingDir, logDir, false, @@ -292,10 +292,10 @@ func startNodesProfile(bypassSign bool) { FJ(baseDir, "./bin/cql-minerd"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_2/config.yaml"), "-cpu-profile", FJ(baseDir, "./cmd/cql-minerd/miner2.profile"), - //"-traceFile", FJ(baseDir, "./cmd/cql-minerd/miner2.trace"), - "-metricGraphiteServer", "192.168.2.100:2003", - "-profileServer", "0.0.0.0:8082", - "-metricLog", + //"-trace-file", 
FJ(baseDir, "./cmd/cql-minerd/miner2.trace"), + "-metric-graphite-server", "192.168.2.100:2003", + "-profile-server", "0.0.0.0:8082", + "-metric-log", bypassArg, }, "miner2", testWorkingDir, logDir, false, diff --git a/cmd/cql-minerd/main.go b/cmd/cql-minerd/main.go index cae5c5723..4736f4d3e 100644 --- a/cmd/cql-minerd/main.go +++ b/cmd/cql-minerd/main.go @@ -89,20 +89,20 @@ const desc = `CovenantSQL is a Distributed Database running on BlockChain` func init() { flag.BoolVar(&noLogo, "nologo", false, "Do not print logo") - flag.BoolVar(&metricLog, "metricLog", false, "Print metrics in log") + flag.BoolVar(&metricLog, "metric-log", false, "Print metrics in log") flag.BoolVar(&showVersion, "version", false, "Show version information and exit") - flag.BoolVar(&genKeyPair, "genKeyPair", false, "Gen new key pair when no private key found") - flag.BoolVar(&asymmetric.BypassSignature, "bypassSignature", false, + flag.BoolVar(&genKeyPair, "gen-keypair", false, "Gen new key pair when no private key found") + flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, "Disable signature sign and verify, for testing") flag.StringVar(&configFile, "config", "./config.yaml", "Config file path") - flag.StringVar(&profileServer, "profileServer", "", "Profile server address, default not started") + flag.StringVar(&profileServer, "profile-server", "", "Profile server address, default not started") flag.StringVar(&cpuProfile, "cpu-profile", "", "Path to file for CPU profiling information") flag.StringVar(&memProfile, "mem-profile", "", "Path to file for memory profiling information") - flag.StringVar(&metricGraphite, "metricGraphiteServer", "", "Metric graphite server to push metrics") - flag.StringVar(&traceFile, "traceFile", "", "trace profile") - flag.StringVar(&logLevel, "logLevel", "", "service log level") + flag.StringVar(&metricGraphite, "metric-graphite-server", "", "Metric graphite server to push metrics") + flag.StringVar(&traceFile, "trace-file", "", "trace 
profile") + flag.StringVar(&logLevel, "log-level", "", "service log level") flag.Usage = func() { fmt.Fprintf(os.Stderr, "\n%s\n\n", desc) diff --git a/cmd/cql-mysql-adapter/README.md b/cmd/cql-mysql-adapter/README.md index c5a0eee7f..73eeaa4b3 100644 --- a/cmd/cql-mysql-adapter/README.md +++ b/cmd/cql-mysql-adapter/README.md @@ -33,7 +33,7 @@ Avaiable command-line arguments are: ```shell $ cql-mysql-adapter --help Usage of ./cql-mysql-adapter: - -bypassSignature + -bypass-signature Disable signature sign and verify, for testing -config string config file for mysql adapter (default "./config.yaml") diff --git a/cmd/cql-mysql-adapter/main.go b/cmd/cql-mysql-adapter/main.go index 89d799c3b..aad7ce4b0 100644 --- a/cmd/cql-mysql-adapter/main.go +++ b/cmd/cql-mysql-adapter/main.go @@ -46,14 +46,14 @@ var ( func init() { flag.StringVar(&configFile, "config", "./config.yaml", "config file for mysql adapter") flag.StringVar(&password, "password", "", "master key password") - flag.BoolVar(&asymmetric.BypassSignature, "bypassSignature", false, + flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, "Disable signature sign and verify, for testing") flag.BoolVar(&showVersion, "version", false, "Show version information and exit") flag.StringVar(&listenAddr, "listen", "127.0.0.1:4664", "listen address for mysql adapter") flag.StringVar(&mysqlUser, "mysql-user", "root", "mysql user for adapter server") flag.StringVar(&mysqlPassword, "mysql-password", "calvin", "mysql password for adapter server") - flag.StringVar(&logLevel, "logLevel", "", "service log level") + flag.StringVar(&logLevel, "log-level", "", "service log level") } func main() { diff --git a/cmd/cql-observer/main.go b/cmd/cql-observer/main.go index e7db6b843..620ca8068 100644 --- a/cmd/cql-observer/main.go +++ b/cmd/cql-observer/main.go @@ -52,11 +52,11 @@ func init() { flag.StringVar(&configFile, "config", "./config.yaml", "config file path") flag.StringVar(&dbID, "database", "", "database to listen 
for observation") flag.BoolVar(&showVersion, "version", false, "Show version information and exit") - flag.BoolVar(&asymmetric.BypassSignature, "bypassSignature", false, + flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, "Disable signature sign and verify, for testing") flag.StringVar(&resetPosition, "reset", "", "reset subscribe position") flag.StringVar(&listenAddr, "listen", "127.0.0.1:4663", "listen address for http explorer api") - flag.StringVar(&logLevel, "logLevel", "", "service log level") + flag.StringVar(&logLevel, "log-level", "", "service log level") } func main() { diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 1c0efd6a6..05225b3e4 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -201,7 +201,7 @@ func init() { flag.StringVar(&fileName, "file", "", "execute commands from file and exit") flag.BoolVar(&showVersion, "version", false, "Show version information and exit") flag.BoolVar(&noRC, "no-rc", false, "do not read start up file") - flag.BoolVar(&asymmetric.BypassSignature, "bypassSignature", false, + flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, "Disable signature sign and verify, for testing") flag.StringVar(&outFile, "out", "", "output file") flag.StringVar(&configFile, "config", "config.yaml", "config file for covenantsql") diff --git a/cmd/cqld/main.go b/cmd/cqld/main.go index 0dc7071af..c1c117ac0 100644 --- a/cmd/cqld/main.go +++ b/cmd/cqld/main.go @@ -69,7 +69,7 @@ const desc = `CovenantSQL is a Distributed Database running on BlockChain` func init() { flag.BoolVar(&noLogo, "nologo", false, "Do not print logo") flag.BoolVar(&showVersion, "version", false, "Show version information and exit") - flag.BoolVar(&asymmetric.BypassSignature, "bypassSignature", false, + flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, "Disable signature sign and verify, for testing") flag.StringVar(&configFile, "config", "./config.yaml", "Config file path") @@ -79,7 +79,7 @@ func init() { 
flag.BoolVar(&clientMode, "client", false, "run as client") flag.StringVar(&clientOperation, "operation", "FindNeighbor", "client operation") flag.StringVar(&mode, "mode", "normal", "run mode, e.g. normal, api") - flag.StringVar(&logLevel, "logLevel", "", "service log level") + flag.StringVar(&logLevel, "log-level", "", "service log level") flag.Usage = func() { fmt.Fprintf(os.Stderr, "\n%s\n\n", desc) From 1ce930003fb67f88fb5d06deb5f7122e4f42258c Mon Sep 17 00:00:00 2001 From: Ggicci Date: Wed, 9 Jan 2019 21:40:09 +0800 Subject: [PATCH 099/302] Register API node to BP DHT network --- cmd/cqld/main.go | 62 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/cmd/cqld/main.go b/cmd/cqld/main.go index e52bd3463..ab51b6673 100644 --- a/cmd/cqld/main.go +++ b/cmd/cqld/main.go @@ -22,13 +22,19 @@ import ( "math/rand" "os" "runtime" + "strings" "time" "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/route" + "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/pkg/errors" ) const logo = ` @@ -116,9 +122,17 @@ func main() { kms.InitBP() log.Debugf("config:\n%#v", conf.GConf) + // BP DO NOT Generate new key pair conf.GConf.GenerateKeyPair = false + if mode == "api" { + if err = registerNodeToBP(30 * time.Second); err != nil { + log.WithError(err).Fatal("register node to BP") + return + } + } + // init log initLogs() @@ -145,3 +159,51 @@ func main() { log.Info("server stopped") } + +func registerNodeToBP(timeout time.Duration) (err error) { + // get local node id + localNodeID, err := kms.GetLocalNodeID() + if err != nil { + return errors.WithMessage(err, "get local node id") + } + + // get local node info + 
localNodeInfo, err := kms.GetNodeInfo(localNodeID) + if err != nil { + return errors.WithMessage(err, "get local node info") + } + + log.WithField("node", localNodeInfo).Debug("construct local node info") + + pingWaitCh := make(chan proto.NodeID) + bpNodeIDs := route.GetBPs() + for _, bpNodeID := range bpNodeIDs { + go func(ch chan proto.NodeID, id proto.NodeID) { + for { + err := rpc.PingBP(localNodeInfo, id) + if err == nil { + log.WithField("node", localNodeInfo).Info("ping BP node") + ch <- id + return + } + if strings.Contains(err.Error(), kt.ErrNotLeader.Error()) { + log.Debug("stop ping non-leader BP node") + return + } + + log.WithField("node", localNodeInfo).WithError(err).Error("ping BP node") + time.Sleep(3 * time.Second) + } + }(pingWaitCh, bpNodeID) + } + + select { + case bp := <-pingWaitCh: + close(pingWaitCh) + log.WithField("BP", bp).Infof("ping BP node") + case <-time.After(timeout): + return errors.New("ping BP timeout") + } + + return +} From 02a137293fd7b86df5017f7d46e9a4138d386db7 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Wed, 9 Jan 2019 22:40:54 +0800 Subject: [PATCH 100/302] Fix API node register DHT failed --- cmd/cqld/bootstrap.go | 7 +++++++ cmd/cqld/main.go | 11 ++--------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/cmd/cqld/bootstrap.go b/cmd/cqld/bootstrap.go index 893978090..ead3a69ce 100644 --- a/cmd/cqld/bootstrap.go +++ b/cmd/cqld/bootstrap.go @@ -79,6 +79,13 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { return } + if mode == "api" { + if err = registerNodeToBP(30 * time.Second); err != nil { + log.WithError(err).Fatal("register node to BP") + return + } + } + var server *rpc.Server // create server diff --git a/cmd/cqld/main.go b/cmd/cqld/main.go index 1394e4bd1..16e3a52d3 100644 --- a/cmd/cqld/main.go +++ b/cmd/cqld/main.go @@ -61,8 +61,8 @@ var ( noLogo bool showVersion bool configFile string - wsapiAddr string - mode string // "normal", "api" + wsapiAddr string + mode string // 
"normal", "api" ) const name = `cqld` @@ -122,13 +122,6 @@ func main() { // BP DO NOT Generate new key pair conf.GConf.GenerateKeyPair = false - if mode == "api" { - if err = registerNodeToBP(30 * time.Second); err != nil { - log.WithError(err).Fatal("register node to BP") - return - } - } - // init log initLogs() From 0b8d3869fde3637fc2f03c5e9589cf50ce45b6aa Mon Sep 17 00:00:00 2001 From: Ggicci Date: Thu, 10 Jan 2019 01:03:52 +0800 Subject: [PATCH 101/302] Suppress unused services in API node --- cmd/cqld/bootstrap.go | 70 ++++++++++++++++++++++--------------------- 1 file changed, 36 insertions(+), 34 deletions(-) diff --git a/cmd/cqld/bootstrap.go b/cmd/cqld/bootstrap.go index ead3a69ce..f01a76c19 100644 --- a/cmd/cqld/bootstrap.go +++ b/cmd/cqld/bootstrap.go @@ -105,43 +105,45 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { server.Stop() }() - // init storage - log.Info("init storage") - var st *LocalStorage - if st, err = initStorage(conf.GConf.DHTFileName); err != nil { - log.WithError(err).Error("init storage failed") - return - } + if mode == "normal" { + // init storage + log.Info("init storage") + var st *LocalStorage + if st, err = initStorage(conf.GConf.DHTFileName); err != nil { + log.WithError(err).Error("init storage failed") + return err + } - // init kayak - log.Info("init kayak runtime") - var kayakRuntime *kayak.Runtime - if kayakRuntime, err = initKayakTwoPC(rootPath, thisNode, peers, st, server); err != nil { - log.WithError(err).Error("init kayak runtime failed") - return - } + // init kayak + log.Info("init kayak runtime") + var kayakRuntime *kayak.Runtime + if kayakRuntime, err = initKayakTwoPC(rootPath, thisNode, peers, st, server); err != nil { + log.WithError(err).Error("init kayak runtime failed") + return err + } - // init kayak and consistent - log.Info("init kayak and consistent runtime") - kvServer := &KayakKVServer{ - Runtime: kayakRuntime, - KVStorage: st, - } - dht, err := 
route.NewDHTService(conf.GConf.DHTFileName, kvServer, true) - if err != nil { - log.WithError(err).Error("init consistent hash failed") - return - } + // init kayak and consistent + log.Info("init kayak and consistent runtime") + kvServer := &KayakKVServer{ + Runtime: kayakRuntime, + KVStorage: st, + } + dht, err := route.NewDHTService(conf.GConf.DHTFileName, kvServer, true) + if err != nil { + log.WithError(err).Error("init consistent hash failed") + return err + } - // set consistent handler to kayak storage - kvServer.KVStorage.consistent = dht.Consistent + // set consistent handler to kayak storage + kvServer.KVStorage.consistent = dht.Consistent - // register service rpc - log.Info("register dht service rpc") - err = server.RegisterService(route.DHTRPCName, dht) - if err != nil { - log.WithError(err).Error("register dht service failed") - return + // register service rpc + log.Info("register dht service rpc") + err = server.RegisterService(route.DHTRPCName, dht) + if err != nil { + log.WithError(err).Error("register dht service failed") + return err + } } // init main chain service @@ -159,7 +161,7 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { chain, err := bp.NewChain(chainConfig) if err != nil { log.WithError(err).Error("init chain failed") - return + return err } chain.Start() defer chain.Stop() From 8613a4937cf654fdba0a3116d71e7948c2eaf187 Mon Sep 17 00:00:00 2001 From: zeqing-guo Date: Thu, 10 Jan 2019 11:00:19 +0800 Subject: [PATCH 102/302] Add wait function for billing test --- cmd/cql-minerd/integration_test.go | 70 +++++++++++++++++++++--------- 1 file changed, 49 insertions(+), 21 deletions(-) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index 6bec0c894..dbe5258f1 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -543,30 +543,28 @@ func TestFullProcess(t *testing.T) { c.So(err, ShouldBeNil) }) - time.Sleep(20 * time.Second) - - profileReq = 
&types.QuerySQLChainProfileReq{} - profileResp = &types.QuerySQLChainProfileResp{} - profileReq.DBID = dbID - err = rpc.RequestBP(route.MCCQuerySQLChainProfile.String(), profileReq, profileResp) + ctx2, ccl2 := context.WithTimeout(context.Background(), 1*time.Minute) + defer ccl2() + err = waitProfileChecking(ctx2, 3*time.Second, dbID, func(profile *types.SQLChainProfile) bool { + for _, user := range profile.Users { + if user.AdvancePayment != testAdvancePayment { + return true + } + } + return false + }) So(err, ShouldBeNil) - for _, user := range profileResp.Profile.Users { - log.Infof("user (%s) left advance payment: %d", user.Address.String(), user.AdvancePayment) - if user.AdvancePayment == testAdvancePayment { - time.Sleep(20 * time.Second) - break + + ctx3, ccl3 := context.WithTimeout(context.Background(), 1*time.Minute) + defer ccl3() + err = waitProfileChecking(ctx3, 3*time.Second, dbID, func(profile *types.SQLChainProfile) bool { + getIncome := false + for _, miner := range profile.Miners { + getIncome = getIncome || (miner.PendingIncome != 0 || miner.ReceivedIncome != 0) } - } - err = rpc.RequestBP(route.MCCQuerySQLChainProfile.String(), profileReq, profileResp) + return getIncome + }) So(err, ShouldBeNil) - for _, user := range profileResp.Profile.Users { - So(user.AdvancePayment, ShouldNotEqual, testAdvancePayment) - } - getIncome := false - for _, miner := range profileResp.Profile.Miners { - getIncome = getIncome || (miner.PendingIncome != 0 || miner.ReceivedIncome != 0) - } - So(getIncome, ShouldBeTrue) err = db.Close() So(err, ShouldBeNil) @@ -575,6 +573,36 @@ func TestFullProcess(t *testing.T) { }) } +func waitProfileChecking(ctx context.Context, period time.Duration, dbID proto.DatabaseID, + checkFunc func(profile *types.SQLChainProfile) bool) (err error) { + var ( + ticker = time.NewTicker(period) + req = &types.QuerySQLChainProfileReq{} + resp = &types.QuerySQLChainProfileResp{} + ) + defer ticker.Stop() + req.DBID = dbID + + for { + select { 
+ case <-ticker.C: + err = rpc.RequestBP(route.MCCQuerySQLChainProfile.String(), req, resp) + if err == nil { + if checkFunc(&resp.Profile) { + return + } + log.WithFields(log.Fields{ + "dbID": resp.Profile.Address, + "num_of_user": len(resp.Profile.Users), + }).Debugf("get profile but failed to check in waitProfileChecking") + } + case <-ctx.Done(): + err = ctx.Err() + return + } + } +} + const ROWSTART = 1000000 const TABLENAME = "insert_table0" From 84f6fcdab98d03a76fe8c3f918c25110d1317358 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Thu, 10 Jan 2019 11:43:55 +0800 Subject: [PATCH 103/302] Retry to connect to wsapi in TestJSONRPCService --- api/service_test.go | 45 +++++++++++++++++++++++++++++---------------- 1 file changed, 29 insertions(+), 16 deletions(-) diff --git a/api/service_test.go b/api/service_test.go index e9ab62306..6e922dfd4 100644 --- a/api/service_test.go +++ b/api/service_test.go @@ -7,6 +7,7 @@ import ( "path/filepath" "strconv" "testing" + "time" "github.com/CovenantSQL/CovenantSQL/api" "github.com/CovenantSQL/CovenantSQL/api/models" @@ -132,23 +133,35 @@ func mockData(t *testing.T) { } func setupWebsocketClient(addr string) (client *jsonrpc2.Conn, err error) { - // TODO: dial timeout - conn, _, err := websocket.DefaultDialer.DialContext( - context.Background(), - addr, - nil, - ) - if err != nil { - return nil, err + var dial = func(ctx context.Context, addr string) (client *jsonrpc2.Conn, err error) { + conn, _, err := websocket.DefaultDialer.DialContext( + context.Background(), + addr, + nil, + ) + if err != nil { + return nil, err + } + + var connOpts []jsonrpc2.ConnOpt + return jsonrpc2.NewConn( + context.Background(), + wsstream.NewObjectStream(conn), + nil, + connOpts..., + ), nil + } + + for i := 0; i < 3; i++ { + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + client, err = dial(ctx, addr) + if err == nil { + break + } } - var connOpts []jsonrpc2.ConnOpt - return jsonrpc2.NewConn( - 
context.Background(), - wsstream.NewObjectStream(conn), - nil, - connOpts..., - ), nil + return client, err } type bpGetBlockTestCase struct { @@ -185,7 +198,7 @@ func (c *bpGetTransactionByHashTestCase) String() string { return fmt.Sprintf("fetch transaction hashed %q", c.Hash) } -func TestService(t *testing.T) { +func TestJSONRPCService(t *testing.T) { t.Logf("testdb: %s", testdb) mockData(t) defer os.Remove(testdb + "-shm") From e1bb2a1690a77e4f021f938e75cc94f945ce214f Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 8 Jan 2019 17:58:16 +0800 Subject: [PATCH 104/302] Add database id field to sqlchain logs --- sqlchain/chain.go | 60 ++++++++++++++++++++++++++++++++++------------- 1 file changed, 44 insertions(+), 16 deletions(-) diff --git a/sqlchain/chain.go b/sqlchain/chain.go index cad0ec084..b68c2501f 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -162,7 +162,7 @@ func NewChainWithContext(ctx context.Context, c *Config) (chain *Chain, err erro return } - log.Debugf("create new chain bdb %s", bdbFile) + log.WithField("db", c.DatabaseID).Debugf("create new chain bdb %s", bdbFile) // Open LevelDB for ack/request/response tdbFile := c.ChainFilePrefix + "-ack-req-resp.ldb" @@ -172,7 +172,7 @@ func NewChainWithContext(ctx context.Context, c *Config) (chain *Chain, err erro return } - log.Debugf("create new chain tdb %s", tdbFile) + log.WithField("db", c.DatabaseID).Debugf("create new chain tdb %s", tdbFile) // Open x.State var ( @@ -197,7 +197,7 @@ func NewChainWithContext(ctx context.Context, c *Config) (chain *Chain, err erro } addr, err = crypto.PubKeyHash(pk.PubKey()) if err != nil { - log.WithError(err).Warning("failed to generate addr in NewChain") + log.WithError(err).WithField("db", c.DatabaseID).Warning("failed to generate addr in NewChain") return } @@ -283,7 +283,7 @@ func LoadChainWithContext(ctx context.Context, c *Config) (chain *Chain, err err } addr, err = crypto.PubKeyHash(pk.PubKey()) if err != nil { - 
log.WithError(err).Warning("failed to generate addr in LoadChain") + log.WithError(err).WithField("db", c.DatabaseID).Warning("failed to generate addr in LoadChain") return } @@ -328,6 +328,7 @@ func LoadChainWithContext(ctx context.Context, c *Config) (chain *Chain, err err log.WithFields(log.Fields{ "peer": chain.rt.getPeerInfoString(), "state": st, + "db": c.DatabaseID, }).Debug("loading state from database") // Read blocks and rebuild memory index @@ -355,6 +356,7 @@ func LoadChainWithContext(ctx context.Context, c *Config) (chain *Chain, err err log.WithFields(log.Fields{ "peer": chain.rt.getPeerInfoString(), "block": block.BlockHash().String(), + "db": c.DatabaseID, }).Debug("loading block from database") if last == nil { @@ -413,6 +415,7 @@ func LoadChainWithContext(ctx context.Context, c *Config) (chain *Chain, err err log.WithFields(log.Fields{ "height": h, "header": resp.Hash().String(), + "db": c.DatabaseID, }).Debug("loaded new resp header") } if err = respIter.Error(); err != nil { @@ -434,6 +437,7 @@ func LoadChainWithContext(ctx context.Context, c *Config) (chain *Chain, err err log.WithFields(log.Fields{ "height": h, "header": ack.Hash().String(), + "db": c.DatabaseID, }).Debug("loaded new ack header") } if err = respIter.Error(); err != nil { @@ -493,6 +497,7 @@ func (c *Chain) pushBlock(b *types.Block) (err error) { "index": i, "producer": b.Producer(), "block_hash": b.BlockHash(), + "db": c.databaseID, }).WithError(ierr).Warn("failed to add response to ackIndex") } } @@ -502,6 +507,7 @@ func (c *Chain) pushBlock(b *types.Block) (err error) { "index": i, "producer": b.Producer(), "block_hash": b.BlockHash(), + "db": c.databaseID, }).WithError(ierr).Warn("failed to remove Ack from ackIndex") } } @@ -524,6 +530,7 @@ func (c *Chain) pushBlock(b *types.Block) (err error) { return "|" }(), st.Head.String()[:8]), "headHeight": c.rt.getHead().Height, + "db": c.databaseID, }).Info("pushed new block") } @@ -532,7 +539,7 @@ func (c *Chain) pushBlock(b 
*types.Block) (err error) { // pushAckedQuery pushes a acknowledged, signed and verified query into the chain. func (c *Chain) pushAckedQuery(ack *types.SignedAckHeader) (err error) { - log.Debugf("push ack %s", ack.Hash().String()) + log.WithField("db", c.databaseID).Debugf("push ack %s", ack.Hash().String()) h := c.rt.getHeightFromTime(ack.SignedResponseHeader().Timestamp) k := heightToKey(h) var enc *bytes.Buffer @@ -613,6 +620,7 @@ func (c *Chain) produceBlock(now time.Time) (err error) { "curr_turn": c.rt.getNextTurn(), "using_timestamp": now.Format(time.RFC3339Nano), "block_hash": block.BlockHash().String(), + "db": c.databaseID, }).Debug("produced new block") // Advise new block to the other peers var ( @@ -652,6 +660,7 @@ func (c *Chain) produceBlock(now time.Time) (err error) { "curr_turn": c.rt.getNextTurn(), "using_timestamp": now.Format(time.RFC3339Nano), "block_hash": block.BlockHash().String(), + "db": c.databaseID, }).WithError(err).Error("failed to advise new block") } }(s) @@ -693,6 +702,7 @@ func (c *Chain) syncHead() { "curr_turn": c.rt.getNextTurn(), "head_height": c.rt.getHead().Height, "head_block": c.rt.getHead().Head.String(), + "db": c.databaseID, }).WithError(err).Debug( "Failed to fetch block from peer") } else { @@ -710,6 +720,7 @@ func (c *Chain) syncHead() { "curr_turn": c.rt.getNextTurn(), "head_height": c.rt.getHead().Height, "head_block": c.rt.getHead().Head.String(), + "db": c.databaseID, }).Debug( "Fetch block from remote peer successfully") succ = true @@ -725,6 +736,7 @@ func (c *Chain) syncHead() { "curr_turn": c.rt.getNextTurn(), "head_height": c.rt.getHead().Height, "head_block": c.rt.getHead().Head.String(), + "db": c.databaseID, }).Debug( "Cannot get block from any peer") } @@ -750,6 +762,7 @@ func (c *Chain) runCurrentTurn(now time.Time) { "head_height": c.rt.getHead().Height, "head_block": c.rt.getHead().Head.String(), "using_timestamp": now.Format(time.RFC3339Nano), + "db": c.databaseID, }).Debug("run current turn") if 
c.rt.getHead().Height < c.rt.getNextTurn()-1 { @@ -760,6 +773,7 @@ func (c *Chain) runCurrentTurn(now time.Time) { "head_height": c.rt.getHead().Height, "head_block": c.rt.getHead().Head.String(), "using_timestamp": now.Format(time.RFC3339Nano), + "db": c.databaseID, }).Error("A block will be skipped") } @@ -773,6 +787,7 @@ func (c *Chain) runCurrentTurn(now time.Time) { "time": c.rt.getChainTimeString(), "curr_turn": c.rt.getNextTurn(), "using_timestamp": now.Format(time.RFC3339Nano), + "db": c.databaseID, }).WithError(err).Error( "Failed to produce block") } @@ -796,6 +811,7 @@ func (c *Chain) mainCycle(ctx context.Context) { // "head_block": c.rt.getHead().Head.String(), // "using_timestamp": t.Format(time.RFC3339Nano), // "duration": d, + // "db": c.databaseID, //}).Debug("main cycle") time.Sleep(d) } else { @@ -810,6 +826,7 @@ func (c *Chain) sync() (err error) { log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), "time": c.rt.getChainTimeString(), + "db": c.databaseID, }).Debug("synchronizing chain state") for { @@ -861,6 +878,7 @@ func (c *Chain) processBlocks(ctx context.Context) { log.WithFields(log.Fields{ "height": h, "stashs": len(stash), + "db": c.databaseID, }).Debug("read new height from channel") if stash != nil { wg.Add(1) @@ -877,6 +895,7 @@ func (c *Chain) processBlocks(ctx context.Context) { "head_block": c.rt.getHead().Head.String(), "block_height": height, "block_hash": block.BlockHash().String(), + "db": c.databaseID, }).Debug("processing new block") if height > c.rt.getNextTurn()-1 { @@ -896,6 +915,7 @@ func (c *Chain) processBlocks(ctx context.Context) { "head_block": c.rt.getHead().Head.String(), "block_height": height, "block_hash": block.BlockHash().String(), + "db": c.databaseID, }).WithError(err).Error("Failed to check and push new block") } else { head := c.rt.getHead() @@ -903,7 +923,7 @@ func (c *Chain) processBlocks(ctx context.Context) { if currentCount%c.updatePeriod == 0 { ub, err := c.billing(head.node) if err != nil { 
- log.WithError(err).Error("billing failed") + log.WithError(err).WithField("db", c.databaseID).Error("billing failed") } // allocate nonce nonceReq := &types.NextAccountNonceReq{} @@ -911,20 +931,20 @@ func (c *Chain) processBlocks(ctx context.Context) { nonceReq.Addr = *c.addr if err = rpc.RequestBP(route.MCCNextAccountNonce.String(), nonceReq, nonceResp); err != nil { // allocate nonce failed - log.WithError(err).Warning("allocate nonce for transaction failed") + log.WithError(err).WithField("db", c.databaseID).Warning("allocate nonce for transaction failed") } ub.Nonce = nonceResp.Nonce if err = ub.Sign(c.pk); err != nil { - log.WithError(err).Warning("sign tx failed") + log.WithError(err).WithField("db", c.databaseID).Warning("sign tx failed") } addTxReq := &types.AddTxReq{TTL: 1} addTxResp := &types.AddTxResp{} addTxReq.Tx = ub - log.Debugf("nonce in processBlocks: %d, addr: %s", + log.WithField("db", c.databaseID).Debugf("nonce in processBlocks: %d, addr: %s", addTxReq.Tx.GetAccountNonce(), addTxReq.Tx.GetAccountAddress()) if err = rpc.RequestBP(route.MCCAddTx.String(), addTxReq, addTxResp); err != nil { - log.WithError(err).Warning("send tx failed") + log.WithError(err).WithField("db", c.databaseID).Warning("send tx failed") } } } @@ -957,11 +977,13 @@ func (c *Chain) Stop() (err error) { log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), "time": c.rt.getChainTimeString(), + "db": c.databaseID, }).Debug("stopping chain") c.rt.stop(c.databaseID) log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), "time": c.rt.getChainTimeString(), + "db": c.databaseID, }).Debug("chain service and workers stopped") // Close LevelDB file var ierr error @@ -971,6 +993,7 @@ func (c *Chain) Stop() (err error) { log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), "time": c.rt.getChainTimeString(), + "db": c.databaseID, }).WithError(ierr).Debug("chain database closed") if ierr = c.tdb.Close(); ierr != nil && err == nil { err = ierr @@ -978,6 +1001,7 @@ 
func (c *Chain) Stop() (err error) { log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), "time": c.rt.getChainTimeString(), + "db": c.databaseID, }).WithError(ierr).Debug("chain database closed") // Close state if ierr = c.st.Close(false); ierr != nil && err == nil { @@ -986,6 +1010,7 @@ func (c *Chain) Stop() (err error) { log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), "time": c.rt.getChainTimeString(), + "db": c.databaseID, }).WithError(ierr).Debug("chain state storage closed") return } @@ -1035,6 +1060,7 @@ func (c *Chain) CheckAndPushNewBlock(block *types.Block) (err error) { "blockparent": block.ParentHash().String(), "headblock": head.Head.String(), "headheight": head.Height, + "db": c.databaseID, }).WithError(err).Debug("checking new block from other peer") if head.Height == height && head.Head.IsEqual(block.BlockHash()) { @@ -1067,6 +1093,7 @@ func (c *Chain) CheckAndPushNewBlock(block *types.Block) (err error) { "time": c.rt.getChainTimeString(), "expected": next, "actual": index, + "db": c.databaseID, }).WithError(err).Error( "Failed to check new block") return ErrInvalidProducer @@ -1235,13 +1262,14 @@ func (c *Chain) stat() { "response_header_count": rc, "query_tracker_count": tc, "cached_block_count": bc, + "db": c.databaseID, }).Info("chain mem stats") // Print xeno stats c.st.Stat(c.databaseID) } func (c *Chain) billing(node *blockNode) (ub *types.UpdateBilling, err error) { - log.Debugf("begin to billing from count %d", node.count) + log.WithField("db", c.databaseID).Debugf("begin to billing from count %d", node.count) var ( i, j uint64 minerAddr proto.AccountAddress @@ -1260,11 +1288,11 @@ func (c *Chain) billing(node *blockNode) (ub *types.UpdateBilling, err error) { } for _, tx := range block.QueryTxs { if minerAddr, err = crypto.PubKeyHash(tx.Response.Signee); err != nil { - log.WithError(err).Warning("billing fail: miner addr") + log.WithError(err).WithField("db", c.databaseID).Warning("billing fail: miner addr") return } 
if userAddr, err = crypto.PubKeyHash(tx.Request.Header.Signee); err != nil { - log.WithError(err).Warning("billing fail: miner addr") + log.WithError(err).WithField("db", c.databaseID).Warning("billing fail: miner addr") return } @@ -1285,11 +1313,11 @@ func (c *Chain) billing(node *blockNode) (ub *types.UpdateBilling, err error) { for _, req := range block.FailedReqs { if minerAddr, err = crypto.PubKeyHash(block.Signee()); err != nil { - log.WithError(err).Warning("billing fail: miner addr") + log.WithError(err).WithField("db", c.databaseID).Warning("billing fail: miner addr") return } if userAddr, err = crypto.PubKeyHash(req.Header.Signee); err != nil { - log.WithError(err).Warning("billing fail: user addr") + log.WithError(err).WithField("db", c.databaseID).Warning("billing fail: user addr") return } @@ -1306,7 +1334,7 @@ func (c *Chain) billing(node *blockNode) (ub *types.UpdateBilling, err error) { i = 0 j = 0 for userAddr, cost := range usersMap { - log.Debugf("user %s, cost %d", userAddr.String(), cost) + log.WithField("db", c.databaseID).Debugf("user %s, cost %d", userAddr.String(), cost) ub.Users[i] = &types.UserCost{ User: userAddr, Cost: cost, From 8b711081fe2c09ad9223c42ea1892c445811bd89 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Wed, 9 Jan 2019 21:20:41 +0800 Subject: [PATCH 105/302] Ignore read query on block replay --- xenomint/state.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/xenomint/state.go b/xenomint/state.go index fe41483ac..eccdd22ff 100644 --- a/xenomint/state.go +++ b/xenomint/state.go @@ -471,6 +471,9 @@ func (s *State) ReplayBlockWithContext(ctx context.Context, block *types.Block) s.Lock() defer s.Unlock() for i, q := range block.QueryTxs { + if q.Request.Header.QueryType == types.ReadQuery { + continue + } var query = &QueryTracker{Req: q.Request, Resp: &types.Response{Header: *q.Response}} lastsp = s.getSeq() if q.Response.ResponseHeader.LogOffset > lastsp { @@ -487,9 +490,6 @@ func (s *State) 
ReplayBlockWithContext(ctx context.Context, block *types.Block) } // Replay query for j, v := range q.Request.Payload.Queries { - if q.Request.Header.QueryType == types.ReadQuery { - continue - } if q.Request.Header.QueryType != types.WriteQuery { err = errors.Wrapf(ErrInvalidRequest, "replay block at %d:%d", i, j) return From a9fdbd1ad86f404c10727c3d119995bc0e374eda Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Thu, 10 Jan 2019 16:14:30 +0800 Subject: [PATCH 106/302] Update test case request query type --- xenomint/state_test.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/xenomint/state_test.go b/xenomint/state_test.go index c6dec59bd..e5b478a00 100644 --- a/xenomint/state_test.go +++ b/xenomint/state_test.go @@ -431,9 +431,12 @@ INSERT INTO t1 (k, v) VALUES (?, ?)`, concat(values[2:4])...), // Try to replay modified block #0 var blockx = &types.Block{ QueryTxs: []*types.QueryAsTx{ - &types.QueryAsTx{ + { Request: &types.Request{ Header: types.SignedRequestHeader{ + RequestHeader: types.RequestHeader{ + QueryType: types.WriteQuery, + }, DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ DataHash: [32]byte{ 0, 0, 0, 0, 0, 0, 0, 1, From 0040e4a84f3b79ecd4523328b89d87581181fb5a Mon Sep 17 00:00:00 2001 From: zeqing-guo Date: Thu, 10 Jan 2019 16:43:19 +0800 Subject: [PATCH 107/302] Fix nil map panic in SQLChain --- sqlchain/chain.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sqlchain/chain.go b/sqlchain/chain.go index cad0ec084..0cc7e80b0 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -1292,6 +1292,9 @@ func (c *Chain) billing(node *blockNode) (ub *types.UpdateBilling, err error) { log.WithError(err).Warning("billing fail: user addr") return } + if _, ok := minersMap[userAddr][minerAddr]; !ok { + minersMap[userAddr] = make(map[proto.AccountAddress]uint64) + } minersMap[userAddr][minerAddr] += uint64(len(req.Payload.Queries)) usersMap[userAddr] += uint64(len(req.Payload.Queries)) From 
34515a920aefeec09cc413723a30b0daf87cf5e2 Mon Sep 17 00:00:00 2001 From: zeqing-guo Date: Thu, 10 Jan 2019 16:50:31 +0800 Subject: [PATCH 108/302] Increate waiting time --- cmd/cql-minerd/integration_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index dbe5258f1..b190e2f6b 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -543,7 +543,7 @@ func TestFullProcess(t *testing.T) { c.So(err, ShouldBeNil) }) - ctx2, ccl2 := context.WithTimeout(context.Background(), 1*time.Minute) + ctx2, ccl2 := context.WithTimeout(context.Background(), 3*time.Minute) defer ccl2() err = waitProfileChecking(ctx2, 3*time.Second, dbID, func(profile *types.SQLChainProfile) bool { for _, user := range profile.Users { From 1619069b0837c6b4120721c73ecb88f33741a891 Mon Sep 17 00:00:00 2001 From: zeqing-guo Date: Thu, 10 Jan 2019 17:18:36 +0800 Subject: [PATCH 109/302] Prune code --- sqlchain/chain.go | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/sqlchain/chain.go b/sqlchain/chain.go index 0cc7e80b0..c45c01ae5 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -1268,16 +1268,13 @@ func (c *Chain) billing(node *blockNode) (ub *types.UpdateBilling, err error) { return } + if _, ok := minersMap[userAddr]; !ok { + minersMap[userAddr] = make(map[proto.AccountAddress]uint64) + } if tx.Request.Header.QueryType == types.ReadQuery { - if _, ok := minersMap[userAddr]; !ok { - minersMap[userAddr] = make(map[proto.AccountAddress]uint64) - } minersMap[userAddr][minerAddr] += tx.Response.RowCount usersMap[userAddr] += tx.Response.RowCount } else { - if _, ok := minersMap[userAddr]; !ok { - minersMap[userAddr] = make(map[proto.AccountAddress]uint64) - } minersMap[userAddr][minerAddr] += uint64(tx.Response.AffectedRows) usersMap[userAddr] += uint64(tx.Response.AffectedRows) } From 36927f93b293e6ec73f4a4202451d456217739a8 Mon Sep 17 00:00:00 
2001 From: auxten Date: Thu, 10 Jan 2019 18:30:52 +0800 Subject: [PATCH 110/302] Add benchmark Persistent Call parallel with 1k payload --- rpc/rpcutil_test.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/rpc/rpcutil_test.go b/rpc/rpcutil_test.go index 2c3ccef32..90a432095 100644 --- a/rpc/rpcutil_test.go +++ b/rpc/rpcutil_test.go @@ -357,7 +357,6 @@ func BenchmarkPersistentCaller_Call(b *testing.B) { }) b.Run("benchmark Persistent Call parallel Nil", func(b *testing.B) { - b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { @@ -369,6 +368,18 @@ func BenchmarkPersistentCaller_Call(b *testing.B) { }) }) + b.Run("benchmark Persistent Call parallel 1k", func(b *testing.B) { + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + err := client.Call("DHT.Nil", strings.Repeat("a", 1000), nil) + if err != nil { + b.Error(err) + } + } + }) + }) + req := &proto.FindNeighborReq{ ID: "1234567812345678123456781234567812345678123456781234567812345678", Count: 10, From a10f64e8b3c0289242abab5ec87bc9db34d4061d Mon Sep 17 00:00:00 2001 From: Ggicci Date: Thu, 10 Jan 2019 22:24:47 +0800 Subject: [PATCH 111/302] Fix API node bootstrap issue 1. use role Client to bootstrap the API node; 2. update BPGenesisInfo.Timestamp to a newer timestamp. 
--- blockproducer/bpinfo.go | 24 +++-- blockproducer/chain.go | 16 ++- cmd/cql-observer/config_test.go | 4 +- test/leak/client.yaml | 47 +++++---- test/leak/leader.yaml | 28 +++--- test/node_0/config.yaml | 136 +++++++++++++------------- test/pool/client.yaml | 47 +++++---- test/pool/leader.yaml | 28 +++--- test/service/apinode_0/config.yaml | 77 +++++++++++++++ test/service/node_0/config.yaml | 136 +++++++++++++------------- test/service/node_1/config.yaml | 136 +++++++++++++------------- test/service/node_2/config.yaml | 136 +++++++++++++------------- test/service/node_adapter/config.yaml | 136 +++++++++++++------------- test/service/node_c/config.yaml | 136 +++++++++++++------------- 14 files changed, 590 insertions(+), 497 deletions(-) create mode 100644 test/service/apinode_0/config.yaml diff --git a/blockproducer/bpinfo.go b/blockproducer/bpinfo.go index d89732dd3..c95a7ec52 100644 --- a/blockproducer/bpinfo.go +++ b/blockproducer/bpinfo.go @@ -35,7 +35,7 @@ func (i *blockProducerInfo) String() string { } func buildBlockProducerInfos( - localNodeID proto.NodeID, peers *proto.Peers, + localNodeID proto.NodeID, peers *proto.Peers, isAPINode bool, ) ( localBPInfo *blockProducerInfo, bpInfos []*blockProducerInfo, err error, ) { @@ -45,11 +45,6 @@ func buildBlockProducerInfos( found bool ) - if index, found = peers.Find(localNodeID); !found { - err = ErrLocalNodeNotFound - return - } - bpInfos = make([]*blockProducerInfo, total) for i, v := range peers.PeersHeader.Servers { var role = "F" @@ -63,6 +58,23 @@ func buildBlockProducerInfos( nodeID: v, } } + + if isAPINode { + localBPInfo = &blockProducerInfo{ + rank: 0, + total: uint32(total), + role: "A", + nodeID: localNodeID, + } + return localBPInfo, bpInfos, nil + } + + if index, found = peers.Find(localNodeID); !found { + err = ErrLocalNodeNotFound + return + } + localBPInfo = bpInfos[index] + return } diff --git a/blockproducer/chain.go b/blockproducer/chain.go index 83e7bc381..3c63038be 100644 --- 
a/blockproducer/chain.go +++ b/blockproducer/chain.go @@ -184,7 +184,7 @@ func NewChainWithContext(ctx context.Context, cfg *Config) (c *Chain, err error) } // Setup peer list - if localBPInfo, bpInfos, err = buildBlockProducerInfos(cfg.NodeID, cfg.Peers); err != nil { + if localBPInfo, bpInfos, err = buildBlockProducerInfos(cfg.NodeID, cfg.Peers, cfg.Mode == "api"); err != nil { return } if t = cfg.ConfirmThreshold; t <= 0.0 { @@ -905,16 +905,22 @@ func (c *Chain) getLocalBPInfo() *blockProducerInfo { return c.localBPInfo } +// getRemoteBPInfos remove this node from the peer list func (c *Chain) getRemoteBPInfos() (remoteBPInfos []*blockProducerInfo) { var localBPInfo, bpInfos = func() (*blockProducerInfo, []*blockProducerInfo) { c.RLock() defer c.RUnlock() return c.localBPInfo, c.bpInfos }() - remoteBPInfos = make([]*blockProducerInfo, 0, localBPInfo.total-1) - remoteBPInfos = append(remoteBPInfos, bpInfos[0:localBPInfo.rank]...) - remoteBPInfos = append(remoteBPInfos, bpInfos[localBPInfo.rank+1:]...) 
- return + + for _, info := range bpInfos { + if info.nodeID.IsEqual(&localBPInfo.nodeID) { + continue + } + remoteBPInfos = append(remoteBPInfos, info) + } + + return remoteBPInfos } func (c *Chain) lastIrreversibleBlock() *blockNode { diff --git a/cmd/cql-observer/config_test.go b/cmd/cql-observer/config_test.go index 9dc0d20c9..e542af247 100644 --- a/cmd/cql-observer/config_test.go +++ b/cmd/cql-observer/config_test.go @@ -89,7 +89,7 @@ BlockProducer: Producer: 0000000000000000000000000000000000000000000000000000000000000001 MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 - Timestamp: 2018-08-13T21:59:59.12Z + Timestamp: "2019-01-10T12:49:07+08:00" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: @@ -223,7 +223,7 @@ BlockProducer: Producer: 0000000000000000000000000000000000000000000000000000000000000001 MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 - Timestamp: 2018-08-13T21:59:59.12Z + Timestamp: "2019-01-10T12:49:07+08:00" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/leak/client.yaml b/test/leak/client.yaml index 77bee3fca..ca8416194 100644 --- a/test/leak/client.yaml +++ b/test/leak/client.yaml @@ -17,10 +17,10 @@ MinNodeIDDifficulty: 2 DNSSeed: EnforcedDNSSEC: false DNSServers: - - 1.1.1.1 - - 202.46.34.74 - - 202.46.34.75 - - 202.46.34.76 + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 BlockProducer: PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" @@ -37,24 +37,23 @@ BlockProducer: Producer: 0000000000000000000000000000000000000000000000000000000000000001 MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 ParentHash: 
0000000000000000000000000000000000000000000000000000000000000001 - Timestamp: 2018-08-13T21:59:59.12Z + Timestamp: "2019-01-10T12:49:07+08:00" KnownNodes: -- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 - Nonce: - a: 313283 - b: 0 - c: 0 - d: 0 - Addr: 127.0.0.1:2331 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Leader -- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d - Nonce: - a: 22403 - b: 0 - c: 0 - d: 0 - Addr: "" - PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 - Role: Client - + - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:2331 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader + - ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client diff --git a/test/leak/leader.yaml b/test/leak/leader.yaml index 93ddef97b..fdd9b219e 100644 --- a/test/leak/leader.yaml +++ b/test/leak/leader.yaml @@ -22,10 +22,10 @@ MinNodeIDDifficulty: 2 DNSSeed: EnforcedDNSSEC: false DNSServers: - - 1.1.1.1 - - 202.46.34.74 - - 202.46.34.75 - - 202.46.34.76 + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 BlockProducer: PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" @@ -42,14 +42,14 @@ BlockProducer: Producer: 0000000000000000000000000000000000000000000000000000000000000001 MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 - Timestamp: 2018-08-13T21:59:59.12Z + Timestamp: "2019-01-10T12:49:07+08:00" KnownNodes: -- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 - Nonce: - a: 313283 - b: 0 - c: 0 - d: 0 - Addr: 
127.0.0.1:2331 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Leader + - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:2331 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader diff --git a/test/node_0/config.yaml b/test/node_0/config.yaml index 52ce19b51..5993a318b 100644 --- a/test/node_0/config.yaml +++ b/test/node_0/config.yaml @@ -22,10 +22,10 @@ MinNodeIDDifficulty: 2 DNSSeed: EnforcedDNSSEC: false DNSServers: - - 1.1.1.1 - - 202.46.34.74 - - 202.46.34.75 - - 202.46.34.76 + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 BlockProducer: PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" @@ -42,68 +42,68 @@ BlockProducer: Producer: 0000000000000000000000000000000000000000000000000000000000000001 MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 - Timestamp: 2018-08-13T21:59:59.12Z + Timestamp: "2019-01-10T12:49:07+08:00" KnownNodes: -- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 - Nonce: - a: 313283 - b: 0 - c: 0 - d: 0 - Addr: 127.0.0.1:2122 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Leader -- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 - Nonce: - a: 478373 - b: 0 - c: 0 - d: 2305843009893772025 - Addr: 127.0.0.1:2121 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Follower -- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 - Nonce: - a: 259939 - b: 0 - c: 0 - d: 2305843012544226372 - Addr: 127.0.0.1:2120 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Follower -- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d - Nonce: - a: 22403 
- b: 0 - c: 0 - d: 0 - Addr: "" - PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 - Role: Client -- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade - Nonce: - a: 567323 - b: 0 - c: 0 - d: 3104982049 - Addr: 127.0.0.1:2144 - PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 - Role: Miner -- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 - Nonce: - a: 240524 - b: 0 - c: 0 - d: 2305843010430351476 - Addr: 127.0.0.1:2145 - PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 - Role: Miner -- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 - Nonce: - a: 606016 - b: 0 - c: 0 - d: 13835058056920509601 - Addr: 127.0.0.1:2146 - PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 - Role: Miner + - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:2122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader + - ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:2121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower + - ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:2120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower + - ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client + - ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:2144 + PublicKey: 
0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner + - ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:2145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner + - ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:2146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/pool/client.yaml b/test/pool/client.yaml index 7bb71b3ee..208481519 100644 --- a/test/pool/client.yaml +++ b/test/pool/client.yaml @@ -17,10 +17,10 @@ MinNodeIDDifficulty: 2 DNSSeed: EnforcedDNSSEC: false DNSServers: - - 1.1.1.1 - - 202.46.34.74 - - 202.46.34.75 - - 202.46.34.76 + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 BlockProducer: PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" @@ -37,24 +37,23 @@ BlockProducer: Producer: 0000000000000000000000000000000000000000000000000000000000000001 MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 - Timestamp: 2018-08-13T21:59:59.12Z + Timestamp: "2019-01-10T12:49:07+08:00" KnownNodes: -- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 - Nonce: - a: 313283 - b: 0 - c: 0 - d: 0 - Addr: 127.0.0.1:2530 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Leader -- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d - Nonce: - a: 22403 - b: 0 - c: 0 - d: 0 - Addr: "" - PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 - Role: Client - + - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:2530 + PublicKey: 
"02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader + - ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client diff --git a/test/pool/leader.yaml b/test/pool/leader.yaml index 17403f548..e7fd47f13 100644 --- a/test/pool/leader.yaml +++ b/test/pool/leader.yaml @@ -17,10 +17,10 @@ MinNodeIDDifficulty: 2 DNSSeed: EnforcedDNSSEC: false DNSServers: - - 1.1.1.1 - - 202.46.34.74 - - 202.46.34.75 - - 202.46.34.76 + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 BlockProducer: PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" @@ -37,14 +37,14 @@ BlockProducer: Producer: 0000000000000000000000000000000000000000000000000000000000000001 MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 - Timestamp: 2018-08-13T21:59:59.12Z + Timestamp: "2019-01-10T12:49:07+08:00" KnownNodes: -- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 - Nonce: - a: 313283 - b: 0 - c: 0 - d: 0 - Addr: 127.0.0.1:2530 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Leader + - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:2530 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader diff --git a/test/service/apinode_0/config.yaml b/test/service/apinode_0/config.yaml new file mode 100644 index 000000000..4e0192e9b --- /dev/null +++ b/test/service/apinode_0/config.yaml @@ -0,0 +1,77 @@ +IsTestMode: true +StartupSyncHoles: true +WorkingRoot: ./ +PubKeyStoreFile: public.keystore +PrivateKeyFile: private.key +DHTFileName: dht.db +ListenAddr: 172.254.1.11:4661 +ThisNodeID: 
00000041772ecd779c68a3928d12675d9a65dce02f2ad6907f2cf53013f7e652 +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com + oJMRESz5E4gYzS/q6XDrvU1qMPYIjCWzJaOau8XNEZeqCYKD5ar0IRd8KqXXFJkqmVfRvMGPmM1x8fGAa2XhSA==: cloudflare.com +MinNodeIDDifficulty: 2 +DNSSeed: + EnforcedDNSSEC: false + DNSServers: + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: "2019-01-10T12:49:07+08:00" + BaseAccounts: + - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 +KnownNodes: + - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 172.254.1.2:4661 + 
PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader + - ID: 00000041772ecd779c68a3928d12675d9a65dce02f2ad6907f2cf53013f7e652 + Role: Client + Addr: 172.254.1.11:4661 + PublicKey: 03ff62aa105dc94c2cea1e3e150a5fafbceb230868b7ed0b0f950915499dfeeadd + Nonce: + a: 3631427 + b: 3627950475 + c: 0 + d: 0 +QPS: 1000 +ChainBusPeriod: 0s +BillingBlockCount: 60 +BPPeriod: 3s +BPTick: 1s +SQLChainPeriod: 1m0s +SQLChainTick: 10s +SQLChainTTL: 10 +MinProviderDeposit: 1000000 diff --git a/test/service/node_0/config.yaml b/test/service/node_0/config.yaml index a990c8734..31391385e 100644 --- a/test/service/node_0/config.yaml +++ b/test/service/node_0/config.yaml @@ -22,10 +22,10 @@ MinNodeIDDifficulty: 2 DNSSeed: EnforcedDNSSEC: false DNSServers: - - 1.1.1.1 - - 202.46.34.74 - - 202.46.34.75 - - 202.46.34.76 + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 BlockProducer: PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" @@ -42,7 +42,7 @@ BlockProducer: Producer: 0000000000000000000000000000000000000000000000000000000000000001 MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 - Timestamp: 2018-08-13T21:59:59.12Z + Timestamp: "2019-01-10T12:49:07+08:00" BaseAccounts: - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 StableCoinBalance: 10000000000000000000 @@ -57,66 +57,66 @@ BlockProducer: StableCoinBalance: 10000000000000000000 CovenantCoinBalance: 10000000000000000000 KnownNodes: -- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 - Nonce: - a: 313283 - b: 0 - c: 0 - d: 0 - Addr: 172.254.1.2:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Leader -- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 - Nonce: - a: 478373 - b: 0 - c: 0 - d: 2305843009893772025 - Addr: 172.254.1.3:4661 - 
PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Follower -- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 - Nonce: - a: 259939 - b: 0 - c: 0 - d: 2305843012544226372 - Addr: 172.254.1.4:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Follower -- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d - Nonce: - a: 22403 - b: 0 - c: 0 - d: 0 - Addr: "" - PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 - Role: Client -- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade - Nonce: - a: 567323 - b: 0 - c: 0 - d: 3104982049 - Addr: 172.254.1.5:4661 - PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 - Role: Miner -- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 - Nonce: - a: 240524 - b: 0 - c: 0 - d: 2305843010430351476 - Addr: 172.254.1.6:4661 - PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 - Role: Miner -- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 - Nonce: - a: 606016 - b: 0 - c: 0 - d: 13835058056920509601 - Addr: 172.254.1.7:4661 - PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 - Role: Miner + - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 172.254.1.2:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader + - ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 172.254.1.3:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower + - ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 172.254.1.4:4661 + PublicKey: 
"02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower + - ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client + - ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 172.254.1.5:4661 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner + - ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 172.254.1.6:4661 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner + - ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 172.254.1.7:4661 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/service/node_1/config.yaml b/test/service/node_1/config.yaml index 029cce62a..5e2fa7460 100644 --- a/test/service/node_1/config.yaml +++ b/test/service/node_1/config.yaml @@ -22,10 +22,10 @@ MinNodeIDDifficulty: 2 DNSSeed: EnforcedDNSSEC: false DNSServers: - - 1.1.1.1 - - 202.46.34.74 - - 202.46.34.75 - - 202.46.34.76 + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 BlockProducer: PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" @@ -42,7 +42,7 @@ BlockProducer: Producer: 0000000000000000000000000000000000000000000000000000000000000001 MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 - Timestamp: 2018-08-13T21:59:59.12Z + Timestamp: "2019-01-10T12:49:07+08:00" BaseAccounts: - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 StableCoinBalance: 
10000000000000000000 @@ -57,66 +57,66 @@ BlockProducer: StableCoinBalance: 10000000000000000000 CovenantCoinBalance: 10000000000000000000 KnownNodes: -- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 - Nonce: - a: 313283 - b: 0 - c: 0 - d: 0 - Addr: 172.254.1.2:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Leader -- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 - Nonce: - a: 478373 - b: 0 - c: 0 - d: 2305843009893772025 - Addr: 172.254.1.3:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Follower -- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 - Nonce: - a: 259939 - b: 0 - c: 0 - d: 2305843012544226372 - Addr: 172.254.1.4:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Follower -- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d - Nonce: - a: 22403 - b: 0 - c: 0 - d: 0 - Addr: "" - PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 - Role: Client -- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade - Nonce: - a: 567323 - b: 0 - c: 0 - d: 3104982049 - Addr: 172.254.1.5:4661 - PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 - Role: Miner -- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 - Nonce: - a: 240524 - b: 0 - c: 0 - d: 2305843010430351476 - Addr: 172.254.1.6:4661 - PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 - Role: Miner -- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 - Nonce: - a: 606016 - b: 0 - c: 0 - d: 13835058056920509601 - Addr: 172.254.1.7:4661 - PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 - Role: Miner + - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 
172.254.1.2:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader + - ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 172.254.1.3:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower + - ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 172.254.1.4:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower + - ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client + - ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 172.254.1.5:4661 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner + - ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 172.254.1.6:4661 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner + - ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 172.254.1.7:4661 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/service/node_2/config.yaml b/test/service/node_2/config.yaml index 056e1ce3b..413220a15 100644 --- a/test/service/node_2/config.yaml +++ b/test/service/node_2/config.yaml @@ -22,10 +22,10 @@ MinNodeIDDifficulty: 2 DNSSeed: EnforcedDNSSEC: false DNSServers: - - 1.1.1.1 - - 202.46.34.74 - - 202.46.34.75 - - 202.46.34.76 + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 
BlockProducer: PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" @@ -42,7 +42,7 @@ BlockProducer: Producer: 0000000000000000000000000000000000000000000000000000000000000001 MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 - Timestamp: 2018-08-13T21:59:59.12Z + Timestamp: "2019-01-10T12:49:07+08:00" BaseAccounts: - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 StableCoinBalance: 10000000000000000000 @@ -57,66 +57,66 @@ BlockProducer: StableCoinBalance: 10000000000000000000 CovenantCoinBalance: 10000000000000000000 KnownNodes: -- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 - Nonce: - a: 313283 - b: 0 - c: 0 - d: 0 - Addr: 172.254.1.2:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Leader -- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 - Nonce: - a: 478373 - b: 0 - c: 0 - d: 2305843009893772025 - Addr: 172.254.1.3:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Follower -- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 - Nonce: - a: 259939 - b: 0 - c: 0 - d: 2305843012544226372 - Addr: 172.254.1.4:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Follower -- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d - Nonce: - a: 22403 - b: 0 - c: 0 - d: 0 - Addr: "" - PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 - Role: Client -- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade - Nonce: - a: 567323 - b: 0 - c: 0 - d: 3104982049 - Addr: 172.254.1.5:4661 - PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 - Role: Miner -- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 - Nonce: - a: 240524 - 
b: 0 - c: 0 - d: 2305843010430351476 - Addr: 172.254.1.6:4661 - PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 - Role: Miner -- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 - Nonce: - a: 606016 - b: 0 - c: 0 - d: 13835058056920509601 - Addr: 172.254.1.7:4661 - PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 - Role: Miner + - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 172.254.1.2:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader + - ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 172.254.1.3:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower + - ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 172.254.1.4:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower + - ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client + - ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 172.254.1.5:4661 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner + - ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 172.254.1.6:4661 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner + - ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 
13835058056920509601 + Addr: 172.254.1.7:4661 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/service/node_adapter/config.yaml b/test/service/node_adapter/config.yaml index 7e281ee34..98d362e5f 100644 --- a/test/service/node_adapter/config.yaml +++ b/test/service/node_adapter/config.yaml @@ -22,10 +22,10 @@ MinNodeIDDifficulty: 2 DNSSeed: EnforcedDNSSEC: false DNSServers: - - 1.1.1.1 - - 202.46.34.74 - - 202.46.34.75 - - 202.46.34.76 + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 BlockProducer: PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" @@ -42,7 +42,7 @@ BlockProducer: Producer: 0000000000000000000000000000000000000000000000000000000000000001 MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 - Timestamp: 2018-08-13T21:59:59.12Z + Timestamp: "2019-01-10T12:49:07+08:00" BaseAccounts: - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 StableCoinBalance: 10000000000000000000 @@ -57,69 +57,69 @@ BlockProducer: StableCoinBalance: 10000000000000000000 CovenantCoinBalance: 10000000000000000000 KnownNodes: -- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 - Nonce: - a: 313283 - b: 0 - c: 0 - d: 0 - Addr: 172.254.1.2:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Leader -- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 - Nonce: - a: 478373 - b: 0 - c: 0 - d: 2305843009893772025 - Addr: 172.254.1.3:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Follower -- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 - Nonce: - a: 259939 - b: 0 - c: 0 - d: 2305843012544226372 - Addr: 172.254.1.4:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: 
Follower -- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d - Nonce: - a: 22403 - b: 0 - c: 0 - d: 0 - Addr: "" - PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 - Role: Client -- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade - Nonce: - a: 567323 - b: 0 - c: 0 - d: 3104982049 - Addr: 172.254.1.5:4661 - PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 - Role: Miner -- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 - Nonce: - a: 240524 - b: 0 - c: 0 - d: 2305843010430351476 - Addr: 172.254.1.6:4661 - PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 - Role: Miner -- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 - Nonce: - a: 606016 - b: 0 - c: 0 - d: 13835058056920509601 - Addr: 172.254.1.7:4661 - PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 - Role: Miner + - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 172.254.1.2:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader + - ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 172.254.1.3:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower + - ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 172.254.1.4:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower + - ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client + - ID: 
000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 172.254.1.5:4661 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner + - ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 172.254.1.6:4661 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner + - ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 172.254.1.7:4661 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner Adapter: ListenAddr: 0.0.0.0:4661 CertificatePath: ./server.test.covenantsql.io.pem diff --git a/test/service/node_c/config.yaml b/test/service/node_c/config.yaml index 3e00d1d89..94b974476 100644 --- a/test/service/node_c/config.yaml +++ b/test/service/node_c/config.yaml @@ -22,10 +22,10 @@ MinNodeIDDifficulty: 2 DNSSeed: EnforcedDNSSEC: false DNSServers: - - 1.1.1.1 - - 202.46.34.74 - - 202.46.34.75 - - 202.46.34.76 + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 BlockProducer: PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" @@ -42,71 +42,71 @@ BlockProducer: Producer: 0000000000000000000000000000000000000000000000000000000000000001 MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 - Timestamp: 2018-08-13T21:59:59.12Z + Timestamp: "2019-01-10T12:49:07+08:00" KnownNodes: -- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 - Nonce: - a: 313283 - b: 0 - c: 0 - d: 0 - Addr: 127.0.0.1:11099 #172.254.1.2:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Leader -- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 
- Nonce: - a: 478373 - b: 0 - c: 0 - d: 2305843009893772025 - Addr: 127.0.0.1:11100 #172.254.1.3:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Follower -- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 - Nonce: - a: 259939 - b: 0 - c: 0 - d: 2305843012544226372 - Addr: 127.0.0.1:11101 #172.254.1.4:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Follower -- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d - Nonce: - a: 22403 - b: 0 - c: 0 - d: 0 - Addr: "" - PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 - Role: Client -- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade - Nonce: - a: 567323 - b: 0 - c: 0 - d: 3104982049 - Addr: 127.0.0.1:11102 #172.254.1.5:4661 - PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 - Role: Miner -- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 - Nonce: - a: 240524 - b: 0 - c: 0 - d: 2305843010430351476 - Addr: 127.0.0.1:11103 #172.254.1.6:4661 - PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 - Role: Miner -- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 - Nonce: - a: 606016 - b: 0 - c: 0 - d: 13835058056920509601 - Addr: 127.0.0.1:11104 #172.254.1.7:4661 - PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 - Role: Miner + - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:11099 #172.254.1.2:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader + - ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:11100 #172.254.1.3:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" 
+ Role: Follower + - ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:11101 #172.254.1.4:4661 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower + - ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client + - ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:11102 #172.254.1.5:4661 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner + - ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:11103 #172.254.1.6:4661 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner + - ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:11104 #172.254.1.7:4661 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner Adapter: ListenAddr: 0.0.0.0:4661 CertificatePath: ./server.test.covenantsql.io.pem From 2801fd8b1a70f5f99576f90812ccab92a8f5557c Mon Sep 17 00:00:00 2001 From: Ggicci Date: Thu, 10 Jan 2019 22:26:50 +0800 Subject: [PATCH 112/302] Add covenantsql_api_0 service in docker-compose.yml --- docker-compose.yml | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/docker-compose.yml b/docker-compose.yml index 592c67902..c8b5c41c0 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -171,6 +171,26 @@ services: driver: "json-file" options: max-size: "10m" + covenantsql_api_0: + image: covenantsql/covenantsql:latest + container_name: covenantsql_api_0 + restart: 
always + ports: + - "11110:8546" + command: ["-mode", "api", "-wsapi", ":8546"] + # entrypoint: ["sh"] + environment: + COVENANT_ROLE: blockproducer + COVENANT_CONF: ./apinode_0/config.yaml + volumes: + - ./test/service/apinode_0/:/app/apinode_0/ + networks: + default: + ipv4_address: 172.254.1.11 + logging: + driver: "json-file" + options: + max-size: "10m" networks: default: From 74636c1aeebdc3157f34544a254a18410db4cf1a Mon Sep 17 00:00:00 2001 From: leventeliu Date: Fri, 11 Jan 2019 10:44:57 +0800 Subject: [PATCH 113/302] Add target miner list for miner benchmarking --- cmd/cql-minerd/integration_test.go | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index b190e2f6b..8226594aa 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -822,6 +822,12 @@ func BenchmarkSQLite(b *testing.B) { } func benchOutsideMiner(b *testing.B, minerCount uint16, confDir string) { + benchOutsideMinerWithTargetMinerList(b, minerCount, nil, confDir) +} + +func benchOutsideMinerWithTargetMinerList( + b *testing.B, minerCount uint16, targetMiners []proto.AccountAddress, confDir string, +) { log.Warnf("benchmark %v for %d Miners:", confDir, minerCount) // Create temp directory @@ -851,9 +857,13 @@ func benchOutsideMiner(b *testing.B, minerCount uint16, confDir string) { var dsn string if minerCount > 0 { // create - meta := client.ResourceMeta{} - meta.Node = minerCount - meta.AdvancePayment = 1000000000 + meta := client.ResourceMeta{ + ResourceMeta: types.ResourceMeta{ + TargetMiners: targetMiners, + Node: minerCount, + }, + AdvancePayment: 1000000000, + } // wait for chain service var ctx1, cancel1 = context.WithTimeout(context.Background(), 1*time.Minute) defer cancel1() From d6b9b2873186e7d16496cc41459ebcd3377f3985 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Fri, 11 Jan 2019 11:27:47 +0800 Subject: [PATCH 114/302] Add testnet benchmark 
with target miners --- cmd/cql-minerd/integration_test.go | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index 8226594aa..7746d8384 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -48,6 +48,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/utils/log" sqlite3 "github.com/CovenantSQL/go-sqlite3-encrypt" . "github.com/smartystreets/goconvey/convey" + yaml "gopkg.in/yaml.v2" ) var ( @@ -982,6 +983,32 @@ func BenchmarkTestnetMiner2(b *testing.B) { }) } +func BenchmarkTestnetTargetMiner2(b *testing.B) { + var ( + err error + // Public keys of miners for test + publicKeys = []string{ + "0235abfb93031df7bf776332c510a862e48e81eebea76f5e165406af8fec5215d6", + "03aec5337c0a58b8eff96f8ab30518830ad8e329c74bb30b38901a9395c72340f8", + } + ) + Convey("bench testnet one node", b, func() { + var ( + pubKey asymmetric.PublicKey + addr proto.AccountAddress + targetMiners = make([]proto.AccountAddress, len(publicKeys)) + ) + for i, v := range publicKeys { + err = yaml.Unmarshal([]byte(v), &pubKey) + So(err, ShouldBeNil) + addr, err = crypto.PubKeyHash(&pubKey) + So(err, ShouldBeNil) + targetMiners[i] = addr + } + benchOutsideMinerWithTargetMinerList(b, 2, targetMiners, testnetConfDir) + }) +} + func BenchmarkTestnetMiner3(b *testing.B) { Convey("bench testnet one node", b, func() { benchOutsideMiner(b, 3, testnetConfDir) From 6cd69a05a4975f6cdef94f642a389be85f3715d7 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Fri, 11 Jan 2019 13:23:33 +0800 Subject: [PATCH 115/302] Fix height overflow, config required reachable peers on startup --- blockproducer/chain.go | 56 +++++++++++++++++++++++++----------------- 1 file changed, 33 insertions(+), 23 deletions(-) diff --git a/blockproducer/chain.go b/blockproducer/chain.go index 83e7bc381..868898cbe 100644 --- a/blockproducer/chain.go +++ b/blockproducer/chain.go @@ -371,19 +371,29 @@ func (c 
*Chain) advanceNextHeight(now time.Time, d time.Duration) { func (c *Chain) syncHeads() { for { - var h = c.heightOfTime(c.now()) - if c.getNextHeight() > h { + var ( + now = c.now() + nowHeight uint32 + ) + if now.Before(c.genesisTime) { + log.WithFields(log.Fields{ + "local": c.getLocalBPInfo(), + }).Info("now time is before genesis time, waiting for genesis") break } - for c.getNextHeight() <= h { + if nowHeight = c.heightOfTime(c.now()); c.getNextHeight() > nowHeight { + break + } + for c.getNextHeight() <= nowHeight { // TODO(leventeliu): use the test mode flag to bypass the long-running synchronizing // on startup by now, need better solution here. if conf.GConf.StartupSyncHoles { log.WithFields(log.Fields{ + "local": c.getLocalBPInfo(), "next_height": c.getNextHeight(), - "height": h, + "now_height": nowHeight, }).Debug("synchronizing head blocks") - c.syncCurrentHead(c.ctx) + c.syncCurrentHead(c.ctx, 2) } c.increaseNextHeight() } @@ -509,7 +519,7 @@ func (c *Chain) mainCycle(ctx context.Context) { select { case <-timer.C: // Try to fetch block at height `nextHeight-1` until enough peers are reachable - if err := c.blockingSyncCurrentHead(ctx); err != nil { + if err := c.blockingSyncCurrentHead(ctx, c.getRequiredConfirms()); err != nil { log.WithError(err).Info("abort main cycle") timer.Reset(0) return @@ -537,7 +547,7 @@ func (c *Chain) mainCycle(ctx context.Context) { } } -func (c *Chain) blockingSyncCurrentHead(ctx context.Context) (err error) { +func (c *Chain) blockingSyncCurrentHead(ctx context.Context, requiredReachable uint32) (err error) { var ( ticker *time.Ticker interval = 1 * time.Second @@ -550,7 +560,7 @@ func (c *Chain) blockingSyncCurrentHead(ctx context.Context) (err error) { for { select { case <-ticker.C: - if c.syncCurrentHead(ctx) { + if c.syncCurrentHead(ctx, requiredReachable) { return } case <-ctx.Done(): @@ -561,11 +571,11 @@ func (c *Chain) blockingSyncCurrentHead(ctx context.Context) (err error) { } // syncCurrentHead synchronizes 
a block at the current height of the local peer from the known -// remote peers. The return value `ok` indicates that there're at least `c.confirms-1` replies -// from these gossip calls. -func (c *Chain) syncCurrentHead(ctx context.Context) (ok bool) { - var h = c.getNextHeight() - 1 - if c.head().height >= h { +// remote peers. The return value `ok` indicates that there're at least `requiredReachable-1` +// replies from these gossip calls. +func (c *Chain) syncCurrentHead(ctx context.Context, requiredReachable uint32) (ok bool) { + var currentHeight = c.getNextHeight() - 1 + if c.head().height >= currentHeight { ok = true return } @@ -573,20 +583,14 @@ func (c *Chain) syncCurrentHead(ctx context.Context) (ok bool) { // Initiate blocking gossip calls to fetch block of the current height, // with timeout of one tick. var ( - unreachable = c.blockingFetchBlock(ctx, h) - - needConfirms, serversNum = func() (cf, sn uint32) { - c.RLock() - defer c.RUnlock() - cf, sn = c.confirms, c.localBPInfo.total - return - }() + unreachable = c.blockingFetchBlock(ctx, currentHeight) + serversNum = c.getLocalBPInfo().total ) - if ok = unreachable+needConfirms <= serversNum; !ok { + if ok = unreachable+requiredReachable <= serversNum; !ok { log.WithFields(log.Fields{ "peer": c.getLocalBPInfo(), - "sync_head_height": h, + "sync_head_height": currentHeight, "unreachable_count": unreachable, }).Warn("one or more block producers are currently unreachable") } @@ -893,6 +897,12 @@ func (c *Chain) heightOfTime(t time.Time) uint32 { return uint32(t.Sub(c.genesisTime) / c.period) } +func (c *Chain) getRequiredConfirms() uint32 { + c.RLock() + defer c.RUnlock() + return c.confirms +} + func (c *Chain) getNextHeight() uint32 { c.RLock() defer c.RUnlock() From edd3f0047bf457d7bd5f79697fe213eb2b75ab84 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Fri, 11 Jan 2019 14:29:21 +0800 Subject: [PATCH 116/302] Move limits package to conf, add test case for genesis startup --- blockproducer/branch.go | 
8 +++---- blockproducer/chain.go | 13 +++++------ blockproducer/chain_test.go | 29 +++++++++++++++++++++++- blockproducer/config.go | 4 ---- {blockproducer/limits => conf}/limits.go | 3 +-- conf/parameters.go | 8 +++++++ 6 files changed, 47 insertions(+), 18 deletions(-) rename {blockproducer/limits => conf}/limits.go (92%) diff --git a/blockproducer/branch.go b/blockproducer/branch.go index 93592fec4..fcba6e5dc 100644 --- a/blockproducer/branch.go +++ b/blockproducer/branch.go @@ -22,7 +22,7 @@ import ( "time" pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" - pl "github.com/CovenantSQL/CovenantSQL/blockproducer/limits" + "github.com/CovenantSQL/CovenantSQL/conf" ca "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/proto" @@ -57,7 +57,7 @@ func newBranch( } // Apply new blocks to view and pool for _, bn := range list { - if len(bn.block.Transactions) > pl.MaxTransactionsPerBlock { + if len(bn.block.Transactions) > conf.MaxTransactionsPerBlock { return nil, ErrTooManyTransactionsInBlock } @@ -132,7 +132,7 @@ func (b *branch) applyBlock(n *blockNode) (br *branch, err error) { } var cpy = b.makeArena() - if len(n.block.Transactions) > pl.MaxTransactionsPerBlock { + if len(n.block.Transactions) > conf.MaxTransactionsPerBlock { return nil, ErrTooManyTransactionsInBlock } @@ -185,7 +185,7 @@ func (b *branch) produceBlock( cpy = b.makeArena() txs = cpy.sortUnpackedTxs() ierr error - packCount = pl.MaxTransactionsPerBlock + packCount = conf.MaxTransactionsPerBlock ) if len(txs) < packCount { diff --git a/blockproducer/chain.go b/blockproducer/chain.go index 868898cbe..0f119b4ad 100644 --- a/blockproducer/chain.go +++ b/blockproducer/chain.go @@ -25,7 +25,6 @@ import ( "time" pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" - pl "github.com/CovenantSQL/CovenantSQL/blockproducer/limits" "github.com/CovenantSQL/CovenantSQL/chainbus" 
"github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto" @@ -188,7 +187,7 @@ func NewChainWithContext(ctx context.Context, cfg *Config) (c *Chain, err error) return } if t = cfg.ConfirmThreshold; t <= 0.0 { - t = float64(2) / 3.0 + t = conf.DefaultConfirmThreshold } if m = uint32(math.Ceil(float64(l)*t + 1)); m > l { m = l @@ -393,7 +392,7 @@ func (c *Chain) syncHeads() { "next_height": c.getNextHeight(), "now_height": nowHeight, }).Debug("synchronizing head blocks") - c.syncCurrentHead(c.ctx, 2) + c.blockingSyncCurrentHead(c.ctx, conf.BPStartupRequiredReachableCount) } c.increaseNextHeight() } @@ -473,18 +472,18 @@ func (c *Chain) processAddTxReq(addTxReq *types.AddTxReq) { le.WithError(err).Warn("failed to load base nonce of transaction account") return } - if nonce < base || nonce >= base+pl.MaxPendingTxsPerAccount { + if nonce < base || nonce >= base+conf.MaxPendingTxsPerAccount { // TODO(leventeliu): should persist to somewhere for tx query? le.WithFields(log.Fields{ "base_nonce": base, - "pending_limit": pl.MaxPendingTxsPerAccount, + "pending_limit": conf.MaxPendingTxsPerAccount, }).Warn("invalid transaction nonce") return } // Broadcast to other block producers - if ttl > pl.MaxTxBroadcastTTL { - ttl = pl.MaxTxBroadcastTTL + if ttl > conf.MaxTxBroadcastTTL { + ttl = conf.MaxTxBroadcastTTL } if ttl > 0 { c.nonblockingBroadcastTx(ttl-1, tx) diff --git a/blockproducer/chain_test.go b/blockproducer/chain_test.go index 61e41668b..58fd88bf0 100644 --- a/blockproducer/chain_test.go +++ b/blockproducer/chain_test.go @@ -145,6 +145,32 @@ func TestChain(t *testing.T) { Tick: time.Duration(300 * time.Millisecond), } + Convey("A new chain running before genesis time should be waiting for genesis", func() { + config.Genesis.SignedHeader.Timestamp = time.Now().Add(24 * time.Hour) + err = genesis.PackAndSignBlock(testingPrivateKey) + So(err, ShouldBeNil) + chain, err = NewChain(config) + So(err, ShouldBeNil) + + var sv = rpc.NewServer() + err = 
sv.InitRPCServer("localhost:0", testingPrivateKeyFile, []byte{}) + So(err, ShouldBeNil) + defer sv.Stop() + chain.server = sv + chain.confirms = 1 + chain.Start() + defer func() { + err = chain.Stop() + So(err, ShouldBeNil) + chain = nil + }() + time.Sleep(5 * chain.period) + var _, count, height, err = chain.fetchLastIrreversibleBlock() + So(err, ShouldBeNil) + So(count, ShouldEqual, 0) + So(height, ShouldEqual, 0) + }) + chain, err = NewChain(config) So(err, ShouldBeNil) So(chain, ShouldNotBeNil) @@ -345,7 +371,8 @@ func TestChain(t *testing.T) { chain.confirms = 1 chain.Start() defer func() { - chain.Stop() + err = chain.Stop() + So(err, ShouldBeNil) chain = nil }() chain.addTx(&types.AddTxReq{TTL: 1, Tx: t1}) diff --git a/blockproducer/config.go b/blockproducer/config.go index d9d4c3ae1..1cb6b03dc 100644 --- a/blockproducer/config.go +++ b/blockproducer/config.go @@ -24,10 +24,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/types" ) -const ( - blockVersion int32 = 0x01 -) - // Config is the main chain configuration. type Config struct { Mode string diff --git a/blockproducer/limits/limits.go b/conf/limits.go similarity index 92% rename from blockproducer/limits/limits.go rename to conf/limits.go index 1e1f96405..8c12c08cb 100644 --- a/blockproducer/limits/limits.go +++ b/conf/limits.go @@ -14,8 +14,7 @@ * limitations under the License. */ -// Package limits defines limits of the CovenantSQL system. -package limits +package conf const ( // MaxTxBroadcastTTL defines the TTL limit of a AddTx request broadcasting within the diff --git a/conf/parameters.go b/conf/parameters.go index 8fbc04951..64f8bc53c 100644 --- a/conf/parameters.go +++ b/conf/parameters.go @@ -18,6 +18,7 @@ package conf import "time" +// This parameters should be kept consistent in all BPs. const ( // BPPeriod is the block producer block produce period. BPPeriod = 3 * time.Second @@ -29,4 +30,11 @@ const ( SQLChainTick = 1 * time.Second // SQLChainTTL is the sqlchain unack query billing ttl. 
SQLChainTTL = 10 + + DefaultConfirmThreshold = float64(2) / 3.0 +) + +// This parameters will not cause inconsistency within certain range. +const ( + BPStartupRequiredReachableCount = 2 // NOTE: this includes myself ) From 2729656ddcb94755a1f41f2ff51823c7a3ddadfd Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Fri, 11 Jan 2019 15:02:17 +0800 Subject: [PATCH 117/302] Remove unused parameters --- conf/parameters.go | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/conf/parameters.go b/conf/parameters.go index 64f8bc53c..6952e83c0 100644 --- a/conf/parameters.go +++ b/conf/parameters.go @@ -20,17 +20,6 @@ import "time" // This parameters should be kept consistent in all BPs. const ( - // BPPeriod is the block producer block produce period. - BPPeriod = 3 * time.Second - // BPTick is the block produce block fetch tick. - BPTick = 1 * time.Second - // SQLChainPeriod is the sqlchain block produce period. - SQLChainPeriod = 3 * time.Second - // SQLChainTick is the sqlchain block fetch tick. - SQLChainTick = 1 * time.Second - // SQLChainTTL is the sqlchain unack query billing ttl. - SQLChainTTL = 10 - DefaultConfirmThreshold = float64(2) / 3.0 ) From fbbc30e8ba21a9addaac8925fc756765dadd1029 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Fri, 11 Jan 2019 15:03:01 +0800 Subject: [PATCH 118/302] Remove unused parameters --- conf/parameters.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/conf/parameters.go b/conf/parameters.go index 6952e83c0..4fa2296ef 100644 --- a/conf/parameters.go +++ b/conf/parameters.go @@ -16,8 +16,6 @@ package conf -import "time" - // This parameters should be kept consistent in all BPs. 
const ( DefaultConfirmThreshold = float64(2) / 3.0 From b79842926ec44a54a0db5245886a6c2f600a829c Mon Sep 17 00:00:00 2001 From: leventeliu Date: Fri, 11 Jan 2019 16:04:53 +0800 Subject: [PATCH 119/302] Put syncCurrentHead before get tick --- blockproducer/chain.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/blockproducer/chain.go b/blockproducer/chain.go index 0f119b4ad..be25dd3b2 100644 --- a/blockproducer/chain.go +++ b/blockproducer/chain.go @@ -557,11 +557,11 @@ func (c *Chain) blockingSyncCurrentHead(ctx context.Context, requiredReachable u ticker = time.NewTicker(interval) defer ticker.Stop() for { + if c.syncCurrentHead(ctx, requiredReachable) { + return + } select { case <-ticker.C: - if c.syncCurrentHead(ctx, requiredReachable) { - return - } case <-ctx.Done(): err = ctx.Err() return From 1e0c131981e5ccb5f7ba75d1fd3acd21327c020c Mon Sep 17 00:00:00 2001 From: laodouya Date: Fri, 11 Jan 2019 16:42:58 +0800 Subject: [PATCH 120/302] Run BenchmarkTestnetMiner2 in gitlab ci process. 
--- .gitlab-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index ff2061e09..73b0cf720 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -31,5 +31,6 @@ test-my-project: - cd rpc && go test -test.bench ^BenchmarkPersistentCaller_Call$ -test.run ^$ && cd - - bash cleanupDB.sh || true - cd cmd/cql-minerd && go test -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ && cd - + - cd cmd/cql-minerd && go test -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ && cd - - gocovmerge cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f cover.out - bash <(curl -s https://codecov.io/bash) From 85c63b99c369ad2c3bd93e9fa74d4b14e51b2ce3 Mon Sep 17 00:00:00 2001 From: laodouya Date: Fri, 11 Jan 2019 17:03:09 +0800 Subject: [PATCH 121/302] Run BenchmarkTestnetMiner2 on independent line by gitlabci --- .gitlab-ci.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 73b0cf720..819e5f5a2 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -30,7 +30,9 @@ test-my-project: - go test -v -race -failfast -parallel 16 -cpu 16 $(go list ./... 
| grep -v "/vendor/") -coverprofile cover.out - cd rpc && go test -test.bench ^BenchmarkPersistentCaller_Call$ -test.run ^$ && cd - - bash cleanupDB.sh || true - - cd cmd/cql-minerd && go test -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ && cd - - - cd cmd/cql-minerd && go test -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ && cd - + - cd cmd/cql-minerd + - go test -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ + - go test -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ + - cd - - gocovmerge cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f cover.out - bash <(curl -s https://codecov.io/bash) From 1b9e47f1863bc23b958613940325088c28929000 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Fri, 11 Jan 2019 11:32:53 +0800 Subject: [PATCH 122/302] Remove unused HTTP stream transport --- api/http_stream.go | 37 ------------------------------------- 1 file changed, 37 deletions(-) delete mode 100644 api/http_stream.go diff --git a/api/http_stream.go b/api/http_stream.go deleted file mode 100644 index 51ae2be85..000000000 --- a/api/http_stream.go +++ /dev/null @@ -1,37 +0,0 @@ -package api - -import ( - "encoding/json" - "net/http" -) - -type httpconn struct { - rw http.ResponseWriter - r *http.Request -} - -// HTTPStream is data stream as jsonrpc2.ObjectStream over HTTP transport. -type HTTPStream struct { - conn httpconn -} - -// NewHTTPStream creates a new HTTPStream. -func NewHTTPStream(conn httpconn) HTTPStream { - return HTTPStream{conn: conn} -} - -// WriteObject implements jsonrpc2.ObjectStream.WriteObject. -func (t HTTPStream) WriteObject(obj interface{}) error { - t.conn.rw.Header().Add("Content-Type", "application/json") - return json.NewEncoder(t.conn.rw).Encode(obj) -} - -// ReadObject implements jsonrpc2.ObjectStream.ReadObject. -func (t HTTPStream) ReadObject(v interface{}) error { - return json.NewDecoder(t.conn.r.Body).Decode(v) -} - -// Close implements jsonrpc2.ObjectStream.Close. 
-func (t HTTPStream) Close() error { - return t.conn.r.Body.Close() -} From 7a77895f293be77df9c5ce36a5adaa202c1f2c59 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Fri, 11 Jan 2019 18:12:42 +0800 Subject: [PATCH 123/302] Refactor bp_getBlockList (pagination) --- api/blocks.go | 26 +++++++++--- api/jsonrpc.go | 2 +- api/models/blocks.go | 49 +++++++++++++++++++--- api/models/pagination.go | 91 ++++++++++++++++++++++++++++++++++++++++ 4 files changed, 156 insertions(+), 12 deletions(-) create mode 100644 api/models/pagination.go diff --git a/api/blocks.go b/api/blocks.go index 055968506..dd10291eb 100644 --- a/api/blocks.go +++ b/api/blocks.go @@ -15,24 +15,38 @@ func init() { } type bpGetBlockListParams struct { - From int `json:"from"` - To int `json:"to"` + Since int `json:"since"` + Page int `json:"page"` + Size int `json:"size"` } func (params *bpGetBlockListParams) Validate() error { - diff := params.To - params.From - if diff < 5 || diff > 100 { - return errors.New("to - from should between 5 and 100") + if params.Size > 1000 { + return errors.New("max size is 1000") } return nil } +// BPGetBlockListResponse is the response for method bp_getBlockList. 
+type BPGetBlockListResponse struct { + Blocks []*models.Block `json:"blocks"` + Pagination *models.Pagination `json:"pagination"` +} + func bpGetBlockList(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) ( result interface{}, err error, ) { params := ctx.Value("_params").(*bpGetBlockListParams) model := models.BlocksModel{} - return model.GetBlockList(params.From, params.To) + blocks, pagination, err := model.GetBlockList(params.Since, params.Page, params.Size) + if err != nil { + return nil, err + } + result = &BPGetBlockListResponse{ + Blocks: blocks, + Pagination: pagination, + } + return result, nil } type bpGetBlockByHeightParams struct { diff --git a/api/jsonrpc.go b/api/jsonrpc.go index 8544b6a0c..a2f9db986 100644 --- a/api/jsonrpc.go +++ b/api/jsonrpc.go @@ -17,7 +17,7 @@ var ( type jsonrpcHandlerFunc func(context.Context, *jsonrpc2.Conn, *jsonrpc2.Request) (interface{}, error) func registerMethod(method string, handlerFunc jsonrpcHandlerFunc, paramsType interface{}) { - log.WithField("method", method).Info("api: register rpc method") + log.WithField("method", method).Debug("api: register rpc method") if paramsType == nil { jsonrpcHandler.RegisterMethod(method, handlerFunc) diff --git a/api/models/blocks.go b/api/models/blocks.go index 707a84548..d0f3ecc34 100644 --- a/api/models/blocks.go +++ b/api/models/blocks.go @@ -30,11 +30,50 @@ func (b *Block) PostGet(s gorp.SqlExecutor) error { } // GetBlockList get a list of blocks with height in [from, to). -func (m *BlocksModel) GetBlockList(from, to int) (blocks []*Block, err error) { - query := `SELECT height, hash, timestamp, version, producer, merkle_root, parent, tx_count - FROM indexed_blocks WHERE height >= ? and height < ? 
ORDER BY height DESC` - _, err = chaindb.Select(&blocks, query, from, to) - return blocks, err +func (m *BlocksModel) GetBlockList(since, page, size int) (blocks []*Block, pagination *Pagination, err error) { + var ( + querySQL = ` + SELECT + height, + hash, + timestamp, + version, + producer, + merkle_root, + parent, + tx_count + FROM + indexed_blocks + ` + countSQL = buildCountSQL(querySQL) + conds []string + args []interface{} + ) + + pagination = NewPagination(page, size) + if since > 0 { + conds = append(conds, "height < ?") + args = append(args, since) + } + + querySQL, countSQL = buildSQLWithConds(querySQL, countSQL, conds) + + count, err := chaindb.SelectInt(countSQL, args...) + if err != nil { + return nil, pagination, err + } + pagination.SetTotal(int(count)) + blocks = make([]*Block, 0) + if pagination.Offset() > pagination.Total { + return blocks, pagination, nil + } + + querySQL += " ORDER BY height DESC" + querySQL += " LIMIT ? OFFSET ?" + args = append(args, pagination.Limit(), pagination.Offset()) + + _, err = chaindb.Select(&blocks, querySQL, args...) + return blocks, pagination, err } // GetBlockByHeight get a block by its height. diff --git a/api/models/pagination.go b/api/models/pagination.go new file mode 100644 index 000000000..a96f1b733 --- /dev/null +++ b/api/models/pagination.go @@ -0,0 +1,91 @@ +package models + +import "math" + +// Pagination holds paging info for list like API. +type Pagination struct { + Page int `json:"page"` + Size int `json:"size"` + Total int `json:"total"` + Pages int `json:"pages"` + + defaultSize int +} + +// PaginationOpt represents extra pagination options to apply. +type PaginationOpt func(*Pagination) + +// WithDefaultSize set pagination default size. +func WithDefaultSize(size int) PaginationOpt { + return func(p *Pagination) { + if size < 0 { + p.defaultSize = 10 + return + } + p.defaultSize = size + } +} + +// NewPagination creates a new Pagination. 
+func NewPagination(page, size int, opts ...PaginationOpt) *Pagination { + p := &Pagination{ + Page: page, + Size: size, + defaultSize: 10, + } + + for _, opt := range opts { + if opt != nil { + opt(p) + } + } + + p.normalize() + return p +} + +func (p *Pagination) normalize() { + if p.Page <= 0 { + p.Page = 1 + } + if p.Size <= 0 { + p.Size = p.defaultSize + } + if p.Total <= 0 { + p.Total = 0 + } + + p.Pages = int(math.Ceil(float64(p.Total) / float64(p.Size))) +} + +// SetPage update current page index. +func (p *Pagination) SetPage(page int) { + p.Page = page + p.normalize() +} + +// SetSize update page size. +func (p *Pagination) SetSize(size int) { + p.Size = size + p.normalize() +} + +// SetTotal update the total records. +func (p *Pagination) SetTotal(total int) { + p.Total = total + p.normalize() +} + +// Limit returns the page size. +// Attened to be used in SQL statements. +func (p *Pagination) Limit() int { + p.normalize() + return p.Size +} + +// Offset returns the size of skipped items of current page. +// Attened to be used in SQL statements. +func (p *Pagination) Offset() int { + p.normalize() + return (p.Page - 1) * p.Size +} From f8b0889df3ec886a2b2d875ceaa7f6ef3000f524 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Fri, 11 Jan 2019 18:14:24 +0800 Subject: [PATCH 124/302] Add new API: bp_getTransactionListOfBlock 1. refactor bp_getTransactionList to support pagination; 2. add new API: bp_getTransactionListOfBlock. 
--- api/models/transactions.go | 116 +++++++++++++++++++++++++++++++------ api/transactions.go | 62 +++++++++++++++++--- 2 files changed, 150 insertions(+), 28 deletions(-) diff --git a/api/models/transactions.go b/api/models/transactions.go index 302f1b0f9..130b9ca51 100644 --- a/api/models/transactions.go +++ b/api/models/transactions.go @@ -3,7 +3,6 @@ package models import ( "database/sql" "encoding/json" - "fmt" "time" "github.com/go-gorp/gorp" @@ -45,29 +44,108 @@ func (m *TransactionsModel) GetTransactionByHash(hash string) (tx *Transaction, return tx, err } +// GetTransactionListOfBlock get a transaction list of block. +func (m *TransactionsModel) GetTransactionListOfBlock(ofBlockHeight int, page, size int) ( + txs []*Transaction, pagination *Pagination, err error, +) { + var ( + querySQL = ` + SELECT + block_height, + tx_index, + hash, + block_hash, + timestamp, + tx_type, + address, + raw + FROM + indexed_transactions + ` + countSQL = buildCountSQL(querySQL) + conds []string + args []interface{} + ) + + pagination = NewPagination(page, size) + conds = append(conds, "block_height = ?") + args = append(args, ofBlockHeight) + + querySQL, countSQL = buildSQLWithConds(querySQL, countSQL, conds) + count, err := chaindb.SelectInt(countSQL, args...) + if err != nil { + return nil, pagination, err + } + pagination.SetTotal(int(count)) + if pagination.Offset() > pagination.Total { + return txs, pagination, nil + } + + querySQL += " ORDER BY tx_index DESC" + querySQL += " LIMIT ? OFFSET ?" + args = append(args, pagination.Limit(), pagination.Offset()) + + _, err = chaindb.Select(&txs, querySQL, args...) + return txs, pagination, err +} + // GetTransactionList get a transaction list by hash marker. 
-func (m *TransactionsModel) GetTransactionList(since, direction string, limit int) ( - txs []*Transaction, err error, +func (m *TransactionsModel) GetTransactionList(since string, page, size int) ( + txs []*Transaction, pagination *Pagination, err error, ) { - tx, err := m.GetTransactionByHash(since) - if tx == nil { - return txs, err + var ( + sinceBlockHeight = 0 + sinceTxIndex = 0 + ) + + if since != "" { + tx, err := m.GetTransactionByHash(since) + if tx == nil { + return txs, pagination, err + } + sinceBlockHeight = tx.BlockHeight + sinceTxIndex = tx.TxIndex } - orderBy := "DESC" - compare := "<" - if direction == "forward" { - orderBy = "ASC" - compare = ">" + var ( + querySQL = ` + SELECT + block_height, + tx_index, + hash, + block_hash, + timestamp, + tx_type, + address, + raw + FROM + indexed_transactions + ` + countSQL = buildCountSQL(querySQL) + conds []string + args []interface{} + ) + + pagination = NewPagination(page, size) + if sinceBlockHeight > 0 { + conds = append(conds, "(block_height < ? or (block_height = ? and tx_index < ?))") + args = append(args, sinceBlockHeight, sinceBlockHeight, sinceTxIndex) + } + + querySQL, countSQL = buildSQLWithConds(querySQL, countSQL, conds) + count, err := chaindb.SelectInt(countSQL, args...) + if err != nil { + return nil, pagination, err + } + pagination.SetTotal(int(count)) + if pagination.Offset() > pagination.Total { + return txs, pagination, nil } - query := fmt.Sprintf(`SELECT block_height, tx_index, hash, block_hash, - timestamp, tx_type, address, raw - FROM indexed_transactions - WHERE block_height %s ? or (block_height = ? and tx_index %s ?) - ORDER BY block_height %s, tx_index %s - LIMIT ?`, compare, compare, orderBy, orderBy) + querySQL += " ORDER BY block_height DESC, tx_index DESC" + querySQL += " LIMIT ? OFFSET ?" 
+ args = append(args, pagination.Limit(), pagination.Offset()) - _, err = chaindb.Select(&txs, query, tx.BlockHeight, tx.BlockHeight, tx.TxIndex, limit) - return txs, err + _, err = chaindb.Select(&txs, querySQL, args...) + return txs, pagination, err } diff --git a/api/transactions.go b/api/transactions.go index f613de2db..b43ca7728 100644 --- a/api/transactions.go +++ b/api/transactions.go @@ -12,30 +12,74 @@ import ( func init() { registerMethod("bp_getTransactionList", bpGetTransactionList, bpGetTransactionListParams{}) registerMethod("bp_getTransactionByHash", bpGetTransactionByHash, bpGetTransactionByHashParams{}) + registerMethod("bp_getTransactionListOfBlock", bpGetTransactionListOfBlock, bpGetTransactionListOfBlockParams{}) } type bpGetTransactionListParams struct { - Since string `json:"since"` - Direction string `json:"direction"` - Limit int `json:"limit"` + Since string `json:"since"` + Page int `json:"page"` + Size int `json:"size"` } func (params *bpGetTransactionListParams) Validate() error { - if params.Limit < 5 || params.Limit > 100 { - return errors.New("limit should between 5 and 100") - } - if params.Direction != "backward" && params.Direction != "forward" { - return fmt.Errorf("unknown direction %q", params.Direction) + if params.Size > 1000 { + return errors.New("max size is 1000") } return nil } +// BPGetTransactionListResponse is the response for method bp_getTransactionList. 
+type BPGetTransactionListResponse struct { + Transactions []*models.Transaction `json:"transactions"` + Pagination *models.Pagination `json:"pagination"` +} + func bpGetTransactionList(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) ( result interface{}, err error, ) { params := ctx.Value("_params").(*bpGetTransactionListParams) model := models.TransactionsModel{} - return model.GetTransactionList(params.Since, params.Direction, params.Limit) + transactions, pagination, err := model.GetTransactionList(params.Since, params.Page, params.Size) + if err != nil { + return nil, err + } + result = &BPGetTransactionListResponse{ + Transactions: transactions, + Pagination: pagination, + } + return result, nil +} + +type bpGetTransactionListOfBlockParams struct { + BlockHeight int `json:"height"` + Page int `json:"page"` + Size int `json:"size"` +} + +func (params *bpGetTransactionListOfBlockParams) Validate() error { + if params.BlockHeight < 1 { + return fmt.Errorf("invalid block height %d", params.BlockHeight) + } + if params.Size > 1000 { + return errors.New("max size is 1000") + } + return nil +} + +func bpGetTransactionListOfBlock(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) ( + result interface{}, err error, +) { + params := ctx.Value("_params").(*bpGetTransactionListOfBlockParams) + model := models.TransactionsModel{} + transactions, pagination, err := model.GetTransactionListOfBlock(params.BlockHeight, params.Page, params.Size) + if err != nil { + return nil, err + } + result = &BPGetTransactionListResponse{ + Transactions: transactions, + Pagination: pagination, + } + return result, nil } type bpGetTransactionByHashParams struct { From 851500819718be25a8219bde87c3154614d0a4b9 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Fri, 11 Jan 2019 18:15:49 +0800 Subject: [PATCH 125/302] Update integration test cases --- api/service_test.go | 192 +++++++++++++++++++++++++++++++++++--------- 1 file changed, 156 insertions(+), 36 deletions(-) 
diff --git a/api/service_test.go b/api/service_test.go index 6e922dfd4..9279a3741 100644 --- a/api/service_test.go +++ b/api/service_test.go @@ -164,6 +164,22 @@ func setupWebsocketClient(addr string) (client *jsonrpc2.Conn, err error) { return client, err } +type bpGetBlockListTestCase struct { + Since int + Page int + Size int + ExpectedResults [][]interface{} + ExpectedPagination *models.Pagination +} + +func (c *bpGetBlockListTestCase) String() string { + return fmt.Sprintf("fetch %d blocks at page %d since %d", c.Size, c.Page, c.Since) +} + +func (c *bpGetBlockListTestCase) Params() interface{} { + return []interface{}{c.Since, c.Page, c.Size} +} + type bpGetBlockTestCase struct { Height int Hash string @@ -175,18 +191,35 @@ func (c *bpGetBlockTestCase) String() string { } type bpGetTransactionListTestCase struct { - Since string - Direction string - Limit int - ExpectedResults [][]interface{} + Since string + Page int + Size int + ExpectedResults [][]interface{} + ExpectedPagination *models.Pagination } func (c *bpGetTransactionListTestCase) Params() interface{} { - return []interface{}{c.Since, c.Direction, c.Limit} + return []interface{}{c.Since, c.Page, c.Size} } func (c *bpGetTransactionListTestCase) String() string { - return fmt.Sprintf("fetch %d transactions %s since %s", c.Limit, c.Direction, c.Since) + return fmt.Sprintf("fetch %d transactions at page %d since %s", c.Size, c.Page, c.Since) +} + +type bpGetTransactionListOfBlockTestCase struct { + BlockHeight int + Page int + Size int + ExpectedResults [][]interface{} + ExpectedPagination *models.Pagination +} + +func (c *bpGetTransactionListOfBlockTestCase) Params() interface{} { + return []interface{}{c.BlockHeight, c.Page, c.Size} +} + +func (c *bpGetTransactionListOfBlockTestCase) String() string { + return fmt.Sprintf("fetch %d transactions at page %d of block %d", c.Size, c.Page, c.BlockHeight) } type bpGetTransactionByHashTestCase struct { @@ -251,6 +284,24 @@ func TestJSONRPCService(t 
*testing.T) { } ) + Convey("API not found", t, func() { + rpc, err := setupWebsocketClient(addr) + if err != nil { + t.Errorf("failed to connect to wsapi server: %v", err) + return + } + + Convey("call method should fail if method not found", func() { + var result interface{} + err := rpc.Call(context.Background(), "method_NotFound", nil, &result, callOpts...) + So(err, ShouldNotBeNil) + }) + + Reset(func() { + rpc.Close() + }) + }) + Convey("blocks API", t, func() { rpc, err := setupWebsocketClient(addr) if err != nil { @@ -262,8 +313,9 @@ func TestJSONRPCService(t *testing.T) { var ( result []*models.Block testCases = map[string][]int{ - "to-from < 5": {1, 5}, - "to-from > 100": {1, 102}, + "page over 1000": {0, 1, 10001}, + "invalid number of parameters": {0}, + "nil parameters": nil, } ) @@ -278,26 +330,23 @@ func TestJSONRPCService(t *testing.T) { Convey("bp_getBlockList should success on fetching valid number of blocks", func() { var ( - result []*models.Block - testCases = [][]int{ - {1, 6}, - {1, 11}, - {2, 9}, + result = new(api.BPGetBlockListResponse) + testCases = []*bpGetBlockListTestCase{ + {0, 1, 10, blocksMockData[4:14], &models.Pagination{Page: 1, Size: 10, Total: 14, Pages: 2}}, + {14, 1, 5, blocksMockData[8:13], &models.Pagination{Page: 1, Size: 5, Total: 13, Pages: 3}}, + {14, 2, 5, blocksMockData[3:8], &models.Pagination{Page: 2, Size: 5, Total: 13, Pages: 3}}, + {14, 3, 5, blocksMockData[0:3], &models.Pagination{Page: 3, Size: 5, Total: 13, Pages: 3}}, } ) for i, testCase := range testCases { - from, to := testCase[0], testCase[1] - count := to - from - name := fmt.Sprintf("case#%d, fetch %d blocks [%d, %d)", i, count, from, to) - Convey(name, func(c C) { - - err := rpc.Call(context.Background(), "bp_getBlockList", testCase, &result, callOpts...) + Convey(fmt.Sprintf("case#%d: %s", i, testCase.String()), func(c C) { + err := rpc.Call(context.Background(), "bp_getBlockList", testCase.Params(), &result, callOpts...) 
So(err, ShouldBeNil) - So(len(result), ShouldEqual, count) - for i, item := range result { - cp := blocksMockData[count+from-2-i] - conveyBlock(c, item, cp) + So(len(result.Blocks), ShouldEqual, len(testCase.ExpectedResults)) + So(result.Pagination, ShouldResemble, testCase.ExpectedPagination) + for i, block := range result.Blocks { + conveyBlock(c, block, testCase.ExpectedResults[len(result.Blocks)-i-1]) } }) } @@ -370,9 +419,7 @@ func TestJSONRPCService(t *testing.T) { var ( result []*models.Transaction invalidParameterCases = map[string][]interface{}{ - "limit < 5": {"nLwnh4a9oiOG9n4FtgboRw", "backward", 4}, - "limit > 100": {"nLwnh4a9oiOG9n4FtgboRw", "backward", 101}, - "unknown direction": {"nLwnh4a9oiOG9n4FtgboRw", "unknown", 10}, + "size over 1000": {"nLwnh4a9oiOG9n4FtgboRw", 1, 1001}, } ) @@ -392,11 +439,24 @@ func TestJSONRPCService(t *testing.T) { Convey("bp_getTransactionList should success on fetching valid number of transactions", func(c C) { var ( - result []*models.Transaction + result = new(api.BPGetTransactionListResponse) testCases = []bpGetTransactionListTestCase{ - {"5MX357EQDlMUxZVPjjXeFQ", "backward", 5, transactionsMockData[2:7]}, - {"CKI1kAfqOxWpmUug23OxTQ", "backward", 5, transactionsMockData[0:1]}, - {"CKI1kAfqOxWpmUug23OxTQ", "forward", 7, transactionsMockData[2:9]}, + { + "5MX357EQDlMUxZVPjjXeFQ", 1, 5, transactionsMockData[2:7], + &models.Pagination{Page: 1, Size: 5, Total: 7, Pages: 2}, + }, + { + "5MX357EQDlMUxZVPjjXeFQ", 2, 5, transactionsMockData[0:2], + &models.Pagination{Page: 2, Size: 5, Total: 7, Pages: 2}, + }, + { + "CKI1kAfqOxWpmUug23OxTQ", 1, 3, transactionsMockData[0:1], + &models.Pagination{Page: 1, Size: 3, Total: 1, Pages: 1}, + }, + { + "CKI1kAfqOxWpmUug23OxTQ", 2, 3, nil, + &models.Pagination{Page: 2, Size: 3, Total: 1, Pages: 1}, + }, } ) @@ -410,12 +470,72 @@ func TestJSONRPCService(t *testing.T) { callOpts..., ) So(err, ShouldBeNil) - So(len(result), ShouldEqual, len(testCase.ExpectedResults)) - for i, item := range 
result { - cp := testCase.ExpectedResults[i] - if testCase.Direction == "backward" { - cp = testCase.ExpectedResults[len(result)-i-1] - } + So(len(result.Transactions), ShouldEqual, len(testCase.ExpectedResults)) + So(result.Pagination, ShouldResemble, testCase.ExpectedPagination) + for i, item := range result.Transactions { + cp := testCase.ExpectedResults[len(result.Transactions)-i-1] + conveyTransaction(c, item, cp) + } + }) + } + }) + + Convey("bp_getTransactionListOfBlock should fail on invalid parameters", func(c C) { + var ( + result = new(api.BPGetTransactionListResponse) + testCases = map[string][]interface{}{ + "invalid block height": {0, 1, 10}, + "page size over 1000": {10, 1, 1001}, + } + ) + + for name, testCase := range testCases { + Convey(name, func() { + err := rpc.Call( + context.Background(), + "bp_getTransactionListOfBlock", + testCase, + &result, + callOpts..., + ) + So(err, ShouldNotBeNil) + }) + } + }) + + Convey("bp_getTransactionListOfBlock should success on fetching valid number of transactions", func(c C) { + var ( + result = new(api.BPGetTransactionListResponse) + testCases = []bpGetTransactionListOfBlockTestCase{ + { + 7, 1, 3, transactionsMockData[3:6], + &models.Pagination{Page: 1, Size: 3, Total: 5, Pages: 2}, + }, + { + 7, 2, 3, transactionsMockData[1:3], + &models.Pagination{Page: 2, Size: 3, Total: 5, Pages: 2}, + }, + { + 1, 1, 10, nil, + &models.Pagination{Page: 1, Size: 10, Total: 0, Pages: 0}, + }, + } + ) + + for i, testCase := range testCases { + Convey(fmt.Sprintf("case#%d: %s", i, testCase.String()), func() { + err := rpc.Call( + context.Background(), + "bp_getTransactionListOfBlock", + testCase.Params(), + &result, + callOpts..., + ) + So(err, ShouldBeNil) + So(len(result.Transactions), ShouldEqual, len(testCase.ExpectedResults)) + So(result.Pagination, ShouldResemble, testCase.ExpectedPagination) + for i, item := range result.Transactions { + cp := testCase.ExpectedResults[len(result.Transactions)-i-1] 
conveyTransaction(c, item, cp) } }) From 6acca77c25bd310d28086c468d07f195815fb57f Mon Sep 17 00:00:00 2001 From: Ggicci Date: Fri, 11 Jan 2019 18:17:12 +0800 Subject: [PATCH 126/302] Add missing utils file --- api/models/utils.go | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 api/models/utils.go diff --git a/api/models/utils.go b/api/models/utils.go new file mode 100644 index 000000000..b8c274a75 --- /dev/null +++ b/api/models/utils.go @@ -0,0 +1,22 @@ +package models + +import ( + "regexp" + "strings" +) + +var ( + selectFromRegexp = regexp.MustCompile("(?is)select\\s+.+?\\s+from") +) + +func buildCountSQL(querySQL string) string { + return selectFromRegexp.ReplaceAllString(querySQL, "SELECT count(*) FROM") +} + +func buildSQLWithConds(querySQL, countSQL string, conds []string) (newQuerySQL, newCountSQL string) { + whereSQL := "" + if len(conds) > 0 { + whereSQL = " WHERE " + strings.Join(conds, " AND ") + } + return querySQL + whereSQL, countSQL + whereSQL +} From f883e86c2eafd3a1224f6290d87f264f8a76a533 Mon Sep 17 00:00:00 2001 From: auxten Date: Mon, 14 Jan 2019 11:06:59 +0800 Subject: [PATCH 127/302] Optimize client error log --- client/conn.go | 2 +- client/driver.go | 11 +++++------ 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/client/conn.go b/client/conn.go index b0ef613af..383d47b6c 100644 --- a/client/conn.go +++ b/client/conn.go @@ -158,7 +158,7 @@ ackWorkerLoop: var ackRes types.AckResponse // send ack back if err = pc.Call(route.DBSAck.String(), ack, &ackRes); err != nil { - log.WithError(err).Warning("send ack failed") + log.WithError(err).Debug("send ack failed") continue } } diff --git a/client/driver.go b/client/driver.go index 173a64412..5a77a31ce 100644 --- a/client/driver.go +++ b/client/driver.go @@ -430,12 +430,12 @@ func runPeerListUpdater() (err error) { if _, err = getPeers(dbID, privKey); err != nil { log.WithField("db", dbID). WithError(err). 
- Warning("update peers failed") + Debug("update peers failed") // TODO(xq262144), better rpc remote error judgement if strings.Contains(err.Error(), bp.ErrNoSuchDatabase.Error()) { log.WithField("db", dbID). - Warning("database no longer exists, stopped peers update") + Warning("database no longer exists, stopping peers update") peerList.Delete(dbID) } } @@ -493,14 +493,13 @@ func getPeers(dbID proto.DatabaseID, privKey *asymmetric.PrivateKey) (peers *pro profileReq.DBID = dbID err = rpc.RequestBP(route.MCCQuerySQLChainProfile.String(), profileReq, profileResp) if err != nil { - log.WithError(err).Warning("get sqlchain profile failed in getPeers") + err = errors.Wrap(err, "get sqlchain profile failed in getPeers") return } nodeIDs := make([]proto.NodeID, len(profileResp.Profile.Miners)) if len(profileResp.Profile.Miners) <= 0 { - err = ErrInvalidProfile - log.WithError(err).Warning("unexpected error in getPeers") + err = errors.Wrap(ErrInvalidProfile, "unexpected error in getPeers") return } for i, mi := range profileResp.Profile.Miners { @@ -514,7 +513,7 @@ func getPeers(dbID proto.DatabaseID, privKey *asymmetric.PrivateKey) (peers *pro } err = peers.Sign(privKey) if err != nil { - log.WithError(err).Warning("sign peers failed in getPeers") + err = errors.Wrap(err, "sign peers failed in getPeers") return } From 4838e962799d4688ca8c110b36a2c13c7e48b528 Mon Sep 17 00:00:00 2001 From: auxten Date: Mon, 14 Jan 2019 11:09:59 +0800 Subject: [PATCH 128/302] Update testnet link to doc site --- README-zh.md | 5 ++--- README.md | 3 +-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/README-zh.md b/README-zh.md index 6d7b08c2d..ca6412924 100644 --- a/README-zh.md +++ b/README-zh.md @@ -32,7 +32,7 @@ CovenantSQL 是应用区块链技术构建的去中心化 SQL 云数据库。 CovenantSQL 具备以下特点: -- **SQL接口**: 支持 SQL-92 标准,传统 App 几乎0修改即可数据上链 +- **SQL接口**: 支持 SQL-92 标准,传统 App 几乎 0 修改即可数据上链 - **去中心化**: 基于独有的高效拜占庭容错共识算法 Kayak 实现的去中心化结构 - **不可篡改**: CovenantSQL 中的 Query 历史记录是可追溯的 - **隐私**: 如果 Bitcoin 
是用户的钱包,那么 CovenantSQL 就是是用户的去中心化数据库 @@ -123,8 +123,7 @@ CovenantSQL仍在建设中,测试网已经发布,[尝试一下](https://test ## 测试网 -- [快捷入口](https://testnet.covenantsql.io/quickstart) -- [测试网水龙头](https://testnet.covenantsql.io/) +- [快捷入口](https://developers.covenantsql.io) ## 联系我们 diff --git a/README.md b/README.md index a47354134..be0318fef 100644 --- a/README.md +++ b/README.md @@ -106,8 +106,7 @@ Watch us or [![follow on Twitter](https://img.shields.io/twitter/url/https/twitt ## TestNet -- [Quick Start](https://testnet.covenantsql.io/quickstart) -- [TestNet faucet](https://testnet.covenantsql.io/) +- [Quick Start](https://developers.covenantsql.io) ## Contact From 7f02a00bc9d4d598b77b64af052a926690589aae Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 14 Jan 2019 18:17:41 +0800 Subject: [PATCH 129/302] Use one liner go test bench command --- .gitlab-ci.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 819e5f5a2..73b0cf720 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -30,9 +30,7 @@ test-my-project: - go test -v -race -failfast -parallel 16 -cpu 16 $(go list ./... 
| grep -v "/vendor/") -coverprofile cover.out - cd rpc && go test -test.bench ^BenchmarkPersistentCaller_Call$ -test.run ^$ && cd - - bash cleanupDB.sh || true - - cd cmd/cql-minerd - - go test -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ - - go test -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ - - cd - + - cd cmd/cql-minerd && go test -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ && cd - + - cd cmd/cql-minerd && go test -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ && cd - - gocovmerge cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f cover.out - bash <(curl -s https://codecov.io/bash) From 0dc598f49c66060d43eafe68bfa7e256d0174d2d Mon Sep 17 00:00:00 2001 From: auxten Date: Mon, 14 Jan 2019 20:47:21 +0800 Subject: [PATCH 130/302] Fix String implementation to type instead of pointer --- blockproducer/bpinfo.go | 2 +- cmd/cql/main.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/blockproducer/bpinfo.go b/blockproducer/bpinfo.go index d89732dd3..c9adb5b00 100644 --- a/blockproducer/bpinfo.go +++ b/blockproducer/bpinfo.go @@ -30,7 +30,7 @@ type blockProducerInfo struct { } // String implements fmt.Stringer. -func (i *blockProducerInfo) String() string { +func (i blockProducerInfo) String() string { return fmt.Sprintf("[%d/%d|%s] %s", i.rank+1, i.total, i.role, i.nodeID) } diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 05225b3e4..6e0a01c3f 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -89,7 +89,7 @@ func (v *varsFlag) Get() []string { return append([]string{}, v.vars...) 
} -func (v *varsFlag) String() string { +func (v varsFlag) String() string { return fmt.Sprintf("%#v", v.vars) } From 58d5c7974eff257d443999a29a586b6569cbcbe5 Mon Sep 17 00:00:00 2001 From: auxten Date: Mon, 14 Jan 2019 20:48:43 +0800 Subject: [PATCH 131/302] Ignore hsp for PermStat TransactionState --- blockproducer/interfaces/transaction.go | 1 + types/account.go | 1 + types/request_type.go | 17 +++++++++++++---- 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/blockproducer/interfaces/transaction.go b/blockproducer/interfaces/transaction.go index 619c5ea1c..efcc586c3 100644 --- a/blockproducer/interfaces/transaction.go +++ b/blockproducer/interfaces/transaction.go @@ -26,6 +26,7 @@ import ( ) //go:generate hsp +//hsp:ignore TransactionState // AccountNonce defines the an account nonce. type AccountNonce uint32 diff --git a/types/account.go b/types/account.go index 8f8b7295e..d6a80efb7 100644 --- a/types/account.go +++ b/types/account.go @@ -22,6 +22,7 @@ import ( ) //go:generate hsp +//hsp:ignore PermStat // SQLChainRole defines roles of account in a SQLChain. type SQLChainRole byte diff --git a/types/request_type.go b/types/request_type.go index 241d7c982..13c9a5510 100644 --- a/types/request_type.go +++ b/types/request_type.go @@ -77,8 +77,8 @@ type QueryKey struct { } // String implements fmt.Stringer for logging purpose. -func (k *QueryKey) String() string { - return fmt.Sprintf("%s#%016x#%016x", string(k.NodeID[:8]), k.ConnectionID, k.SeqNo) +func (k QueryKey) String() string { + return fmt.Sprintf("%s#%016x#%016x", string(k.NodeID[len(k.NodeID)-8:]), k.ConnectionID, k.SeqNo) } // SignedRequestHeader defines a signed query request header. @@ -90,8 +90,9 @@ type SignedRequestHeader struct { // Request defines a complete query request. 
type Request struct { proto.Envelope - Header SignedRequestHeader `json:"h"` - Payload RequestPayload `json:"p"` + Header SignedRequestHeader `json:"h"` + Payload RequestPayload `json:"p"` + _marshalCache []byte `json:"-"` } // String implements fmt.Stringer for logging purpose. @@ -139,6 +140,14 @@ func (r *Request) Sign(signer *asymmetric.PrivateKey) (err error) { return r.Header.Sign(signer) } +func (r *Request) SetMarshalCache(buf []byte) { + r._marshalCache = buf +} + +func (r *Request) GetMarshalCache() (buf []byte) { + return r._marshalCache +} + // GetQueryKey returns a unique query key of this request. func (sh *SignedRequestHeader) GetQueryKey() QueryKey { return QueryKey{ From 35b13e2f54f1b135bc73a9f3433f9c0b9515828b Mon Sep 17 00:00:00 2001 From: auxten Date: Mon, 14 Jan 2019 20:49:38 +0800 Subject: [PATCH 132/302] Add query payload encode cache --- worker/db_storage.go | 8 +++++++ worker/db_test.go | 52 ++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 58 insertions(+), 2 deletions(-) diff --git a/worker/db_storage.go b/worker/db_storage.go index 1461ac49e..f20569e7e 100644 --- a/worker/db_storage.go +++ b/worker/db_storage.go @@ -31,6 +31,13 @@ import ( // EncodePayload implements kayak.types.Handler.EncodePayload. func (db *Database) EncodePayload(request interface{}) (data []byte, err error) { + if req, ok := request.(*types.Request); ok { + data = req.GetMarshalCache() + if data != nil { + return + } + } + var buf *bytes.Buffer if buf, err = utils.EncodeMsgPack(request); err != nil { @@ -51,6 +58,7 @@ func (db *Database) DecodePayload(data []byte) (request interface{}, err error) return } + req.SetMarshalCache(data) request = req return diff --git a/worker/db_test.go b/worker/db_test.go index aac4ea475..acbb89438 100644 --- a/worker/db_test.go +++ b/worker/db_test.go @@ -24,12 +24,16 @@ import ( "math/rand" "os" "path/filepath" + "reflect" "runtime" "strings" "sync" "testing" "time" + "github.com/fortytw2/leaktest" + . 
"github.com/smartystreets/goconvey/convey" + "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/consistent" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" @@ -43,8 +47,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" - "github.com/fortytw2/leaktest" - . "github.com/smartystreets/goconvey/convey" ) var rootHash = hash.Hash{} @@ -555,6 +557,52 @@ func TestDatabaseRecycle(t *testing.T) { }) } +func TestDatabase_EncodePayload(t *testing.T) { + Convey("encode payload cache", t, func() { + db := &Database{} + req := &types.Request{ + Envelope: proto.Envelope{ + Version: "", + TTL: 0, + Expire: 0, + NodeID: &proto.RawNodeID{ + Hash: hash.Hash{}, + }, + }, + Header: types.SignedRequestHeader{ + RequestHeader: types.RequestHeader{ + QueryType: 1, + NodeID: "0000000000000000000000000000000000000000000000000000000000000001", + DatabaseID: "1", + ConnectionID: 1, + SeqNo: 1, + Timestamp: time.Now().UTC(), + BatchCount: 1, + QueriesHash: hash.Hash{}, + }, + }, + Payload: types.RequestPayload{ + Queries: []types.Query{ + { + Pattern: "xxx", + Args: nil, + }, + }, + }, + } + encoded, err := db.EncodePayload(req) + So(err, ShouldBeNil) + req2, err := db.DecodePayload(encoded) + So(err, ShouldBeNil) + So(req.Header, ShouldResemble, req2.(*types.Request).Header) + So(reflect.DeepEqual(req.Header, req2.(*types.Request).Header), ShouldBeTrue) + So(reflect.DeepEqual(req.Payload, req2.(*types.Request).Payload), ShouldBeTrue) + encoded2, err := db.EncodePayload(req) + So(err, ShouldBeNil) + So(encoded2, ShouldResemble, encoded) + }) +} + func buildAck(res *types.Response) (ack *types.Ack, err error) { // get node id var nodeID proto.NodeID From 8fec449c841742c294edc1207f533a88d6235685 Mon Sep 17 00:00:00 2001 From: auxten Date: Mon, 14 Jan 2019 21:04:28 +0800 Subject: [PATCH 133/302] Fix back func (v *varsFlag) String() string --- blockproducer/bpinfo.go | 
2 +- cmd/cql/main.go | 2 +- types/request_type.go | 2 ++ 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/blockproducer/bpinfo.go b/blockproducer/bpinfo.go index c9adb5b00..d89732dd3 100644 --- a/blockproducer/bpinfo.go +++ b/blockproducer/bpinfo.go @@ -30,7 +30,7 @@ type blockProducerInfo struct { } // String implements fmt.Stringer. -func (i blockProducerInfo) String() string { +func (i *blockProducerInfo) String() string { return fmt.Sprintf("[%d/%d|%s] %s", i.rank+1, i.total, i.role, i.nodeID) } diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 6e0a01c3f..05225b3e4 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -89,7 +89,7 @@ func (v *varsFlag) Get() []string { return append([]string{}, v.vars...) } -func (v varsFlag) String() string { +func (v *varsFlag) String() string { return fmt.Sprintf("%#v", v.vars) } diff --git a/types/request_type.go b/types/request_type.go index 13c9a5510..651b4990c 100644 --- a/types/request_type.go +++ b/types/request_type.go @@ -140,10 +140,12 @@ func (r *Request) Sign(signer *asymmetric.PrivateKey) (err error) { return r.Header.Sign(signer) } +// SetMarshalCache sets _marshalCache func (r *Request) SetMarshalCache(buf []byte) { r._marshalCache = buf } +// GetMarshalCache gets _marshalCache func (r *Request) GetMarshalCache() (buf []byte) { return r._marshalCache } From 5ce48ac8a4ce36d025a4fdb35f6301cd94c5e220 Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 14 Jan 2019 21:25:58 +0800 Subject: [PATCH 134/302] Fix gitlab ci script pipline will not return failed when go test failed. --- .gitlab-ci.yml | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 73b0cf720..e76ba3b08 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -28,9 +28,12 @@ test-my-project: - make clean - make use_all_cores - go test -v -race -failfast -parallel 16 -cpu 16 $(go list ./... 
| grep -v "/vendor/") -coverprofile cover.out - - cd rpc && go test -test.bench ^BenchmarkPersistentCaller_Call$ -test.run ^$ && cd - + - cd rpc && go test -test.bench ^BenchmarkPersistentCaller_Call$ -test.run ^$ + - cd - - bash cleanupDB.sh || true - - cd cmd/cql-minerd && go test -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ && cd - - - cd cmd/cql-minerd && go test -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ && cd - + - cd cmd/cql-minerd && go test -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ + - cd - + - cd cmd/cql-minerd && go test -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ + - cd - - gocovmerge cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f cover.out - bash <(curl -s https://codecov.io/bash) From 639e98bf9059ac560b80b19cd92ece9999797aca Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 14 Jan 2019 21:39:41 +0800 Subject: [PATCH 135/302] Strip all cd commands in test command --- .gitlab-ci.yml | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index e76ba3b08..92fc37e5b 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -28,12 +28,9 @@ test-my-project: - make clean - make use_all_cores - go test -v -race -failfast -parallel 16 -cpu 16 $(go list ./... 
| grep -v "/vendor/") -coverprofile cover.out - - cd rpc && go test -test.bench ^BenchmarkPersistentCaller_Call$ -test.run ^$ - - cd - + - go test -bench=^BenchmarkPersistentCaller_Call$ -run ^$ ./rpc/ - bash cleanupDB.sh || true - - cd cmd/cql-minerd && go test -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ - - cd - - - cd cmd/cql-minerd && go test -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ - - cd - + - go test -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ + - go test -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ - gocovmerge cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f cover.out - bash <(curl -s https://codecov.io/bash) From cd53a2d817c7f9000c0610da30469a5875041229 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Tue, 15 Jan 2019 12:22:42 +0800 Subject: [PATCH 136/302] Add SetHash/VerifyHash methods for verifier --- blockproducer/blocknode.go | 2 +- blockproducer/blocknode_test.go | 41 +++++++++++------ blockproducer/chain.go | 37 ++++++--------- blockproducer/chain_test.go | 4 +- blockproducer/errors.go | 4 +- cmd/cqld/bootstrap.go | 16 ++++--- crypto/verifier/common.go | 48 ++++++++++++++++---- types/bp_block.go | 80 +++++++++++++++++---------------- types/bp_block_gen.go | 55 +++++++---------------- types/bp_block_test.go | 42 ++++++++++++++++- 10 files changed, 194 insertions(+), 135 deletions(-) diff --git a/blockproducer/blocknode.go b/blockproducer/blocknode.go index 8a0aaacc8..317ee655f 100644 --- a/blockproducer/blocknode.go +++ b/blockproducer/blocknode.go @@ -43,7 +43,7 @@ func newBlockNode(h uint32, b *types.BPBlock, p *blockNode) *blockNode { }(), height: h, - hash: b.SignedHeader.BlockHash, + hash: b.SignedHeader.DataHash, block: b, } } diff --git a/blockproducer/blocknode_test.go b/blockproducer/blocknode_test.go index dcebfadb3..09299abdc 100644 --- a/blockproducer/blocknode_test.go +++ b/blockproducer/blocknode_test.go @@ -20,6 +20,7 @@ import ( 
"testing" "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" "github.com/CovenantSQL/CovenantSQL/types" . "github.com/smartystreets/goconvey/convey" ) @@ -29,39 +30,49 @@ func TestBlockNode(t *testing.T) { var ( b0 = &types.BPBlock{ SignedHeader: types.BPSignedHeader{ - BlockHash: hash.Hash{0x1}, + DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ + DataHash: hash.Hash{0x1}, + }, }, } b1 = &types.BPBlock{ SignedHeader: types.BPSignedHeader{ BPHeader: types.BPHeader{ - ParentHash: b0.SignedHeader.BlockHash, + ParentHash: b0.SignedHeader.DataHash, + }, + DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ + DataHash: hash.Hash{0x2}, }, - BlockHash: hash.Hash{0x2}, }, } b2 = &types.BPBlock{ SignedHeader: types.BPSignedHeader{ BPHeader: types.BPHeader{ - ParentHash: b1.SignedHeader.BlockHash, + ParentHash: b1.SignedHeader.DataHash, + }, + DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ + DataHash: hash.Hash{0x3}, }, - BlockHash: hash.Hash{0x3}, }, } b3 = &types.BPBlock{ SignedHeader: types.BPSignedHeader{ BPHeader: types.BPHeader{ - ParentHash: b2.SignedHeader.BlockHash, + ParentHash: b2.SignedHeader.DataHash, + }, + DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ + DataHash: hash.Hash{0x4}, }, - BlockHash: hash.Hash{0x4}, }, } b4 = &types.BPBlock{ SignedHeader: types.BPSignedHeader{ BPHeader: types.BPHeader{ - ParentHash: b3.SignedHeader.BlockHash, + ParentHash: b3.SignedHeader.DataHash, + }, + DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ + DataHash: hash.Hash{0x5}, }, - BlockHash: hash.Hash{0x5}, }, } n0 = newBlockNode(0, b0, nil) @@ -73,17 +84,21 @@ func TestBlockNode(t *testing.T) { b3p = &types.BPBlock{ SignedHeader: types.BPSignedHeader{ BPHeader: types.BPHeader{ - ParentHash: b2.SignedHeader.BlockHash, + ParentHash: b2.SignedHeader.DataHash, + }, + DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ + 
DataHash: hash.Hash{0x6}, }, - BlockHash: hash.Hash{0x6}, }, } b4p = &types.BPBlock{ SignedHeader: types.BPSignedHeader{ BPHeader: types.BPHeader{ - ParentHash: b3p.SignedHeader.BlockHash, + ParentHash: b3p.SignedHeader.DataHash, + }, + DefaultHashSignVerifierImpl: verifier.DefaultHashSignVerifierImpl{ + DataHash: hash.Hash{0x7}, }, - BlockHash: hash.Hash{0x7}, }, } n3p = newBlockNode(3, b3p, n2) diff --git a/blockproducer/chain.go b/blockproducer/chain.go index be25dd3b2..08acc20eb 100644 --- a/blockproducer/chain.go +++ b/blockproducer/chain.go @@ -31,7 +31,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/merkle" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" @@ -113,11 +112,20 @@ func NewChainWithContext(ctx context.Context, cfg *Config) (c *Chain, err error) bus = chainbus.New() ) - if fi, err := os.Stat(cfg.DataFile); err == nil && fi.Mode().IsRegular() { - existed = true + // Verify genesis block in config + if cfg.Genesis == nil { + err = ErrNilGenesis + return + } + if ierr = cfg.Genesis.VerifyHash(); ierr != nil { + err = errors.Wrap(ierr, "failed to verify genesis block hash") + return } // Open storage + if fi, err := os.Stat(cfg.DataFile); err == nil && fi.Mode().IsRegular() { + existed = true + } if st, ierr = openStorage(fmt.Sprintf("file:%s", cfg.DataFile)); ierr != nil { err = errors.Wrap(ierr, "failed to open storage") return @@ -134,7 +142,7 @@ func NewChainWithContext(ctx context.Context, cfg *Config) (c *Chain, err error) } var sps = init.compileChanges(nil) sps = append(sps, addBlock(0, cfg.Genesis)) - sps = append(sps, updateIrreversible(cfg.Genesis.SignedHeader.BlockHash)) + sps = append(sps, updateIrreversible(cfg.Genesis.SignedHeader.DataHash)) if ierr = store(st, sps, nil); ierr != nil { err = 
errors.Wrap(ierr, "failed to initialize storage") return @@ -279,28 +287,9 @@ func (c *Chain) Stop() (err error) { return } -// checkBlock has following steps: 1. check parent block 2. checkTx 2. merkle tree 3. Hash 4. Signature. -func (c *Chain) checkBlock(b *types.BPBlock) (err error) { - rootHash := merkle.NewMerkle(b.GetTxHashes()).GetRoot() - if !b.SignedHeader.MerkleRoot.IsEqual(rootHash) { - return ErrInvalidMerkleTreeRoot - } - - enc, err := b.SignedHeader.BPHeader.MarshalHash() - if err != nil { - return err - } - h := hash.THashH(enc) - if !b.BlockHash().IsEqual(&h) { - return ErrInvalidHash - } - - return nil -} - func (c *Chain) pushBlock(b *types.BPBlock) (err error) { var ierr error - if ierr = c.checkBlock(b); ierr != nil { + if ierr = b.Verify(); ierr != nil { err = errors.Wrap(ierr, "failed to check block") return } diff --git a/blockproducer/chain_test.go b/blockproducer/chain_test.go index 58fd88bf0..79c720024 100644 --- a/blockproducer/chain_test.go +++ b/blockproducer/chain_test.go @@ -121,7 +121,7 @@ func TestChain(t *testing.T) { }), }, } - err = genesis.PackAndSignBlock(testingPrivateKey) + err = genesis.SetHash() So(err, ShouldBeNil) begin = genesis.Timestamp() @@ -147,7 +147,7 @@ func TestChain(t *testing.T) { Convey("A new chain running before genesis time should be waiting for genesis", func() { config.Genesis.SignedHeader.Timestamp = time.Now().Add(24 * time.Hour) - err = genesis.PackAndSignBlock(testingPrivateKey) + err = genesis.SetHash() So(err, ShouldBeNil) chain, err = NewChain(config) So(err, ShouldBeNil) diff --git a/blockproducer/errors.go b/blockproducer/errors.go index 4fee3209f..5599f8d23 100644 --- a/blockproducer/errors.go +++ b/blockproducer/errors.go @@ -27,8 +27,6 @@ var ( ErrInvalidHash = errors.New("Hash is invalid") // ErrExistedTx defines existed tx error. ErrExistedTx = errors.New("Tx existed") - // ErrInvalidMerkleTreeRoot defines invalid merkle tree root error. 
- ErrInvalidMerkleTreeRoot = errors.New("Block merkle tree root does not match the tx hashes") // ErrParentNotMatch defines invalid parent hash. ErrParentNotMatch = errors.New("Block's parent hash cannot match best block") // ErrTooManyTransactionsInBlock defines error of too many transactions in a block. @@ -70,6 +68,8 @@ var ( ErrMinerUserNotMatch = errors.New("miner and user do not match") // ErrInsufficientAdvancePayment indicates that the advance payment is insufficient. ErrInsufficientAdvancePayment = errors.New("insufficient advance payment") + // ErrNilGenesis indicates that the genesis block is nil in config. + ErrNilGenesis = errors.New("nil genesis block") // ErrMultipleGenesis indicates that there're multiple genesis blocks while loading. ErrMultipleGenesis = errors.New("multiple genesis blocks") // ErrInvalidGasPrice indicates that the gas price is invalid. diff --git a/cmd/cqld/bootstrap.go b/cmd/cqld/bootstrap.go index 420f8a710..55c1ecb62 100644 --- a/cmd/cqld/bootstrap.go +++ b/cmd/cqld/bootstrap.go @@ -50,7 +50,10 @@ const ( func runNode(nodeID proto.NodeID, listenAddr string) (err error) { rootPath := conf.GConf.WorkingRoot - genesis := loadGenesis() + genesis, err := loadGenesis() + if err != nil { + return + } var masterKey []byte if !conf.GConf.IsTestMode { @@ -226,11 +229,11 @@ func initKayakTwoPC(rootDir string, node *proto.Node, peers *proto.Peers, h kt.H return } -func loadGenesis() *types.BPBlock { +func loadGenesis() (genesis *types.BPBlock, err error) { genesisInfo := conf.GConf.BP.BPGenesis log.WithField("config", genesisInfo).Info("load genesis config") - genesis := &types.BPBlock{ + genesis = &types.BPBlock{ SignedHeader: types.BPSignedHeader{ BPHeader: types.BPHeader{ Version: genesisInfo.Version, @@ -239,7 +242,6 @@ func loadGenesis() *types.BPBlock { ParentHash: genesisInfo.ParentHash, Timestamp: genesisInfo.Timestamp, }, - BlockHash: genesisInfo.BlockHash, }, } @@ -256,5 +258,9 @@ func loadGenesis() *types.BPBlock { })) } - 
return genesis + // Rewrite genesis merkle and block hash + if err = genesis.SetHash(); err != nil { + return + } + return } diff --git a/crypto/verifier/common.go b/crypto/verifier/common.go index 7885eb67a..2167da208 100644 --- a/crypto/verifier/common.go +++ b/crypto/verifier/common.go @@ -33,7 +33,11 @@ type MarshalHasher interface { // MarshalHasher, can be signed by a private key and verified later. type HashSignVerifier interface { Hash() hash.Hash + SetHash(MarshalHasher) error + SignHash(*ca.PrivateKey) error Sign(MarshalHasher, *ca.PrivateKey) error + VerifyHash(MarshalHasher) error + VerifySignature() error Verify(MarshalHasher) error } @@ -49,23 +53,37 @@ func (i *DefaultHashSignVerifierImpl) Hash() hash.Hash { return i.DataHash } -// Sign implements HashSignVerifier.Sign. -func (i *DefaultHashSignVerifierImpl) Sign(mh MarshalHasher, signer *ca.PrivateKey) (err error) { +// SetHash implements HashSignVerifier.SetHash. +func (i *DefaultHashSignVerifierImpl) SetHash(mh MarshalHasher) (err error) { var enc []byte if enc, err = mh.MarshalHash(); err != nil { return } - var h = hash.THashH(enc) - if i.Signature, err = signer.Sign(h[:]); err != nil { + i.DataHash = hash.THashH(enc) + return +} + +// SignHash implements HashSignVerifier.SignHash. +func (i *DefaultHashSignVerifierImpl) SignHash(signer *ca.PrivateKey) (err error) { + if i.Signature, err = signer.Sign(i.DataHash[:]); err != nil { return } - i.DataHash = h i.Signee = signer.PubKey() return } -// Verify implements HashSignVerifier.Verify. -func (i *DefaultHashSignVerifierImpl) Verify(mh MarshalHasher) (err error) { +// Sign implements HashSignVerifier.Sign. +func (i *DefaultHashSignVerifierImpl) Sign(mh MarshalHasher, signer *ca.PrivateKey) (err error) { + // Set hash + if err = i.SetHash(mh); err != nil { + return + } + err = i.SignHash(signer) + return +} + +// VerifyHash implements HashSignVerifier.VerifyHash. 
+func (i *DefaultHashSignVerifierImpl) VerifyHash(mh MarshalHasher) (err error) { var enc []byte if enc, err = mh.MarshalHash(); err != nil { return @@ -75,9 +93,23 @@ func (i *DefaultHashSignVerifierImpl) Verify(mh MarshalHasher) (err error) { err = errors.WithStack(ErrHashValueNotMatch) return } - if i.Signature == nil || i.Signee == nil || !i.Signature.Verify(h[:], i.Signee) { + return +} + +// VerifySignature implements HashSignVerifier.VerifySignature. +func (i *DefaultHashSignVerifierImpl) VerifySignature() (err error) { + if i.Signature == nil || i.Signee == nil || !i.Signature.Verify(i.DataHash[:], i.Signee) { err = errors.WithStack(ErrSignatureNotMatch) return } return } + +// Verify implements HashSignVerifier.Verify. +func (i *DefaultHashSignVerifierImpl) Verify(mh MarshalHasher) (err error) { + if err = i.VerifyHash(mh); err != nil { + return + } + err = i.VerifySignature() + return +} diff --git a/types/bp_block.go b/types/bp_block.go index 72c721e10..4fd05a3ad 100644 --- a/types/bp_block.go +++ b/types/bp_block.go @@ -22,6 +22,7 @@ import ( pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" "github.com/CovenantSQL/CovenantSQL/merkle" "github.com/CovenantSQL/CovenantSQL/proto" ) @@ -40,18 +41,23 @@ type BPHeader struct { // BPSignedHeader defines the main chain header with the signature. type BPSignedHeader struct { BPHeader - BlockHash hash.Hash - Signee *asymmetric.PublicKey - Signature *asymmetric.Signature + verifier.DefaultHashSignVerifierImpl } -// Verify verifies the signature. 
-func (s *BPSignedHeader) Verify() error { - if !s.Signature.Verify(s.BlockHash[:], s.Signee) { - return ErrSignVerification - } +func (s *BPSignedHeader) verifyHash() error { + return s.DefaultHashSignVerifierImpl.VerifyHash(&s.BPHeader) +} - return nil +func (s *BPSignedHeader) verify() error { + return s.DefaultHashSignVerifierImpl.Verify(&s.BPHeader) +} + +func (s *BPSignedHeader) setHash() error { + return s.DefaultHashSignVerifierImpl.SetHash(&s.BPHeader) +} + +func (s *BPSignedHeader) sign(signer *asymmetric.PrivateKey) error { + return s.DefaultHashSignVerifierImpl.Sign(&s.BPHeader, signer) } // BPBlock defines the main chain block. @@ -73,47 +79,45 @@ func (b *BPBlock) GetTxHashes() []*hash.Hash { return hs } -// PackAndSignBlock computes block's hash and sign it. -func (b *BPBlock) PackAndSignBlock(signer *asymmetric.PrivateKey) error { - hs := b.GetTxHashes() - - b.SignedHeader.MerkleRoot = *merkle.NewMerkle(hs).GetRoot() - enc, err := b.SignedHeader.BPHeader.MarshalHash() +func (b *BPBlock) setMerkleRoot() { + var merkleRoot = merkle.NewMerkle(b.GetTxHashes()).GetRoot() + b.SignedHeader.MerkleRoot = *merkleRoot +} - if err != nil { - return err +func (b *BPBlock) verifyMerkleRoot() error { + var merkleRoot = *merkle.NewMerkle(b.GetTxHashes()).GetRoot() + if !merkleRoot.IsEqual(&b.SignedHeader.MerkleRoot) { + return ErrMerkleRootVerification } + return nil +} - b.SignedHeader.BlockHash = hash.THashH(enc) - b.SignedHeader.Signature, err = signer.Sign(b.SignedHeader.BlockHash[:]) - b.SignedHeader.Signee = signer.PubKey() +// SetHash sets the block header hash, including the merkle root of the packed transactions. +func (b *BPBlock) SetHash() error { + b.setMerkleRoot() + return b.SignedHeader.setHash() +} - if err != nil { +// VerifyHash verifies the block header hash, including the merkle root of the packed transactions. 
+func (b *BPBlock) VerifyHash() error { + if err := b.verifyMerkleRoot(); err != nil { return err } + return b.SignedHeader.verifyHash() +} - return nil +// PackAndSignBlock computes block's hash and sign it. +func (b *BPBlock) PackAndSignBlock(signer *asymmetric.PrivateKey) error { + b.setMerkleRoot() + return b.SignedHeader.sign(signer) } // Verify verifies whether the block is valid. func (b *BPBlock) Verify() error { - hs := b.GetTxHashes() - merkleRoot := *merkle.NewMerkle(hs).GetRoot() - if !merkleRoot.IsEqual(&b.SignedHeader.MerkleRoot) { - return ErrMerkleRootVerification - } - - enc, err := b.SignedHeader.BPHeader.MarshalHash() - if err != nil { + if err := b.verifyMerkleRoot(); err != nil { return err } - - h := hash.THashH(enc) - if !h.IsEqual(&b.SignedHeader.BlockHash) { - return ErrHashVerification - } - - return b.SignedHeader.Verify() + return b.SignedHeader.verify() } // Timestamp returns timestamp of block. @@ -133,5 +137,5 @@ func (b *BPBlock) ParentHash() *hash.Hash { // BlockHash returns the parent hash field of the block header. 
func (b *BPBlock) BlockHash() *hash.Hash { - return &b.SignedHeader.BlockHash + return &b.SignedHeader.DataHash } diff --git a/types/bp_block_gen.go b/types/bp_block_gen.go index 5afbe938a..baa2fd975 100644 --- a/types/bp_block_gen.go +++ b/types/bp_block_gen.go @@ -11,8 +11,15 @@ func (z *BPBlock) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) - if oTemp, err := z.SignedHeader.MarshalHash(); err != nil { + // map header, size 2 + o = append(o, 0x82, 0x82, 0x82, 0x82) + if oTemp, err := z.SignedHeader.BPHeader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = append(o, 0x82) + if oTemp, err := z.SignedHeader.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -31,7 +38,7 @@ func (z *BPBlock) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BPBlock) Msgsize() (s int) { - s = 1 + 13 + z.SignedHeader.Msgsize() + 13 + hsp.ArrayHeaderSize + s = 1 + 13 + 1 + 9 + z.SignedHeader.BPHeader.Msgsize() + 28 + z.SignedHeader.DefaultHashSignVerifierImpl.Msgsize() + 13 + hsp.ArrayHeaderSize for za0001 := range z.Transactions { s += z.Transactions[za0001].Msgsize() } @@ -78,35 +85,15 @@ func (z *BPHeader) Msgsize() (s int) { func (z *BPSignedHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) - // map header, size 4 - o = append(o, 0x84, 0x84) - if z.Signee == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Signee.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x84) - if z.Signature == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Signature.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x84) + 
// map header, size 2 + o = append(o, 0x82, 0x82) if oTemp, err := z.BPHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x84) - if oTemp, err := z.BlockHash.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -116,18 +103,6 @@ func (z *BPSignedHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BPSignedHeader) Msgsize() (s int) { - s = 1 + 7 - if z.Signee == nil { - s += hsp.NilSize - } else { - s += z.Signee.Msgsize() - } - s += 10 - if z.Signature == nil { - s += hsp.NilSize - } else { - s += z.Signature.Msgsize() - } - s += 9 + z.BPHeader.Msgsize() + 10 + z.BlockHash.Msgsize() + s = 1 + 9 + z.BPHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() return } diff --git a/types/bp_block_test.go b/types/bp_block_test.go index 60bf2d793..3c492dd97 100644 --- a/types/bp_block_test.go +++ b/types/bp_block_test.go @@ -22,7 +22,9 @@ import ( "reflect" "testing" + "github.com/CovenantSQL/CovenantSQL/crypto/verifier" "github.com/CovenantSQL/CovenantSQL/utils" + "github.com/pkg/errors" ) func TestHeader_MarshalUnmarshalBinary(t *testing.T) { @@ -114,14 +116,50 @@ func TestBlock_PackAndSignBlock(t *testing.T) { t.Fatalf("failed to generate block: %v", err) } + err = block.verifyMerkleRoot() + if err != nil { + t.Fatalf("failed to verify: %v", err) + } + + err = block.VerifyHash() + if err != nil { + t.Fatalf("failed to verify: %v", err) + } + + err = block.Verify() + if err != nil { + t.Fatalf("failed to verify: %v", err) + } + + block.SignedHeader.DataHash[0]++ err = block.Verify() + if errors.Cause(err) != verifier.ErrHashValueNotMatch { + t.Fatalf("unexpected error: %v", err) + } + err = block.VerifyHash() + if errors.Cause(err) != verifier.ErrHashValueNotMatch { + 
t.Fatalf("unexpected error: %v", err) + } + err = block.SetHash() + if err != nil { + t.Fatalf("failed to set hash: %v", err) + } + err = block.VerifyHash() if err != nil { t.Fatalf("failed to verify: %v", err) } - block.SignedHeader.BlockHash[0]++ + block.SignedHeader.MerkleRoot[0]++ err = block.Verify() - if err != ErrHashVerification { + if err != ErrMerkleRootVerification { + t.Fatalf("unexpected error: %v", err) + } + err = block.VerifyHash() + if err != ErrMerkleRootVerification { + t.Fatalf("unexpected error: %v", err) + } + err = block.verifyMerkleRoot() + if err != ErrMerkleRootVerification { t.Fatalf("unexpected error: %v", err) } From 77bf73c639d9510bbc8d5b0303e8c019c13535f9 Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 15 Jan 2019 16:44:28 +0800 Subject: [PATCH 137/302] Add signature verify cache --- crypto/asymmetric/signature.go | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/crypto/asymmetric/signature.go b/crypto/asymmetric/signature.go index ee3a02b9a..b0d11a55a 100644 --- a/crypto/asymmetric/signature.go +++ b/crypto/asymmetric/signature.go @@ -17,20 +17,24 @@ package asymmetric import ( + "context" "crypto/elliptic" "errors" "math/big" + "runtime/trace" "github.com/CovenantSQL/CovenantSQL/crypto/secp256k1" "github.com/CovenantSQL/CovenantSQL/utils" hsp "github.com/CovenantSQL/HashStablePack/marshalhash" ec "github.com/btcsuite/btcd/btcec" + lru "github.com/hashicorp/golang-lru" ) var ( // BypassSignature is the flag indicate if bypassing signature sign & verify BypassSignature = false bypassS *Signature + verifyCache *lru.Cache ) // For test Signature.Sign mock @@ -38,6 +42,7 @@ func init() { priv, _ := ec.NewPrivateKey(ec.S256()) ss, _ := (*ec.PrivateKey)(priv).Sign(([]byte)("00000000000000000000000000000000")) bypassS = (*Signature)(ss) + verifyCache, _ = lru.New(256) } // Signature is a type representing an ecdsa signature. 
@@ -71,6 +76,7 @@ func (s *Signature) IsEqual(signature *Signature) bool { // a larger message) using the private key. Produced signature is deterministic (same message and // same key yield the same signature) and canonical in accordance with RFC6979 and BIP0062. func (private *PrivateKey) Sign(hash []byte) (*Signature, error) { + defer trace.StartRegion(context.Background(), "SignatureSign").End() if len(hash) != 32 { return nil, errors.New("only hash can be signed") } @@ -85,12 +91,14 @@ func (private *PrivateKey) Sign(hash []byte) (*Signature, error) { S: new(big.Int).SetBytes(sb[32:64]), } //s, e := (*ec.PrivateKey)(private).Sign(hash) + return (*Signature)(s), e } // Verify calls ecdsa.Verify to verify the signature of hash using the public key. It returns true // if the signature is valid, false otherwise. func (s *Signature) Verify(hash []byte, signee *PublicKey) bool { + defer trace.StartRegion(context.Background(), "SignatureVerify").End() if BypassSignature { return true } @@ -98,12 +106,24 @@ func (s *Signature) Verify(hash []byte, signee *PublicKey) bool { return false } - signature := make([]byte, 64) + cacheKey := make([]byte, 64+len(hash)+ec.PubKeyBytesLenUncompressed) + signature := cacheKey[:64] copy(signature, utils.PaddedBigBytes(s.R, 32)) copy(signature[32:], utils.PaddedBigBytes(s.S, 32)) + copy(cacheKey[64:64+len(hash)], hash) signeeBytes := (*ec.PublicKey)(signee).SerializeUncompressed() - ret := secp256k1.VerifySignature(signeeBytes, hash, signature) - return ret + copy(cacheKey[64+len(hash):], signeeBytes) + + if _, ok := verifyCache.Get(string(cacheKey)); ok { + return true + } + _, task := trace.NewTask(context.Background(), "secp256k1.VerifySignature") + valid := secp256k1.VerifySignature(signeeBytes, hash, signature) + task.End() + if valid { + verifyCache.Add(string(cacheKey), nil) + } + return valid //return ecdsa.Verify(signee.toECDSA(), hash, s.R, s.S) } From ca2a2c2dd3e3a460ae0afddd5f5d72284f4e5596 Mon Sep 17 00:00:00 2001 From: 
leventeliu Date: Tue, 15 Jan 2019 16:46:44 +0800 Subject: [PATCH 138/302] Add persisted genesis block hash check on startup --- blockproducer/chain.go | 10 +++++++++ blockproducer/chain_test.go | 42 +++++++++++++++++++++++++++++++++++++ blockproducer/errors.go | 3 +++ 3 files changed, 55 insertions(+) diff --git a/blockproducer/chain.go b/blockproducer/chain.go index 08acc20eb..83409833e 100644 --- a/blockproducer/chain.go +++ b/blockproducer/chain.go @@ -130,6 +130,11 @@ func NewChainWithContext(ctx context.Context, cfg *Config) (c *Chain, err error) err = errors.Wrap(ierr, "failed to open storage") return } + defer func() { + if err != nil { + st.Close() + } + }() // Create initial state from genesis block and store if !existed { @@ -154,6 +159,11 @@ func NewChainWithContext(ctx context.Context, cfg *Config) (c *Chain, err error) err = errors.Wrap(ierr, "failed to load data from storage") return } + if persistedGenesis := irre.ancestorByCount(0); persistedGenesis == nil || + !persistedGenesis.hash.IsEqual(cfg.Genesis.BlockHash()) { + err = ErrGenesisHashNotMatch + return + } for _, v := range heads { log.WithFields(log.Fields{ "irre_hash": irre.hash.Short(4), diff --git a/blockproducer/chain_test.go b/blockproducer/chain_test.go index 79c720024..cc6c662a9 100644 --- a/blockproducer/chain_test.go +++ b/blockproducer/chain_test.go @@ -31,6 +31,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/types" + "github.com/pkg/errors" . 
"github.com/smartystreets/goconvey/convey" ) @@ -312,6 +313,47 @@ func TestChain(t *testing.T) { chain.stat() }) + Convey("The chain should report error if genesis in config is cleared", func() { + err = chain.Stop() + So(err, ShouldBeNil) + config.Genesis = nil + chain, err = NewChain(config) + So(err, ShouldEqual, ErrNilGenesis) + So(chain, ShouldBeNil) + }) + + Convey("The chain should report error if config is changed", func() { + err = chain.Stop() + So(err, ShouldBeNil) + config.Genesis.Transactions = append( + config.Genesis.Transactions, + types.NewBaseAccount(&types.Account{ + Address: addr2, + TokenBalance: [5]uint64{1000, 1000, 1000, 1000, 1000}, + }), + ) + chain, err = NewChain(config) + So(errors.Cause(err), ShouldEqual, types.ErrMerkleRootVerification) + So(chain, ShouldBeNil) + }) + + Convey("The chain should report error if config is changed and rehashed", func() { + err = chain.Stop() + So(err, ShouldBeNil) + config.Genesis.Transactions = append( + config.Genesis.Transactions, + types.NewBaseAccount(&types.Account{ + Address: addr2, + TokenBalance: [5]uint64{1000, 1000, 1000, 1000, 1000}, + }), + ) + err = config.Genesis.SetHash() + So(err, ShouldBeNil) + chain, err = NewChain(config) + So(err, ShouldEqual, ErrGenesisHashNotMatch) + So(chain, ShouldBeNil) + }) + Convey("The chain APIs should return expected results", func() { var ( bl *types.BPBlock diff --git a/blockproducer/errors.go b/blockproducer/errors.go index 5599f8d23..42421b072 100644 --- a/blockproducer/errors.go +++ b/blockproducer/errors.go @@ -72,6 +72,9 @@ var ( ErrNilGenesis = errors.New("nil genesis block") // ErrMultipleGenesis indicates that there're multiple genesis blocks while loading. ErrMultipleGenesis = errors.New("multiple genesis blocks") + // ErrGenesisHashNotMatch indicates that the genesis block hash in config doesn't match + // the persisted one. 
+ ErrGenesisHashNotMatch = errors.New("persisted genesis block hash not match") // ErrInvalidGasPrice indicates that the gas price is invalid. ErrInvalidGasPrice = errors.New("gas price is invalid") // ErrInvalidMinerCount indicates that the miner node count is invalid. From b8604288314736d45d77679dca5c2270e7a1c2a1 Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 15 Jan 2019 16:49:09 +0800 Subject: [PATCH 139/302] Remove trace --- crypto/asymmetric/signature.go | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/crypto/asymmetric/signature.go b/crypto/asymmetric/signature.go index b0d11a55a..3aaf08045 100644 --- a/crypto/asymmetric/signature.go +++ b/crypto/asymmetric/signature.go @@ -17,17 +17,16 @@ package asymmetric import ( - "context" "crypto/elliptic" "errors" "math/big" - "runtime/trace" - "github.com/CovenantSQL/CovenantSQL/crypto/secp256k1" - "github.com/CovenantSQL/CovenantSQL/utils" hsp "github.com/CovenantSQL/HashStablePack/marshalhash" ec "github.com/btcsuite/btcd/btcec" lru "github.com/hashicorp/golang-lru" + + "github.com/CovenantSQL/CovenantSQL/crypto/secp256k1" + "github.com/CovenantSQL/CovenantSQL/utils" ) var ( @@ -76,7 +75,6 @@ func (s *Signature) IsEqual(signature *Signature) bool { // a larger message) using the private key. Produced signature is deterministic (same message and // same key yield the same signature) and canonical in accordance with RFC6979 and BIP0062. func (private *PrivateKey) Sign(hash []byte) (*Signature, error) { - defer trace.StartRegion(context.Background(), "SignatureSign").End() if len(hash) != 32 { return nil, errors.New("only hash can be signed") } @@ -98,7 +96,6 @@ func (private *PrivateKey) Sign(hash []byte) (*Signature, error) { // Verify calls ecdsa.Verify to verify the signature of hash using the public key. It returns true // if the signature is valid, false otherwise. 
func (s *Signature) Verify(hash []byte, signee *PublicKey) bool { - defer trace.StartRegion(context.Background(), "SignatureVerify").End() if BypassSignature { return true } @@ -117,9 +114,7 @@ func (s *Signature) Verify(hash []byte, signee *PublicKey) bool { if _, ok := verifyCache.Get(string(cacheKey)); ok { return true } - _, task := trace.NewTask(context.Background(), "secp256k1.VerifySignature") valid := secp256k1.VerifySignature(signeeBytes, hash, signature) - task.End() if valid { verifyCache.Add(string(cacheKey), nil) } From 11f9fef867572a6b1628b10d058fd31c0229529d Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 15 Jan 2019 17:17:41 +0800 Subject: [PATCH 140/302] Add github.com/hashicorp/golang-lru --- Gopkg.lock | 12 + vendor/github.com/hashicorp/golang-lru/2q.go | 223 +++++++++++ .../github.com/hashicorp/golang-lru/LICENSE | 362 ++++++++++++++++++ .../github.com/hashicorp/golang-lru/README.md | 25 ++ vendor/github.com/hashicorp/golang-lru/arc.go | 257 +++++++++++++ vendor/github.com/hashicorp/golang-lru/doc.go | 21 + vendor/github.com/hashicorp/golang-lru/go.mod | 1 + vendor/github.com/hashicorp/golang-lru/lru.go | 110 ++++++ .../hashicorp/golang-lru/simplelru/lru.go | 161 ++++++++ .../golang-lru/simplelru/lru_interface.go | 36 ++ 10 files changed, 1208 insertions(+) create mode 100644 vendor/github.com/hashicorp/golang-lru/2q.go create mode 100644 vendor/github.com/hashicorp/golang-lru/LICENSE create mode 100644 vendor/github.com/hashicorp/golang-lru/README.md create mode 100644 vendor/github.com/hashicorp/golang-lru/arc.go create mode 100644 vendor/github.com/hashicorp/golang-lru/doc.go create mode 100644 vendor/github.com/hashicorp/golang-lru/go.mod create mode 100644 vendor/github.com/hashicorp/golang-lru/lru.go create mode 100644 vendor/github.com/hashicorp/golang-lru/simplelru/lru.go create mode 100644 vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go diff --git a/Gopkg.lock b/Gopkg.lock index e70594996..a6d747eb8 100644 --- 
a/Gopkg.lock +++ b/Gopkg.lock @@ -239,6 +239,17 @@ revision = "e3702bed27f0d39777b0b37b664b6280e8ef8fbf" version = "v1.6.2" +[[projects]] + digest = "1:8ec8d88c248041a6df5f6574b87bc00e7e0b493881dad2e7ef47b11dc69093b5" + name = "github.com/hashicorp/golang-lru" + packages = [ + ".", + "simplelru", + ] + pruneopts = "UT" + revision = "20f1fb78b0740ba8c3cb143a61e86ba5c8669768" + version = "v0.5.0" + [[projects]] branch = "master" digest = "1:438016f7d4af8e5a7010b6d0705b267a7607ddc0decad051e83a9458c6b9a523" @@ -651,6 +662,7 @@ "github.com/fortytw2/leaktest", "github.com/gorilla/handlers", "github.com/gorilla/mux", + "github.com/hashicorp/golang-lru", "github.com/jmoiron/jsonq", "github.com/jordwest/mock-conn", "github.com/lufia/iostat", diff --git a/vendor/github.com/hashicorp/golang-lru/2q.go b/vendor/github.com/hashicorp/golang-lru/2q.go new file mode 100644 index 000000000..e474cd075 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/2q.go @@ -0,0 +1,223 @@ +package lru + +import ( + "fmt" + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +const ( + // Default2QRecentRatio is the ratio of the 2Q cache dedicated + // to recently added entries that have only been accessed once. + Default2QRecentRatio = 0.25 + + // Default2QGhostEntries is the default ratio of ghost + // entries kept to track entries recently evicted + Default2QGhostEntries = 0.50 +) + +// TwoQueueCache is a thread-safe fixed size 2Q cache. +// 2Q is an enhancement over the standard LRU cache +// in that it tracks both frequently and recently used +// entries separately. This avoids a burst in access to new +// entries from evicting frequently used entries. It adds some +// additional tracking overhead to the standard LRU cache, and is +// computationally about 2x the cost, and adds some metadata over +// head. The ARCCache is similar, but does not require setting any +// parameters. 
+type TwoQueueCache struct { + size int + recentSize int + + recent simplelru.LRUCache + frequent simplelru.LRUCache + recentEvict simplelru.LRUCache + lock sync.RWMutex +} + +// New2Q creates a new TwoQueueCache using the default +// values for the parameters. +func New2Q(size int) (*TwoQueueCache, error) { + return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries) +} + +// New2QParams creates a new TwoQueueCache using the provided +// parameter values. +func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) { + if size <= 0 { + return nil, fmt.Errorf("invalid size") + } + if recentRatio < 0.0 || recentRatio > 1.0 { + return nil, fmt.Errorf("invalid recent ratio") + } + if ghostRatio < 0.0 || ghostRatio > 1.0 { + return nil, fmt.Errorf("invalid ghost ratio") + } + + // Determine the sub-sizes + recentSize := int(float64(size) * recentRatio) + evictSize := int(float64(size) * ghostRatio) + + // Allocate the LRUs + recent, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + frequent, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + recentEvict, err := simplelru.NewLRU(evictSize, nil) + if err != nil { + return nil, err + } + + // Initialize the cache + c := &TwoQueueCache{ + size: size, + recentSize: recentSize, + recent: recent, + frequent: frequent, + recentEvict: recentEvict, + } + return c, nil +} + +// Get looks up a key's value from the cache. +func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if this is a frequent value + if val, ok := c.frequent.Get(key); ok { + return val, ok + } + + // If the value is contained in recent, then we + // promote it to frequent + if val, ok := c.recent.Peek(key); ok { + c.recent.Remove(key) + c.frequent.Add(key, val) + return val, ok + } + + // No hit + return nil, false +} + +// Add adds a value to the cache. 
+func (c *TwoQueueCache) Add(key, value interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if the value is frequently used already, + // and just update the value + if c.frequent.Contains(key) { + c.frequent.Add(key, value) + return + } + + // Check if the value is recently used, and promote + // the value into the frequent list + if c.recent.Contains(key) { + c.recent.Remove(key) + c.frequent.Add(key, value) + return + } + + // If the value was recently evicted, add it to the + // frequently used list + if c.recentEvict.Contains(key) { + c.ensureSpace(true) + c.recentEvict.Remove(key) + c.frequent.Add(key, value) + return + } + + // Add to the recently seen list + c.ensureSpace(false) + c.recent.Add(key, value) + return +} + +// ensureSpace is used to ensure we have space in the cache +func (c *TwoQueueCache) ensureSpace(recentEvict bool) { + // If we have space, nothing to do + recentLen := c.recent.Len() + freqLen := c.frequent.Len() + if recentLen+freqLen < c.size { + return + } + + // If the recent buffer is larger than + // the target, evict from there + if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) { + k, _, _ := c.recent.RemoveOldest() + c.recentEvict.Add(k, nil) + return + } + + // Remove from the frequent list otherwise + c.frequent.RemoveOldest() +} + +// Len returns the number of items in the cache. +func (c *TwoQueueCache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.recent.Len() + c.frequent.Len() +} + +// Keys returns a slice of the keys in the cache. +// The frequently used keys are first in the returned slice. +func (c *TwoQueueCache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + k1 := c.frequent.Keys() + k2 := c.recent.Keys() + return append(k1, k2...) +} + +// Remove removes the provided key from the cache. 
+func (c *TwoQueueCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + if c.frequent.Remove(key) { + return + } + if c.recent.Remove(key) { + return + } + if c.recentEvict.Remove(key) { + return + } +} + +// Purge is used to completely clear the cache. +func (c *TwoQueueCache) Purge() { + c.lock.Lock() + defer c.lock.Unlock() + c.recent.Purge() + c.frequent.Purge() + c.recentEvict.Purge() +} + +// Contains is used to check if the cache contains a key +// without updating recency or frequency. +func (c *TwoQueueCache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.frequent.Contains(key) || c.recent.Contains(key) +} + +// Peek is used to inspect the cache value of a key +// without updating recency or frequency. +func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if val, ok := c.frequent.Peek(key); ok { + return val, ok + } + return c.recent.Peek(key) +} diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE new file mode 100644 index 000000000..be2cc4dfb --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. 
"Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. 
For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. 
+ + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. 
You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. 
You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. 
If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. 
Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. 
diff --git a/vendor/github.com/hashicorp/golang-lru/README.md b/vendor/github.com/hashicorp/golang-lru/README.md new file mode 100644 index 000000000..33e58cfaf --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/README.md @@ -0,0 +1,25 @@ +golang-lru +========== + +This provides the `lru` package which implements a fixed-size +thread safe LRU cache. It is based on the cache in Groupcache. + +Documentation +============= + +Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru) + +Example +======= + +Using the LRU is very simple: + +```go +l, _ := New(128) +for i := 0; i < 256; i++ { + l.Add(i, nil) +} +if l.Len() != 128 { + panic(fmt.Sprintf("bad len: %v", l.Len())) +} +``` diff --git a/vendor/github.com/hashicorp/golang-lru/arc.go b/vendor/github.com/hashicorp/golang-lru/arc.go new file mode 100644 index 000000000..555225a21 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/arc.go @@ -0,0 +1,257 @@ +package lru + +import ( + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC). +// ARC is an enhancement over the standard LRU cache in that tracks both +// frequency and recency of use. This avoids a burst in access to new +// entries from evicting the frequently used older entries. It adds some +// additional tracking overhead to a standard LRU cache, computationally +// it is roughly 2x the cost, and the extra memory overhead is linear +// with the size of the cache. ARC has been patented by IBM, but is +// similar to the TwoQueueCache (2Q) which requires setting parameters. 
+type ARCCache struct { + size int // Size is the total capacity of the cache + p int // P is the dynamic preference towards T1 or T2 + + t1 simplelru.LRUCache // T1 is the LRU for recently accessed items + b1 simplelru.LRUCache // B1 is the LRU for evictions from t1 + + t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items + b2 simplelru.LRUCache // B2 is the LRU for evictions from t2 + + lock sync.RWMutex +} + +// NewARC creates an ARC of the given size +func NewARC(size int) (*ARCCache, error) { + // Create the sub LRUs + b1, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + b2, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + t1, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + t2, err := simplelru.NewLRU(size, nil) + if err != nil { + return nil, err + } + + // Initialize the ARC + c := &ARCCache{ + size: size, + p: 0, + t1: t1, + b1: b1, + t2: t2, + b2: b2, + } + return c, nil +} + +// Get looks up a key's value from the cache. +func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) { + c.lock.Lock() + defer c.lock.Unlock() + + // If the value is contained in T1 (recent), then + // promote it to T2 (frequent) + if val, ok := c.t1.Peek(key); ok { + c.t1.Remove(key) + c.t2.Add(key, val) + return val, ok + } + + // Check if the value is contained in T2 (frequent) + if val, ok := c.t2.Get(key); ok { + return val, ok + } + + // No hit + return nil, false +} + +// Add adds a value to the cache. 
+func (c *ARCCache) Add(key, value interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + // Check if the value is contained in T1 (recent), and potentially + // promote it to frequent T2 + if c.t1.Contains(key) { + c.t1.Remove(key) + c.t2.Add(key, value) + return + } + + // Check if the value is already in T2 (frequent) and update it + if c.t2.Contains(key) { + c.t2.Add(key, value) + return + } + + // Check if this value was recently evicted as part of the + // recently used list + if c.b1.Contains(key) { + // T1 set is too small, increase P appropriately + delta := 1 + b1Len := c.b1.Len() + b2Len := c.b2.Len() + if b2Len > b1Len { + delta = b2Len / b1Len + } + if c.p+delta >= c.size { + c.p = c.size + } else { + c.p += delta + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(false) + } + + // Remove from B1 + c.b1.Remove(key) + + // Add the key to the frequently used list + c.t2.Add(key, value) + return + } + + // Check if this value was recently evicted as part of the + // frequently used list + if c.b2.Contains(key) { + // T2 set is too small, decrease P appropriately + delta := 1 + b1Len := c.b1.Len() + b2Len := c.b2.Len() + if b1Len > b2Len { + delta = b1Len / b2Len + } + if delta >= c.p { + c.p = 0 + } else { + c.p -= delta + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(true) + } + + // Remove from B2 + c.b2.Remove(key) + + // Add the key to the frequently used list + c.t2.Add(key, value) + return + } + + // Potentially need to make room in the cache + if c.t1.Len()+c.t2.Len() >= c.size { + c.replace(false) + } + + // Keep the size of the ghost buffers trim + if c.b1.Len() > c.size-c.p { + c.b1.RemoveOldest() + } + if c.b2.Len() > c.p { + c.b2.RemoveOldest() + } + + // Add to the recently seen list + c.t1.Add(key, value) + return +} + +// replace is used to adaptively evict from either T1 or T2 +// based on the current learned value of P +func 
(c *ARCCache) replace(b2ContainsKey bool) { + t1Len := c.t1.Len() + if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) { + k, _, ok := c.t1.RemoveOldest() + if ok { + c.b1.Add(k, nil) + } + } else { + k, _, ok := c.t2.RemoveOldest() + if ok { + c.b2.Add(k, nil) + } + } +} + +// Len returns the number of cached entries +func (c *ARCCache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.t1.Len() + c.t2.Len() +} + +// Keys returns all the cached keys +func (c *ARCCache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + k1 := c.t1.Keys() + k2 := c.t2.Keys() + return append(k1, k2...) +} + +// Remove is used to purge a key from the cache +func (c *ARCCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + if c.t1.Remove(key) { + return + } + if c.t2.Remove(key) { + return + } + if c.b1.Remove(key) { + return + } + if c.b2.Remove(key) { + return + } +} + +// Purge is used to clear the cache +func (c *ARCCache) Purge() { + c.lock.Lock() + defer c.lock.Unlock() + c.t1.Purge() + c.t2.Purge() + c.b1.Purge() + c.b2.Purge() +} + +// Contains is used to check if the cache contains a key +// without updating recency or frequency. +func (c *ARCCache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.t1.Contains(key) || c.t2.Contains(key) +} + +// Peek is used to inspect the cache value of a key +// without updating recency or frequency. +func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + defer c.lock.RUnlock() + if val, ok := c.t1.Peek(key); ok { + return val, ok + } + return c.t2.Peek(key) +} diff --git a/vendor/github.com/hashicorp/golang-lru/doc.go b/vendor/github.com/hashicorp/golang-lru/doc.go new file mode 100644 index 000000000..2547df979 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/doc.go @@ -0,0 +1,21 @@ +// Package lru provides three different LRU caches of varying sophistication. 
+// +// Cache is a simple LRU cache. It is based on the +// LRU implementation in groupcache: +// https://github.com/golang/groupcache/tree/master/lru +// +// TwoQueueCache tracks frequently used and recently used entries separately. +// This avoids a burst of accesses from taking out frequently used entries, +// at the cost of about 2x computational overhead and some extra bookkeeping. +// +// ARCCache is an adaptive replacement cache. It tracks recent evictions as +// well as recent usage in both the frequent and recent caches. Its +// computational overhead is comparable to TwoQueueCache, but the memory +// overhead is linear with the size of the cache. +// +// ARC has been patented by IBM, so do not use it if that is problematic for +// your program. +// +// All caches in this package take locks while operating, and are therefore +// thread-safe for consumers. +package lru diff --git a/vendor/github.com/hashicorp/golang-lru/go.mod b/vendor/github.com/hashicorp/golang-lru/go.mod new file mode 100644 index 000000000..824cb97e8 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/go.mod @@ -0,0 +1 @@ +module github.com/hashicorp/golang-lru diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go new file mode 100644 index 000000000..c8d9b0a23 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/lru.go @@ -0,0 +1,110 @@ +package lru + +import ( + "sync" + + "github.com/hashicorp/golang-lru/simplelru" +) + +// Cache is a thread-safe fixed size LRU cache. +type Cache struct { + lru simplelru.LRUCache + lock sync.RWMutex +} + +// New creates an LRU of the given size. +func New(size int) (*Cache, error) { + return NewWithEvict(size, nil) +} + +// NewWithEvict constructs a fixed size cache with the given eviction +// callback. 
+func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) { + lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted)) + if err != nil { + return nil, err + } + c := &Cache{ + lru: lru, + } + return c, nil +} + +// Purge is used to completely clear the cache. +func (c *Cache) Purge() { + c.lock.Lock() + c.lru.Purge() + c.lock.Unlock() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *Cache) Add(key, value interface{}) (evicted bool) { + c.lock.Lock() + defer c.lock.Unlock() + return c.lru.Add(key, value) +} + +// Get looks up a key's value from the cache. +func (c *Cache) Get(key interface{}) (value interface{}, ok bool) { + c.lock.Lock() + defer c.lock.Unlock() + return c.lru.Get(key) +} + +// Contains checks if a key is in the cache, without updating the +// recent-ness or deleting it for being stale. +func (c *Cache) Contains(key interface{}) bool { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Contains(key) +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Peek(key) +} + +// ContainsOrAdd checks if a key is in the cache without updating the +// recent-ness or deleting it for being stale, and if not, adds the value. +// Returns whether found and whether an eviction occurred. +func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { + c.lock.Lock() + defer c.lock.Unlock() + + if c.lru.Contains(key) { + return true, false + } + evicted = c.lru.Add(key, value) + return false, evicted +} + +// Remove removes the provided key from the cache. +func (c *Cache) Remove(key interface{}) { + c.lock.Lock() + c.lru.Remove(key) + c.lock.Unlock() +} + +// RemoveOldest removes the oldest item from the cache. 
+func (c *Cache) RemoveOldest() { + c.lock.Lock() + c.lru.RemoveOldest() + c.lock.Unlock() +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *Cache) Keys() []interface{} { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Keys() +} + +// Len returns the number of items in the cache. +func (c *Cache) Len() int { + c.lock.RLock() + defer c.lock.RUnlock() + return c.lru.Len() +} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go new file mode 100644 index 000000000..5673773b2 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go @@ -0,0 +1,161 @@ +package simplelru + +import ( + "container/list" + "errors" +) + +// EvictCallback is used to get a callback when a cache entry is evicted +type EvictCallback func(key interface{}, value interface{}) + +// LRU implements a non-thread safe fixed size LRU cache +type LRU struct { + size int + evictList *list.List + items map[interface{}]*list.Element + onEvict EvictCallback +} + +// entry is used to hold a value in the evictList +type entry struct { + key interface{} + value interface{} +} + +// NewLRU constructs an LRU of the given size +func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { + if size <= 0 { + return nil, errors.New("Must provide a positive size") + } + c := &LRU{ + size: size, + evictList: list.New(), + items: make(map[interface{}]*list.Element), + onEvict: onEvict, + } + return c, nil +} + +// Purge is used to completely clear the cache. +func (c *LRU) Purge() { + for k, v := range c.items { + if c.onEvict != nil { + c.onEvict(k, v.Value.(*entry).value) + } + delete(c.items, k) + } + c.evictList.Init() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. 
+func (c *LRU) Add(key, value interface{}) (evicted bool) { + // Check for existing item + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + ent.Value.(*entry).value = value + return false + } + + // Add new item + ent := &entry{key, value} + entry := c.evictList.PushFront(ent) + c.items[key] = entry + + evict := c.evictList.Len() > c.size + // Verify size not exceeded + if evict { + c.removeOldest() + } + return evict +} + +// Get looks up a key's value from the cache. +func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + return ent.Value.(*entry).value, true + } + return +} + +// Contains checks if a key is in the cache, without updating the recent-ness +// or deleting it for being stale. +func (c *LRU) Contains(key interface{}) (ok bool) { + _, ok = c.items[key] + return ok +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { + var ent *list.Element + if ent, ok = c.items[key]; ok { + return ent.Value.(*entry).value, true + } + return nil, ok +} + +// Remove removes the provided key from the cache, returning if the +// key was contained. +func (c *LRU) Remove(key interface{}) (present bool) { + if ent, ok := c.items[key]; ok { + c.removeElement(ent) + return true + } + return false +} + +// RemoveOldest removes the oldest item from the cache. 
+func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// GetOldest returns the oldest entry +func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { + ent := c.evictList.Back() + if ent != nil { + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. +func (c *LRU) Keys() []interface{} { + keys := make([]interface{}, len(c.items)) + i := 0 + for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { + keys[i] = ent.Value.(*entry).key + i++ + } + return keys +} + +// Len returns the number of items in the cache. +func (c *LRU) Len() int { + return c.evictList.Len() +} + +// removeOldest removes the oldest item from the cache. +func (c *LRU) removeOldest() { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + } +} + +// removeElement is used to remove a given list element from the cache +func (c *LRU) removeElement(e *list.Element) { + c.evictList.Remove(e) + kv := e.Value.(*entry) + delete(c.items, kv.key) + if c.onEvict != nil { + c.onEvict(kv.key, kv.value) + } +} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go new file mode 100644 index 000000000..74c707744 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go @@ -0,0 +1,36 @@ +package simplelru + +// LRUCache is the interface for simple LRU cache. +type LRUCache interface { + // Adds a value to the cache, returns true if an eviction occurred and + // updates the "recently used"-ness of the key. + Add(key, value interface{}) bool + + // Returns key's value from the cache and + // updates the "recently used"-ness of the key. 
#value, isFound + Get(key interface{}) (value interface{}, ok bool) + + // Check if a key exsists in cache without updating the recent-ness. + Contains(key interface{}) (ok bool) + + // Returns key's value without updating the "recently used"-ness of the key. + Peek(key interface{}) (value interface{}, ok bool) + + // Removes a key from the cache. + Remove(key interface{}) bool + + // Removes the oldest entry from cache. + RemoveOldest() (interface{}, interface{}, bool) + + // Returns the oldest entry from the cache. #key, value, isFound + GetOldest() (interface{}, interface{}, bool) + + // Returns a slice of the keys in the cache, from oldest to newest. + Keys() []interface{} + + // Returns the number of items in the cache. + Len() int + + // Clear all cache entries + Purge() +} From 3f5a460b2060393e62faecfed64e809376cc9e60 Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 15 Jan 2019 17:53:37 +0800 Subject: [PATCH 141/302] Add TransactionState MarshalHash --- blockproducer/interfaces/transaction.go | 1 - blockproducer/interfaces/transaction_gen.go | 14 ++++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/blockproducer/interfaces/transaction.go b/blockproducer/interfaces/transaction.go index efcc586c3..619c5ea1c 100644 --- a/blockproducer/interfaces/transaction.go +++ b/blockproducer/interfaces/transaction.go @@ -26,7 +26,6 @@ import ( ) //go:generate hsp -//hsp:ignore TransactionState // AccountNonce defines the an account nonce. 
type AccountNonce uint32 diff --git a/blockproducer/interfaces/transaction_gen.go b/blockproducer/interfaces/transaction_gen.go index c61b6bc1d..7a1c151e9 100644 --- a/blockproducer/interfaces/transaction_gen.go +++ b/blockproducer/interfaces/transaction_gen.go @@ -20,6 +20,20 @@ func (z AccountNonce) Msgsize() (s int) { return } +// MarshalHash marshals for hash +func (z TransactionState) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + o = hsp.AppendUint32(o, uint32(z)) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z TransactionState) Msgsize() (s int) { + s = hsp.Uint32Size + return +} + // MarshalHash marshals for hash func (z TransactionType) MarshalHash() (o []byte, err error) { var b []byte From 95aa69171ae1c4ede7264512fcf0d265025342eb Mon Sep 17 00:00:00 2001 From: leventeliu Date: Tue, 15 Jan 2019 18:08:56 +0800 Subject: [PATCH 142/302] Remove genesis config fields --- cmd/cqld/bootstrap.go | 7 ++----- conf/config.go | 8 -------- conf/config_test.go | 8 ++------ 3 files changed, 4 insertions(+), 19 deletions(-) diff --git a/cmd/cqld/bootstrap.go b/cmd/cqld/bootstrap.go index 55c1ecb62..89f456552 100644 --- a/cmd/cqld/bootstrap.go +++ b/cmd/cqld/bootstrap.go @@ -236,11 +236,8 @@ func loadGenesis() (genesis *types.BPBlock, err error) { genesis = &types.BPBlock{ SignedHeader: types.BPSignedHeader{ BPHeader: types.BPHeader{ - Version: genesisInfo.Version, - Producer: proto.AccountAddress(genesisInfo.Producer), - MerkleRoot: genesisInfo.MerkleRoot, - ParentHash: genesisInfo.ParentHash, - Timestamp: genesisInfo.Timestamp, + Version: genesisInfo.Version, + Timestamp: genesisInfo.Timestamp, }, }, } diff --git a/conf/config.go b/conf/config.go index 7f3914842..5acef45a3 100644 --- a/conf/config.go +++ b/conf/config.go @@ -54,16 +54,8 @@ type BaseAccountInfo struct { type BPGenesisInfo struct { // Version defines the block version Version int32 
`yaml:"Version"` - // Producer defines the block producer - Producer hash.Hash `yaml:"Producer"` - // MerkleRoot defines the transaction merkle tree's root - MerkleRoot hash.Hash `yaml:"MerkleRoot"` - // ParentHash defines the parent block's hash - ParentHash hash.Hash `yaml:"ParentHash"` // Timestamp defines the initial time of chain Timestamp time.Time `yaml:"Timestamp"` - // BlockHash defines the block hash of genesis block - BlockHash hash.Hash `yaml:"BlockHash"` // BaseAccounts defines the base accounts for testnet BaseAccounts []BaseAccountInfo `yaml:"BaseAccounts"` } diff --git a/conf/config_test.go b/conf/config_test.go index 1cdc590dd..43bfc74e5 100644 --- a/conf/config_test.go +++ b/conf/config_test.go @@ -62,12 +62,8 @@ func TestConf(t *testing.T) { }, ChainFileName: "", BPGenesis: BPGenesisInfo{ - Version: 1, - Producer: h, - MerkleRoot: h, - ParentHash: h, - Timestamp: time.Now().UTC(), - BlockHash: h, + Version: 1, + Timestamp: time.Now().UTC(), }, } Convey("LoadConfig", t, func() { From 434387cc1df5a18c406b03c9958924d1b3d54c8e Mon Sep 17 00:00:00 2001 From: leventeliu Date: Wed, 16 Jan 2019 12:12:12 +0800 Subject: [PATCH 143/302] Add isolation level for xenomint state --- sqlchain/chain.go | 23 ++---- sqlchain/config.go | 2 + types/init_service_type.go | 1 + types/init_service_type_gen.go | 24 +++--- worker/db.go | 2 + worker/db_config.go | 1 + worker/dbms.go | 1 + xenomint/chain.go | 11 +-- xenomint/state.go | 135 +++++++++++++++++++-------------- xenomint/state_test.go | 6 +- 10 files changed, 108 insertions(+), 98 deletions(-) diff --git a/sqlchain/chain.go b/sqlchain/chain.go index 8cd3eba85..1ed1f3bf3 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -19,6 +19,7 @@ package sqlchain import ( "bytes" "context" + "database/sql" "encoding/binary" "fmt" "os" @@ -174,17 +175,11 @@ func NewChainWithContext(ctx context.Context, c *Config) (chain *Chain, err erro log.WithField("db", c.DatabaseID).Debugf("create new chain tdb %s", tdbFile) - // 
Open x.State - var ( - strg xi.Storage - state *x.State - ) + // Open storage + var strg xi.Storage if strg, err = xs.NewSqlite(c.DataFile); err != nil { return } - if state, err = x.NewState(c.Server, strg); err != nil { - return - } // Cache local private key var ( @@ -207,7 +202,7 @@ func NewChainWithContext(ctx context.Context, c *Config) (chain *Chain, err erro tdb: tdb, bi: newBlockIndex(), ai: newAckIndex(), - st: state, + st: x.NewState(sql.IsolationLevel(c.IsolationLevel), c.Server, strg), cl: rpc.NewCaller(), rt: newRunTime(ctx, c), ctx: ctx, @@ -261,16 +256,10 @@ func LoadChainWithContext(ctx context.Context, c *Config) (chain *Chain, err err } // Open x.State - var ( - strg xi.Storage - xstate *x.State - ) + var strg xi.Storage if strg, err = xs.NewSqlite(c.DataFile); err != nil { return } - if xstate, err = x.NewState(c.Server, strg); err != nil { - return - } // Cache local private key var ( @@ -293,7 +282,7 @@ func LoadChainWithContext(ctx context.Context, c *Config) (chain *Chain, err err tdb: tdb, bi: newBlockIndex(), ai: newAckIndex(), - st: xstate, + st: x.NewState(sql.IsolationLevel(c.IsolationLevel), c.Server, strg), cl: rpc.NewCaller(), rt: newRunTime(ctx, c), ctx: ctx, diff --git a/sqlchain/config.go b/sqlchain/config.go index 4de594ca8..3822870e7 100644 --- a/sqlchain/config.go +++ b/sqlchain/config.go @@ -51,4 +51,6 @@ type Config struct { TokenType types.TokenType GasPrice uint64 UpdatePeriod uint64 + + IsolationLevel int } diff --git a/types/init_service_type.go b/types/init_service_type.go index 3267445b5..5a94439df 100644 --- a/types/init_service_type.go +++ b/types/init_service_type.go @@ -39,6 +39,7 @@ type ResourceMeta struct { EncryptionKey string // encryption key for database instance UseEventualConsistency bool // use eventual consistency replication if enabled ConsistencyLevel float64 // customized strong consistency level + IsolationLevel int // customized isolation level } // ServiceInstance defines single instance to be 
initialized. diff --git a/types/init_service_type_gen.go b/types/init_service_type_gen.go index b289ec50d..dc9356f72 100644 --- a/types/init_service_type_gen.go +++ b/types/init_service_type_gen.go @@ -76,8 +76,8 @@ func (z *InitServiceResponseHeader) Msgsize() (s int) { func (z *ResourceMeta) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) - // map header, size 8 - o = append(o, 0x88, 0x88) + // map header, size 9 + o = append(o, 0x89, 0x89) o = hsp.AppendArrayHeader(o, uint32(len(z.TargetMiners))) for za0001 := range z.TargetMiners { if oTemp, err := z.TargetMiners[za0001].MarshalHash(); err != nil { @@ -86,20 +86,22 @@ func (z *ResourceMeta) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x88) + o = append(o, 0x89) o = hsp.AppendBool(o, z.UseEventualConsistency) - o = append(o, 0x88) + o = append(o, 0x89) o = hsp.AppendFloat64(o, z.ConsistencyLevel) - o = append(o, 0x88) + o = append(o, 0x89) o = hsp.AppendFloat64(o, z.LoadAvgPerCPU) - o = append(o, 0x88) + o = append(o, 0x89) + o = hsp.AppendInt(o, z.IsolationLevel) + o = append(o, 0x89) o = hsp.AppendString(o, z.EncryptionKey) - o = append(o, 0x88) + o = append(o, 0x89) o = hsp.AppendUint16(o, z.Node) - o = append(o, 0x88) - o = hsp.AppendUint64(o, z.Space) - o = append(o, 0x88) + o = append(o, 0x89) o = hsp.AppendUint64(o, z.Memory) + o = append(o, 0x89) + o = hsp.AppendUint64(o, z.Space) return } @@ -109,7 +111,7 @@ func (z *ResourceMeta) Msgsize() (s int) { for za0001 := range z.TargetMiners { s += z.TargetMiners[za0001].Msgsize() } - s += 23 + hsp.BoolSize + 17 + hsp.Float64Size + 14 + hsp.Float64Size + 14 + hsp.StringPrefixSize + len(z.EncryptionKey) + 5 + hsp.Uint16Size + 6 + hsp.Uint64Size + 7 + hsp.Uint64Size + s += 23 + hsp.BoolSize + 17 + hsp.Float64Size + 14 + hsp.Float64Size + 15 + hsp.IntSize + 14 + hsp.StringPrefixSize + len(z.EncryptionKey) + 5 + hsp.Uint16Size + 7 + hsp.Uint64Size + 6 + hsp.Uint64Size return } diff 
--git a/worker/db.go b/worker/db.go index a8c07ce46..e3ced3393 100644 --- a/worker/db.go +++ b/worker/db.go @@ -159,6 +159,8 @@ func NewDatabase(cfg *DBConfig, peers *proto.Peers, QueryTTL: conf.GConf.SQLChainTTL, UpdatePeriod: cfg.UpdateBlockCount, + + IsolationLevel: cfg.IsolationLevel, } if db.chain, err = sqlchain.NewChain(chainCfg); err != nil { return diff --git a/worker/db_config.go b/worker/db_config.go index 410503232..97270a627 100644 --- a/worker/db_config.go +++ b/worker/db_config.go @@ -35,5 +35,6 @@ type DBConfig struct { UpdateBlockCount uint64 UseEventualConsistency bool ConsistencyLevel float64 + IsolationLevel int SlowQueryTime time.Duration } diff --git a/worker/dbms.go b/worker/dbms.go index 9329ea6d1..996f1728d 100644 --- a/worker/dbms.go +++ b/worker/dbms.go @@ -390,6 +390,7 @@ func (dbms *DBMS) Create(instance *types.ServiceInstance, cleanup bool) (err err UpdateBlockCount: conf.GConf.BillingBlockCount, UseEventualConsistency: instance.ResourceMeta.UseEventualConsistency, ConsistencyLevel: instance.ResourceMeta.ConsistencyLevel, + IsolationLevel: instance.ResourceMeta.IsolationLevel, SlowQueryTime: DefaultSlowQueryTime, } diff --git a/xenomint/chain.go b/xenomint/chain.go index 85193ce96..e207cc029 100644 --- a/xenomint/chain.go +++ b/xenomint/chain.go @@ -17,6 +17,7 @@ package xenomint import ( + "database/sql" "time" ca "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" @@ -38,9 +39,8 @@ type Chain struct { // NewChain returns new chain instance. 
func NewChain(filename string) (c *Chain, err error) { var ( - strg xi.Storage - state *State - priv *ca.PrivateKey + strg xi.Storage + priv *ca.PrivateKey ) // generate empty nodeId nodeID := proto.NodeID("0000000000000000000000000000000000000000000000000000000000000000") @@ -49,14 +49,11 @@ func NewChain(filename string) (c *Chain, err error) { if strg, err = xs.NewSqlite(filename); err != nil { return } - if state, err = NewState(nodeID, strg); err != nil { - return - } if priv, err = kms.GetLocalPrivateKey(); err != nil { return } c = &Chain{ - state: state, + state: NewState(sql.LevelReadUncommitted, nodeID, strg), priv: priv, } return diff --git a/xenomint/state.go b/xenomint/state.go index eccdd22ff..78cf88ffd 100644 --- a/xenomint/state.go +++ b/xenomint/state.go @@ -30,16 +30,42 @@ import ( "github.com/pkg/errors" ) +type sqlQuerier interface { + Query(query string, args ...interface{}) (*sql.Rows, error) + QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) +} + +type sqlExecuter interface { + sqlQuerier + Exec(query string, args ...interface{}) (sql.Result, error) + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) + Commit() error + Rollback() error +} + +type sqlDB struct { + *sql.DB +} + +func (db *sqlDB) Commit() error { + return nil +} + +func (db *sqlDB) Rollback() error { + return nil +} + // State defines a xenomint state which is bound to a underlying storage. type State struct { + level sql.IsolationLevel + sync.RWMutex strg xi.Storage pool *pool closed bool nodeID proto.NodeID - // unc is the uncommitted transaction. - unc *sql.Tx + executer sqlExecuter maxTx uint64 lastCommitPoint uint64 current uint64 // current is the current lastSeq of the current transaction @@ -47,31 +73,43 @@ type State struct { } // NewState returns a new State bound to strg. 
-func NewState(nodeID proto.NodeID, strg xi.Storage) (s *State, err error) { - var t = &State{ +func NewState(level sql.IsolationLevel, nodeID proto.NodeID, strg xi.Storage) (s *State) { + s = &State{ + level: level, nodeID: nodeID, strg: strg, pool: newPool(), maxTx: 100, } - if t.unc, err = t.strg.Writer().Begin(); err != nil { - return - } - s = t + s.openSQLExecuter() return } -func (s *State) incSeq() { - atomic.AddUint64(&s.current, 1) +func (s *State) openSQLExecuter() { + var err error + if s.level == sql.LevelReadUncommitted { + if s.executer, err = s.strg.Writer().Begin(); err != nil { + log.Fatal("failed to open transaction: %v", err) + } + } else { + s.executer = &sqlDB{DB: s.strg.Writer()} + } } -func (s *State) setSeq(id uint64) { - atomic.StoreUint64(&s.current, id) +func (s *State) reader() *sql.DB { + if s.level == sql.LevelReadUncommitted { + return s.strg.DirtyReader() + } + return s.strg.Reader() +} + +func (s *State) incSeq() { + atomic.AddUint64(&s.current, 1) } // SetSeq sets the initial id of the current transaction. 
func (s *State) SetSeq(id uint64) { - s.setSeq(id) + atomic.StoreUint64(&s.current, id) } func (s *State) getSeq() uint64 { @@ -89,15 +127,11 @@ func (s *State) Close(commit bool) (err error) { if s.closed { return } - if s.unc != nil { + if s.executer != nil { if commit { - if err = s.uncCommit(); err != nil { - log.WithError(err).Fatal("failed to commit") - } + s.commitSQLExecuter() } else { - if err = s.uncRollback(); err != nil { - log.WithError(err).Fatal("failed to rollback") - } + s.rollbackSQLExecuter() } } if err = s.strg.Close(); err != nil { @@ -115,11 +149,6 @@ func buildTypeNamesFromSQLColumnTypes(types []*sql.ColumnType) (names []string) return } -type sqlQuerier interface { - Query(query string, args ...interface{}) (*sql.Rows, error) - QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) -} - func readSingle( ctx context.Context, qer sqlQuerier, q *types.Query, ) ( @@ -187,7 +216,7 @@ func (s *State) readWithContext( ) // TODO(leventeliu): no need to run every read query here. 
for i, v := range req.Payload.Queries { - if cnames, ctypes, data, ierr = readSingle(ctx, s.strg.DirtyReader(), &v); ierr != nil { + if cnames, ctypes, data, ierr = readSingle(ctx, s.reader(), &v); ierr != nil { err = errors.Wrapf(ierr, "query at #%d failed", i) // Add to failed pool list s.pool.setFailed(req) @@ -225,14 +254,14 @@ func (s *State) readTx( data [][]interface{} querier sqlQuerier ) - if atomic.LoadUint32(&s.hasSchemaChange) == 1 { + if s.level == sql.LevelReadUncommitted && atomic.LoadUint32(&s.hasSchemaChange) == 1 { // lock transaction s.Lock() defer s.Unlock() - querier = s.unc + querier = s.executer } else { var tx *sql.Tx - if tx, ierr = s.strg.DirtyReader().Begin(); ierr != nil { + if tx, ierr = s.reader().Begin(); ierr != nil { err = errors.Wrap(ierr, "open tx failed") return } @@ -306,7 +335,7 @@ func (s *State) writeSingle( return } //parsed = time.Since(start) - if res, err = s.unc.Exec(pattern, args...); err == nil { + if res, err = s.executer.Exec(pattern, args...); err == nil { if containsDDL { atomic.StoreUint32(&s.hasSchemaChange, 1) } @@ -366,11 +395,11 @@ func (s *State) write( lastSeq = s.getSeq() if qcnt > 1 { // Set savepoint - if _, ierr = s.unc.Exec(`SAVEPOINT "?"`, lastSeq); ierr != nil { + if _, ierr = s.executer.Exec(`SAVEPOINT "?"`, lastSeq); ierr != nil { err = errors.Wrapf(ierr, "failed to create savepoint %d", lastSeq) return } - defer s.unc.Exec(`ROLLBACK TO "?"`, lastSeq) + defer s.executer.Exec(`ROLLBACK TO "?"`, lastSeq) } for i, v := range req.Payload.Queries { var res sql.Result @@ -388,7 +417,7 @@ func (s *State) write( } if qcnt > 1 { // Release savepoint - if _, ierr = s.unc.Exec(`RELEASE SAVEPOINT "?"`, lastSeq); ierr != nil { + if _, ierr = s.executer.Exec(`RELEASE SAVEPOINT "?"`, lastSeq); ierr != nil { err = errors.Wrapf(ierr, "failed to release savepoint %d", lastSeq) return } @@ -396,7 +425,7 @@ func (s *State) write( // Try to commit if the ongoing tx is too large or schema is changed if 
s.getSeq()-s.getLastCommitPoint() > s.maxTx || atomic.LoadUint32(&s.hasSchemaChange) != 0 { - s.tryCommit() + s.flush() } writeDone = time.Since(start) s.pool.enqueue(lastSeq, query) @@ -449,7 +478,7 @@ func (s *State) replay(ctx context.Context, req *types.Request, resp *types.Resp // Try to commit if the ongoing tx is too large or schema is changed if s.getSeq()-s.getLastCommitPoint() > s.maxTx || atomic.LoadUint32(&s.hasSchemaChange) != 0 { - s.tryCommit() + s.flush() } s.pool.enqueue(lastSeq, query) return @@ -502,7 +531,7 @@ func (s *State) ReplayBlockWithContext(ctx context.Context, block *types.Block) s.pool.enqueue(lastsp, query) } // Always try to commit after a block is successfully replayed - s.tryCommit() + s.flush() // Remove duplicate failed queries from local pool for _, r := range block.FailedReqs { s.pool.removeFailed(r) @@ -540,12 +569,7 @@ func (s *State) commit() (err error) { lockReleased = time.Since(start) }() lockAcquired = time.Since(start) - if err = s.uncCommit(); err != nil { - log.WithError(err).Fatal("failed to commit") - } - if s.unc, err = s.strg.Writer().Begin(); err != nil { - log.WithError(err).Fatal("failed to begin") - } + s.flush() committed = time.Since(start) _ = s.pool.queries s.pool = newPool() @@ -591,7 +615,7 @@ func (s *State) CommitExWithContext( lockReleased = time.Since(start) }() // Always try to commit before the block is produced - s.tryCommit() + s.flush() committed = time.Since(start) // Return pooled items and reset failed = s.pool.failedList() @@ -601,33 +625,26 @@ func (s *State) CommitExWithContext( return } -func (s *State) tryCommit() { - var err error - if err = s.uncCommit(); err != nil { - log.WithError(err).Fatal("failed to commit") - } - if s.unc, err = s.strg.Writer().Begin(); err != nil { - log.WithError(err).Fatal("failed to begin") - } +func (s *State) flush() { + s.commitSQLExecuter() + s.openSQLExecuter() } -func (s *State) uncCommit() (err error) { - if err = s.unc.Commit(); err != nil { - 
return +func (s *State) commitSQLExecuter() { + if err := s.executer.Commit(); err != nil { + log.WithError(err).Fatal("failed to commit") } // reset schema change flag atomic.StoreUint32(&s.hasSchemaChange, 0) atomic.StoreUint64(&s.lastCommitPoint, s.getSeq()) - return } -func (s *State) uncRollback() (err error) { - if err = s.unc.Rollback(); err != nil { - return +func (s *State) rollbackSQLExecuter() { + if err := s.executer.Rollback(); err != nil { + log.WithError(err).Fatal("failed to rollback") } // reset schema change flag atomic.StoreUint32(&s.hasSchemaChange, 0) - return } func (s *State) getLocalTime() time.Time { diff --git a/xenomint/state_test.go b/xenomint/state_test.go index e5b478a00..a3292f8d1 100644 --- a/xenomint/state_test.go +++ b/xenomint/state_test.go @@ -47,8 +47,7 @@ func TestState(t *testing.T) { strg1, err = xs.NewSqlite(fmt.Sprint("file:", fl1)) So(err, ShouldBeNil) So(strg1, ShouldNotBeNil) - st1, err = NewState(nodeID, strg1) - So(err, ShouldBeNil) + st1 = NewState(sql.LevelReadUncommitted, nodeID, strg1) So(st1, ShouldNotBeNil) Reset(func() { // Clean database file after each pass @@ -64,8 +63,7 @@ func TestState(t *testing.T) { strg2, err = xs.NewSqlite(fmt.Sprint("file:", fl2)) So(err, ShouldBeNil) So(strg1, ShouldNotBeNil) - st2, err = NewState(nodeID, strg2) - So(err, ShouldBeNil) + st2 = NewState(sql.LevelReadUncommitted, nodeID, strg2) So(st1, ShouldNotBeNil) Reset(func() { // Clean database file after each pass From 8a40fea629dfc0c9e50756bce5f2cfdc0599a58d Mon Sep 17 00:00:00 2001 From: leventeliu Date: Wed, 16 Jan 2019 21:26:31 +0800 Subject: [PATCH 144/302] Minor fix --- xenomint/state.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/xenomint/state.go b/xenomint/state.go index 78cf88ffd..a1714a75c 100644 --- a/xenomint/state.go +++ b/xenomint/state.go @@ -86,10 +86,10 @@ func NewState(level sql.IsolationLevel, nodeID proto.NodeID, strg xi.Storage) (s } func (s *State) openSQLExecuter() { 
- var err error if s.level == sql.LevelReadUncommitted { + var err error if s.executer, err = s.strg.Writer().Begin(); err != nil { - log.Fatal("failed to open transaction: %v", err) + log.WithError(err).Fatal("failed to open transaction") } } else { s.executer = &sqlDB{DB: s.strg.Writer()} @@ -425,7 +425,7 @@ func (s *State) write( // Try to commit if the ongoing tx is too large or schema is changed if s.getSeq()-s.getLastCommitPoint() > s.maxTx || atomic.LoadUint32(&s.hasSchemaChange) != 0 { - s.flush() + s.flushSQLExecuter() } writeDone = time.Since(start) s.pool.enqueue(lastSeq, query) @@ -478,7 +478,7 @@ func (s *State) replay(ctx context.Context, req *types.Request, resp *types.Resp // Try to commit if the ongoing tx is too large or schema is changed if s.getSeq()-s.getLastCommitPoint() > s.maxTx || atomic.LoadUint32(&s.hasSchemaChange) != 0 { - s.flush() + s.flushSQLExecuter() } s.pool.enqueue(lastSeq, query) return @@ -531,7 +531,7 @@ func (s *State) ReplayBlockWithContext(ctx context.Context, block *types.Block) s.pool.enqueue(lastsp, query) } // Always try to commit after a block is successfully replayed - s.flush() + s.flushSQLExecuter() // Remove duplicate failed queries from local pool for _, r := range block.FailedReqs { s.pool.removeFailed(r) @@ -569,7 +569,7 @@ func (s *State) commit() (err error) { lockReleased = time.Since(start) }() lockAcquired = time.Since(start) - s.flush() + s.flushSQLExecuter() committed = time.Since(start) _ = s.pool.queries s.pool = newPool() @@ -615,7 +615,7 @@ func (s *State) CommitExWithContext( lockReleased = time.Since(start) }() // Always try to commit before the block is produced - s.flush() + s.flushSQLExecuter() committed = time.Since(start) // Return pooled items and reset failed = s.pool.failedList() @@ -625,7 +625,7 @@ func (s *State) CommitExWithContext( return } -func (s *State) flush() { +func (s *State) flushSQLExecuter() { s.commitSQLExecuter() s.openSQLExecuter() } From 
efebf088683a6ddcdc747e9eb5ee5e7a5742e01f Mon Sep 17 00:00:00 2001 From: leventeliu Date: Thu, 17 Jan 2019 11:23:57 +0800 Subject: [PATCH 145/302] Minor fix --- crypto/verifier/common.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/verifier/common.go b/crypto/verifier/common.go index 2167da208..dc99a8fbb 100644 --- a/crypto/verifier/common.go +++ b/crypto/verifier/common.go @@ -98,7 +98,7 @@ func (i *DefaultHashSignVerifierImpl) VerifyHash(mh MarshalHasher) (err error) { // VerifySignature implements HashSignVerifier.VerifySignature. func (i *DefaultHashSignVerifierImpl) VerifySignature() (err error) { - if i.Signature == nil || i.Signee == nil || !i.Signature.Verify(i.DataHash[:], i.Signee) { + if !i.Signature.Verify(i.DataHash[:], i.Signee) { err = errors.WithStack(ErrSignatureNotMatch) return } From 526d41f8185a2363b903debfedc158f1668b64e9 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Thu, 17 Jan 2019 14:57:34 +0800 Subject: [PATCH 146/302] Fix minerd integration test --- blockproducer/metastate.go | 20 +++++++++++--------- cmd/cql-minerd/integration_test.go | 10 ++++++---- 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/blockproducer/metastate.go b/blockproducer/metastate.go index af18e806b..0b2c290e6 100644 --- a/blockproducer/metastate.go +++ b/blockproducer/metastate.go @@ -729,15 +729,17 @@ func (s *metaState) matchProvidersWithUser(tx *types.CreateDatabase) (err error) // create sqlchain sp := &types.SQLChainProfile{ - ID: dbID, - Address: dbAddr, - Period: sqlchainPeriod, - GasPrice: tx.GasPrice, - TokenType: types.Particle, - Owner: sender, - Users: users, - EncodedGenesis: enc.Bytes(), - Miners: miners, + ID: dbID, + Address: dbAddr, + Period: sqlchainPeriod, + GasPrice: tx.GasPrice, + LastUpdatedHeight: 0, + TokenType: types.Particle, + Owner: sender, + Miners: miners, + Users: users, + EncodedGenesis: enc.Bytes(), + Meta: tx.ResourceMeta, } if _, loaded := s.loadSQLChainObject(dbID); loaded { diff --git 
a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index 7746d8384..7100363ea 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -149,6 +149,7 @@ func startNodes() { FJ(baseDir, "./bin/cql-minerd.test"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_0/config.yaml"), "-test.coverprofile", FJ(baseDir, "./cmd/cql-minerd/miner0.cover.out"), + "-log-level", "debug", }, "miner0", testWorkingDir, logDir, true, ); err == nil { @@ -162,6 +163,7 @@ func startNodes() { FJ(baseDir, "./bin/cql-minerd.test"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_1/config.yaml"), "-test.coverprofile", FJ(baseDir, "./cmd/cql-minerd/miner1.cover.out"), + "-log-level", "debug", }, "miner1", testWorkingDir, logDir, false, ); err == nil { @@ -175,6 +177,7 @@ func startNodes() { FJ(baseDir, "./bin/cql-minerd.test"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_2/config.yaml"), "-test.coverprofile", FJ(baseDir, "./cmd/cql-minerd/miner2.cover.out"), + "-log-level", "debug", }, "miner2", testWorkingDir, logDir, false, ); err == nil { @@ -373,8 +376,9 @@ func TestFullProcess(t *testing.T) { // client send create database transaction meta := client.ResourceMeta{ ResourceMeta: types.ResourceMeta{ - TargetMiners: minersAddrs, - Node: uint16(len(minersAddrs)), + TargetMiners: minersAddrs, + Node: uint16(len(minersAddrs)), + IsolationLevel: int(sql.LevelReadUncommitted), }, GasPrice: testGasPrice, AdvancePayment: testAdvancePayment, @@ -540,8 +544,6 @@ func TestFullProcess(t *testing.T) { err = row.Scan(&result) c.So(err, ShouldBeNil) c.So(result, ShouldEqual, 10000000000) - - c.So(err, ShouldBeNil) }) ctx2, ccl2 := context.WithTimeout(context.Background(), 3*time.Minute) From 3e0032cac1ce77117cafe90b2e71c7ee4704517e Mon Sep 17 00:00:00 2001 From: leventeliu Date: Thu, 17 Jan 2019 18:10:09 +0800 Subject: [PATCH 147/302] Add test case for serializable level --- 
xenomint/query_sanitizer.go | 4 ++ xenomint/state.go | 18 ++++--- xenomint/state_test.go | 94 ++++++++++++++++++++++++++++++++++++- 3 files changed, 109 insertions(+), 7 deletions(-) diff --git a/xenomint/query_sanitizer.go b/xenomint/query_sanitizer.go index 760f5092c..10117d863 100644 --- a/xenomint/query_sanitizer.go +++ b/xenomint/query_sanitizer.go @@ -74,6 +74,10 @@ var ( ) func convertQueryAndBuildArgs(pattern string, args []types.NamedArg) (containsDDL bool, p string, ifs []interface{}, err error) { + if lower := strings.ToLower(pattern); strings.Contains(lower, "begin") || + strings.Contains(lower, "rollback") { + return false, pattern, nil, nil + } var ( tokenizer = sqlparser.NewStringTokenizer(pattern) queryParts []string diff --git a/xenomint/state.go b/xenomint/state.go index a1714a75c..fcbef3acd 100644 --- a/xenomint/state.go +++ b/xenomint/state.go @@ -393,7 +393,7 @@ func (s *State) write( lockReleased = time.Since(start) }() lastSeq = s.getSeq() - if qcnt > 1 { + if qcnt > 1 && s.level == sql.LevelReadUncommitted { // Set savepoint if _, ierr = s.executer.Exec(`SAVEPOINT "?"`, lastSeq); ierr != nil { err = errors.Wrapf(ierr, "failed to create savepoint %d", lastSeq) @@ -415,12 +415,18 @@ func (s *State) write( lastInsertID, _ = res.LastInsertId() totalAffectedRows += curAffectedRows } - if qcnt > 1 { - // Release savepoint - if _, ierr = s.executer.Exec(`RELEASE SAVEPOINT "?"`, lastSeq); ierr != nil { - err = errors.Wrapf(ierr, "failed to release savepoint %d", lastSeq) - return + if s.level == sql.LevelReadUncommitted { + if qcnt > 1 { + // Release savepoint + if _, ierr = s.executer.Exec(`RELEASE SAVEPOINT "?"`, lastSeq); ierr != nil { + err = errors.Wrapf(ierr, "failed to release savepoint %d", lastSeq) + return + } } + } else { + // NOTE(leventeliu): this will cancel any uncommitted transaction, and do not harm to + // committed ones. 
+ s.executer.Exec(`ROLLBACK`) } // Try to commit if the ongoing tx is too large or schema is changed if s.getSeq()-s.getLastCommitPoint() > s.maxTx || diff --git a/xenomint/state_test.go b/xenomint/state_test.go index a3292f8d1..a68a893da 100644 --- a/xenomint/state_test.go +++ b/xenomint/state_test.go @@ -17,10 +17,12 @@ package xenomint import ( + "context" "database/sql" "fmt" "os" "path" + "sync" "testing" "github.com/CovenantSQL/CovenantSQL/crypto/hash" @@ -33,6 +35,10 @@ import ( . "github.com/smartystreets/goconvey/convey" ) +var ( + nodeID = proto.NodeID("0000000000000000000000000000000000000000000000000000000000000000") +) + func TestState(t *testing.T) { Convey("Given a chain state object", t, func() { var ( @@ -43,7 +49,6 @@ func TestState(t *testing.T) { strg1, strg2 xi.Storage err error ) - nodeID := proto.NodeID("0000000000000000000000000000000000000000000000000000000000000000") strg1, err = xs.NewSqlite(fmt.Sprint("file:", fl1)) So(err, ShouldBeNil) So(strg1, ShouldNotBeNil) @@ -661,3 +666,90 @@ func TestConvertQueryAndBuildArgs(t *testing.T) { So(sanitizedQuery, ShouldEqual, ddlQuery) }) } + +func TestSerializableState(t *testing.T) { + Convey("Given a serialzable state", t, func() { + var ( + filePath = path.Join(testingDataDir, t.Name()) + state *State + storage xi.Storage + err error + ) + storage, err = xs.NewSqlite(fmt.Sprint("file:", filePath)) + So(err, ShouldBeNil) + So(storage, ShouldNotBeNil) + state = NewState(sql.LevelSerializable, nodeID, storage) + So(state, ShouldNotBeNil) + Reset(func() { + // Clean database file after each pass + err = state.Close(true) + So(err, ShouldBeNil) + err = os.Remove(filePath) + So(err, ShouldBeNil) + err = os.Remove(fmt.Sprint(filePath, "-shm")) + So(err == nil || os.IsNotExist(err), ShouldBeTrue) + err = os.Remove(fmt.Sprint(filePath, "-wal")) + So(err == nil || os.IsNotExist(err), ShouldBeTrue) + }) + Convey("When a basic KV table is created", func() { + var ( + req = buildRequest(types.WriteQuery, 
[]types.Query{ + buildQuery(`CREATE TABLE t1 (k INT, v TEXT, PRIMARY KEY(k))`), + }) + resp *types.Response + ) + _, resp, err = state.Query(req) + So(err, ShouldBeNil) + So(resp, ShouldNotBeNil) + Convey("The state should not see uncommitted changes", func(c C) { + // Build transaction query + var ( + count = 1000 + queries = make([]types.Query, count+1) + req *types.Request + ) + queries[0] = buildQuery(`BEGIN`) + for i := 0; i < count; i++ { + queries[i+1] = buildQuery( + `INSERT INTO t1(k, v) VALUES (?, ?)`, i, fmt.Sprintf("v%d", i), + ) + } + req = buildRequest(types.WriteQuery, queries) + // Send uncommitted transaction on background + var ( + wg = &sync.WaitGroup{} + ctx, cancel = context.WithCancel(context.Background()) + ) + defer func() { + cancel() + wg.Wait() + }() + wg.Add(1) + go func() { + defer wg.Done() + for { + var _, resp, err = state.Query(req) + c.So(err, ShouldBeNil) + c.So(resp.Header.RowCount, ShouldEqual, 0) + select { + case <-ctx.Done(): + return + default: + } + } + }() + // Test isolation level + for i := 0; i < count; i++ { + _, resp, err = state.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT COUNT(1) AS cnt FROM t1`), + })) + So(resp.Payload, ShouldResemble, types.ResponsePayload{ + Columns: []string{"cnt"}, + DeclTypes: []string{""}, + Rows: []types.ResponseRow{{Values: []interface{}{int64(0)}}}, + }) + } + }) + }) + }) +} From 5393e18d6bb9c68129146ea0f93710798a3ac829 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Thu, 17 Jan 2019 18:39:38 +0800 Subject: [PATCH 148/302] Fix client test case --- client/helper_test.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/client/helper_test.go b/client/helper_test.go index 0db24c669..7ff55c20b 100644 --- a/client/helper_test.go +++ b/client/helper_test.go @@ -18,6 +18,7 @@ package client import ( "bytes" + "database/sql" "fmt" "io/ioutil" "math/rand" @@ -149,8 +150,11 @@ func startTestService() (stopTestService func(), tempDir string, err 
error) { req = new(types.UpdateService) req.Header.Op = types.CreateDB req.Header.Instance = types.ServiceInstance{ - DatabaseID: dbID, - Peers: peers, + DatabaseID: dbID, + Peers: peers, + ResourceMeta: types.ResourceMeta{ + IsolationLevel: int(sql.LevelReadUncommitted), + }, GenesisBlock: block, } if req.Header.Signee, err = kms.GetLocalPublicKey(); err != nil { From ac68f6df2de25ae29364d223acf4b4cbb8a920c2 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Thu, 17 Jan 2019 21:35:16 +0800 Subject: [PATCH 149/302] Reset miner log level --- cmd/cql-minerd/integration_test.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index 7100363ea..a802c5942 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -149,7 +149,6 @@ func startNodes() { FJ(baseDir, "./bin/cql-minerd.test"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_0/config.yaml"), "-test.coverprofile", FJ(baseDir, "./cmd/cql-minerd/miner0.cover.out"), - "-log-level", "debug", }, "miner0", testWorkingDir, logDir, true, ); err == nil { @@ -163,7 +162,6 @@ func startNodes() { FJ(baseDir, "./bin/cql-minerd.test"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_1/config.yaml"), "-test.coverprofile", FJ(baseDir, "./cmd/cql-minerd/miner1.cover.out"), - "-log-level", "debug", }, "miner1", testWorkingDir, logDir, false, ); err == nil { @@ -177,7 +175,6 @@ func startNodes() { FJ(baseDir, "./bin/cql-minerd.test"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_2/config.yaml"), "-test.coverprofile", FJ(baseDir, "./cmd/cql-minerd/miner2.cover.out"), - "-log-level", "debug", }, "miner2", testWorkingDir, logDir, false, ); err == nil { From 3e492e9208bb96843c573239aa810ba382fdaf8b Mon Sep 17 00:00:00 2001 From: leventeliu Date: Fri, 18 Jan 2019 18:04:03 +0800 Subject: [PATCH 150/302] Add cql-utils option to wait for confirmation --- 
blockproducer/interfaces/transaction.go | 17 +++++++++ cmd/cql-utils/rpc.go | 47 ++++++++++++++++++++++--- route/acl.go | 6 +++- 3 files changed, 65 insertions(+), 5 deletions(-) diff --git a/blockproducer/interfaces/transaction.go b/blockproducer/interfaces/transaction.go index efcc586c3..0282cbc2d 100644 --- a/blockproducer/interfaces/transaction.go +++ b/blockproducer/interfaces/transaction.go @@ -129,6 +129,23 @@ const ( TransactionStateNotFound ) +func (s TransactionState) String() string { + switch s { + case TransactionStatePending: + return "Pending" + case TransactionStatePacked: + return "Packed" + case TransactionStateConfirmed: + return "Confirmed" + case TransactionStateExpired: + return "Expired" + case TransactionStateNotFound: + return "Not Found" + default: + return "Unknown" + } +} + // Transaction is the interface implemented by an object that can be verified and processed by // block producers. type Transaction interface { diff --git a/cmd/cql-utils/rpc.go b/cmd/cql-utils/rpc.go index 1f868e964..936d0f614 100644 --- a/cmd/cql-utils/rpc.go +++ b/cmd/cql-utils/rpc.go @@ -22,6 +22,7 @@ import ( "fmt" "reflect" "strings" + "time" bp "github.com/CovenantSQL/CovenantSQL/blockproducer" pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" @@ -46,9 +47,10 @@ var ( route.SQLChainRPCName: &sqlchain.MuxService{}, route.BlockProducerRPCName: &bp.ChainRPCService{}, } - rpcName string - rpcEndpoint string - rpcReq string + rpcName string + rpcEndpoint string + rpcReq string + rpcTxWaitConfirm bool ) type canSign interface { @@ -59,6 +61,7 @@ func init() { flag.StringVar(&rpcName, "rpc", "", "rpc name to do test call") flag.StringVar(&rpcEndpoint, "rpc-endpoint", "", "rpc endpoint to do test call") flag.StringVar(&rpcReq, "rpc-req", "", "rpc request to do test call, in json format") + flag.BoolVar(&rpcTxWaitConfirm, "rpc-tx-wait-confirm", false, "wait for transaction confirmation") } func runRPC() { @@ -95,9 +98,10 @@ func runRPC() { } // fill nonce 
if this is a AddTx request + var tx pi.Transaction if rpcName == route.MCCAddTx.String() { if addTxReqType, ok := req.(*types.AddTxReq); ok { - var tx = addTxReqType.Tx + tx = addTxReqType.Tx for { if txWrapper, ok := tx.(*pi.TransactionWrapper); ok { tx = txWrapper.Unwrap() @@ -138,6 +142,41 @@ func runRPC() { // print the response log.Info("got response") spewCfg.Dump(resp) + + if rpcName == route.MCCAddTx.String() && rpcTxWaitConfirm { + log.Info("waiting for transaction confirmation...") + var ( + err error + ticker = time.NewTicker(1 * time.Second) + req = &types.QueryTxStateReq{Hash: tx.Hash()} + resp = &types.QueryTxStateResp{} + ) + defer ticker.Stop() + for { + if err = rpc.NewCaller().CallNode( + proto.NodeID(rpcEndpoint), + route.MCCQueryTxState.String(), + req, resp, + ); err != nil { + log.Fatalf("query transaction state failed: %v", err) + } + switch resp.State { + case pi.TransactionStatePending: + fmt.Print(".") + case pi.TransactionStatePacked: + fmt.Print("+") + case pi.TransactionStateConfirmed: + fmt.Print("✔\n") + return + case pi.TransactionStateExpired, pi.TransactionStateNotFound: + fmt.Print("✘\n") + log.Fatalf("bad transaction state: %s", resp.State) + } + select { + case <-ticker.C: + } + } + } } func checkAndSign(req interface{}) (err error) { diff --git a/route/acl.go b/route/acl.go index 8efa6a1e9..2e71fe144 100644 --- a/route/acl.go +++ b/route/acl.go @@ -111,10 +111,12 @@ const ( MCCNextAccountNonce // MCCAddTx is used by block producer main chain to upload transaction MCCAddTx - // MCCQuerySQLChainProfile is used by nodes to to query SQLChainProfile. + // MCCQuerySQLChainProfile is used by nodes to query SQLChainProfile. MCCQuerySQLChainProfile // MCCQueryAccountTokenBalance is used by block producer to provide account token balance MCCQueryAccountTokenBalance + // MCCQueryTxState is used by client to query transaction state. 
+ MCCQueryTxState // DHTRPCName defines the block producer dh-rpc service name DHTRPCName = "DHT" // BlockProducerRPCName defines main chain rpc name @@ -186,6 +188,8 @@ func (s RemoteFunc) String() string { return "MCC.QuerySQLChainProfile" case MCCQueryAccountTokenBalance: return "MCC.QueryAccountTokenBalance" + case MCCQueryTxState: + return "MCC.QueryTxState" } return "Unknown" } From 441a1ef02beeec994d9ff9d9afd8108ff3dfc199 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Fri, 18 Jan 2019 18:10:16 +0800 Subject: [PATCH 151/302] Add default case in tx state switch block --- cmd/cql-utils/rpc.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cmd/cql-utils/rpc.go b/cmd/cql-utils/rpc.go index 936d0f614..1e7b88d92 100644 --- a/cmd/cql-utils/rpc.go +++ b/cmd/cql-utils/rpc.go @@ -171,6 +171,9 @@ func runRPC() { case pi.TransactionStateExpired, pi.TransactionStateNotFound: fmt.Print("✘\n") log.Fatalf("bad transaction state: %s", resp.State) + default: + fmt.Print("✘\n") + log.Fatal("unknown transaction state") } select { case <-ticker.C: From e1c5350149729dedb75f08be295b90cb06938841 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Fri, 18 Jan 2019 18:13:16 +0800 Subject: [PATCH 152/302] Minor fix --- cmd/cql-utils/rpc.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/cmd/cql-utils/rpc.go b/cmd/cql-utils/rpc.go index 1e7b88d92..bc83f4ceb 100644 --- a/cmd/cql-utils/rpc.go +++ b/cmd/cql-utils/rpc.go @@ -175,9 +175,7 @@ func runRPC() { fmt.Print("✘\n") log.Fatal("unknown transaction state") } - select { - case <-ticker.C: - } + <-ticker.C } } } From bdf8adf79b3f329dee9a797ba72b781dd3f4b627 Mon Sep 17 00:00:00 2001 From: zeqing-guo Date: Fri, 18 Jan 2019 18:49:58 +0800 Subject: [PATCH 153/302] Add other cmd tool in observer image --- docker/observer.Dockerfile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docker/observer.Dockerfile b/docker/observer.Dockerfile index 6e37949b2..59dfedd69 100644 --- a/docker/observer.Dockerfile 
+++ b/docker/observer.Dockerfile @@ -19,8 +19,7 @@ ENV COVENANT_CONF=config.yaml RUN apk --no-cache add ca-certificates WORKDIR /app -COPY --from=covenantsql/covenantsql-builder /go/src/github.com/CovenantSQL/CovenantSQL/bin/cql-observer /app/ -COPY --from=covenantsql/covenantsql-builder /go/src/github.com/CovenantSQL/CovenantSQL/bin/docker-entry.sh /app/ +COPY --from=covenantsql/covenantsql-builder /go/src/github.com/CovenantSQL/CovenantSQL/bin/* /app/ ENTRYPOINT [ "./docker-entry.sh" ] EXPOSE 4661 EXPOSE 80 From cfde40fa9909b22155d2225a596df5879a9a628c Mon Sep 17 00:00:00 2001 From: zeqing-guo Date: Sun, 20 Jan 2019 22:27:04 +0800 Subject: [PATCH 154/302] Prune useless binary tools --- docker/observer.Dockerfile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docker/observer.Dockerfile b/docker/observer.Dockerfile index 59dfedd69..d0c485796 100644 --- a/docker/observer.Dockerfile +++ b/docker/observer.Dockerfile @@ -19,7 +19,10 @@ ENV COVENANT_CONF=config.yaml RUN apk --no-cache add ca-certificates WORKDIR /app -COPY --from=covenantsql/covenantsql-builder /go/src/github.com/CovenantSQL/CovenantSQL/bin/* /app/ +COPY --from=covenantsql/covenantsql-builder /go/src/github.com/CovenantSQL/CovenantSQL/bin/cql-observer /app/ +COPY --from=covenantsql/covenantsql-builder /go/src/github.com/CovenantSQL/CovenantSQL/bin/cql /app/ +COPY --from=covenantsql/covenantsql-builder /go/src/github.com/CovenantSQL/CovenantSQL/bin/cql-utils /app/ +COPY --from=covenantsql/covenantsql-builder /go/src/github.com/CovenantSQL/CovenantSQL/bin/docker-entry.sh /app/ ENTRYPOINT [ "./docker-entry.sh" ] EXPOSE 4661 EXPOSE 80 From 95423a39537c0d50d09d381945cdaa7c741062bd Mon Sep 17 00:00:00 2001 From: leventeliu Date: Mon, 21 Jan 2019 11:08:48 +0800 Subject: [PATCH 155/302] Add method to wait for transaction confirmation --- client/driver.go | 54 ++++++++++++++++++++++++++++++++++++++++++++++-- cmd/cql/main.go | 39 ++++++++++++++++++++++++++++++++-- 2 files changed, 89 
insertions(+), 4 deletions(-) diff --git a/client/driver.go b/client/driver.go index 5a77a31ce..2eba5664a 100644 --- a/client/driver.go +++ b/client/driver.go @@ -28,9 +28,11 @@ import ( bp "github.com/CovenantSQL/CovenantSQL/blockproducer" "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" + pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" @@ -260,7 +262,7 @@ func GetTokenBalance(tt types.TokenType) (balance uint64, err error) { // UpdatePermission sends UpdatePermission transaction to chain. func UpdatePermission(targetUser proto.AccountAddress, - targetChain proto.AccountAddress, perm types.UserPermission) (err error) { + targetChain proto.AccountAddress, perm types.UserPermission) (txHash hash.Hash, err error) { if atomic.LoadUint32(&driverInitialized) == 0 { err = ErrNotInitialized return @@ -307,11 +309,14 @@ func UpdatePermission(targetUser proto.AccountAddress, return } + txHash = up.Hash() return } // TransferToken send Transfer transaction to chain. -func TransferToken(targetUser proto.AccountAddress, amount uint64, tokenType types.TokenType) (err error) { +func TransferToken(targetUser proto.AccountAddress, amount uint64, tokenType types.TokenType) ( + txHash hash.Hash, err error, +) { if atomic.LoadUint32(&driverInitialized) == 0 { err = ErrNotInitialized return @@ -359,6 +364,51 @@ func TransferToken(targetUser proto.AccountAddress, amount uint64, tokenType typ return } + txHash = tran.Hash() + return +} + +// WaitTxConfirmation waits for the transaction with target hash txHash to be confirmed. It also +// returns if any error occurs or a final state is returned from BP. 
+func WaitTxConfirmation( + ctx context.Context, txHash hash.Hash) (state interfaces.TransactionState, err error, +) { + var ( + ticker = time.NewTicker(1 * time.Second) + method = route.MCCQueryTxState + req = &types.QueryTxStateReq{Hash: txHash} + resp = &types.QueryTxStateResp{} + ) + defer ticker.Stop() + for { + if err = requestBP(method, req, resp); err != nil { + err = errors.Wrapf(err, "failed to call %s", method) + return + } + + state = resp.State + log.WithFields(log.Fields{ + "tx_hash": txHash, + "tx_state": state, + }).Debug("waiting for tx confirmation") + + switch state { + case pi.TransactionStatePending: + case pi.TransactionStatePacked: + case pi.TransactionStateConfirmed, pi.TransactionStateExpired, pi.TransactionStateNotFound: + return + default: + err = errors.Errorf("unknown transaction state %d", state) + return + } + + select { + case <-ticker.C: + case <-ctx.Done(): + err = ctx.Err() + return + } + } return } diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 05225b3e4..07db14704 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -30,9 +30,13 @@ import ( "runtime" "strconv" "strings" + "time" + pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/client" + "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils/log" @@ -67,6 +71,9 @@ var ( transferToken string // transfer token to target account getBalance bool // get balance of current account getBalanceWithTokenName string // get specific token's balance of current account + waitTxConfirmation bool // wait for transaction confirmation before exiting + + waitTxConfirmationMaxDuration time.Duration ) type userPermission struct { @@ -216,6 +223,7 @@ func init() { flag.StringVar(&transferToken, "transfer", "", "transfer 
token to target account") flag.BoolVar(&getBalance, "get-balance", false, "get balance of current account") flag.StringVar(&getBalanceWithTokenName, "token-balance", "", "get specific token's balance of current account, e.g. Particle, Wave, and etc.") + flag.BoolVar(&waitTxConfirmation, "wait-tx-confirm", false, "wait for transaction confirmation") } func main() { @@ -235,6 +243,11 @@ func main() { return } + // TODO(leventeliu): discover more specific confirmation duration from config. We don't have + // enough informations from config to do that currently, so just use a fixed and long enough + // duration. + waitTxConfirmationMaxDuration = 10 * conf.GConf.BPPeriod + if getBalance { var stableCoinBalance, covenantCoinBalance uint64 @@ -342,7 +355,7 @@ func main() { return } - err := client.UpdatePermission(perm.TargetUser, perm.TargetChain, p) + txHash, err := client.UpdatePermission(perm.TargetUser, perm.TargetChain, p) if err != nil { log.WithError(err).Error("update permission failed") @@ -350,6 +363,10 @@ func main() { return } + if waitTxConfirmation { + wait(txHash) + } + log.Info("succeed in sending transaction to CovenantSQL") return } @@ -391,13 +408,18 @@ func main() { return } - err = client.TransferToken(tran.TargetUser, amount, unit) + var txHash hash.Hash + txHash, err = client.TransferToken(tran.TargetUser, amount, unit) if err != nil { log.WithError(err).Error("transfer token failed") os.Exit(-1) return } + if waitTxConfirmation { + wait(txHash) + } + log.Info("succeed in sending transaction to CovenantSQL") return } @@ -445,6 +467,19 @@ func main() { } } +func wait(txHash hash.Hash) { + var ctx, cancel = context.WithTimeout(context.Background(), waitTxConfirmationMaxDuration) + defer cancel() + var state, err = client.WaitTxConfirmation(ctx, txHash) + log.WithFields(log.Fields{ + "tx_hash": txHash, + "tx_state": state, + }).WithError(err).Info("wait transaction confirmation") + if err != nil || state != pi.TransactionStateConfirmed { + os.Exit(1) 
+ } +} + func run(u *user.User) (err error) { // get working directory wd, err := os.Getwd() From 00acb22a3d78ae6dd4d6aaeb0503dfba380f68fc Mon Sep 17 00:00:00 2001 From: leventeliu Date: Mon, 21 Jan 2019 11:11:25 +0800 Subject: [PATCH 156/302] Minor fix --- client/driver.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/client/driver.go b/client/driver.go index 2eba5664a..5af1ace39 100644 --- a/client/driver.go +++ b/client/driver.go @@ -28,7 +28,6 @@ import ( bp "github.com/CovenantSQL/CovenantSQL/blockproducer" "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" - pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" @@ -393,9 +392,11 @@ func WaitTxConfirmation( }).Debug("waiting for tx confirmation") switch state { - case pi.TransactionStatePending: - case pi.TransactionStatePacked: - case pi.TransactionStateConfirmed, pi.TransactionStateExpired, pi.TransactionStateNotFound: + case interfaces.TransactionStatePending: + case interfaces.TransactionStatePacked: + case interfaces.TransactionStateConfirmed, + interfaces.TransactionStateExpired, + interfaces.TransactionStateNotFound: return default: err = errors.Errorf("unknown transaction state %d", state) From ba7840029dcb835f100dea193cdca89127ffed49 Mon Sep 17 00:00:00 2001 From: auxten Date: Thu, 27 Dec 2018 15:06:29 +0800 Subject: [PATCH 157/302] Keep HashStablePack latest before generate code --- genMarshalHash.sh | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/genMarshalHash.sh b/genMarshalHash.sh index f336601d3..bfd98fcac 100755 --- a/genMarshalHash.sh +++ b/genMarshalHash.sh @@ -2,10 +2,8 @@ PROJECT_DIR=$(cd $(dirname $0)/; pwd) -if [[ -x hsp ]]; then - echo "install HashStablePack cmd: hsp" - go get -u github.com/CovenantSQL/HashStablePack/hsp -fi +echo "install HashStablePack cmd: hsp" +go get -v 
-u github.com/CovenantSQL/HashStablePack/hsp echo ${PROJECT_DIR} From ef1c4f245d88162ea795a726addfea24fafd1ced Mon Sep 17 00:00:00 2001 From: auxten Date: Thu, 27 Dec 2018 16:49:42 +0800 Subject: [PATCH 158/302] Add readonly flag for fuse --- cmd/cql-fuse/main.go | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/cmd/cql-fuse/main.go b/cmd/cql-fuse/main.go index 25a62eb75..6d6738d18 100644 --- a/cmd/cql-fuse/main.go +++ b/cmd/cql-fuse/main.go @@ -80,18 +80,24 @@ import ( ) var usage = func() { - fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) - fmt.Fprintf(os.Stderr, " %s -config -dsn -mount \n\n", os.Args[0]) + _, _ = fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + _, _ = fmt.Fprintf(os.Stderr, " %s -config -dsn -mount \n\n", os.Args[0]) flag.PrintDefaults() } func main() { - var config, dsn, mountPoint, password string - + var ( + config string + dsn string + mountPoint string + password string + readOnly bool + ) flag.StringVar(&config, "config", "./conf/config.yaml", "config file path") flag.StringVar(&mountPoint, "mount", "./", "dir to mount") flag.StringVar(&dsn, "dsn", "", "database url") flag.StringVar(&password, "password", "", "master key password for covenantsql") + flag.BoolVar(&readOnly, "readonly", false, "mount read only volume") flag.Usage = usage flag.Parse() @@ -102,11 +108,12 @@ func main() { log.Fatal(err) } + cfg, err := client.ParseDSN(dsn) if err != nil { log.Fatal(err) } - db, err := sql.Open("covenantsql", dsn) + db, err := sql.Open("covenantsql", cfg.FormatDSN()) if err != nil { log.Fatal(err) } @@ -118,13 +125,18 @@ func main() { } cfs := CFS{db} + opts := make([]fuse.MountOption, 0, 5) + opts = append(opts, fuse.FSName("CovenantFS")) + opts = append(opts, fuse.Subtype("CovenantFS")) + opts = append(opts, fuse.LocalVolume()) + opts = append(opts, fuse.VolumeName(cfg.DatabaseID)) + if readOnly { + opts = append(opts, fuse.ReadOnly()) + } // Mount filesystem. 
c, err := fuse.Mount( mountPoint, - fuse.FSName("CovenantFS"), - fuse.Subtype("CovenantFS"), - fuse.LocalVolume(), - fuse.VolumeName(""), + opts..., ) if err != nil { log.Fatal(err) From 667d5070be273d94fecc9f8cdaa747ad6e015de7 Mon Sep 17 00:00:00 2001 From: auxten Date: Thu, 17 Jan 2019 22:06:47 +0800 Subject: [PATCH 159/302] Make fuse blocksize = 128KB --- client/tx.go | 7 +++++-- cmd/cql-fuse/block.go | 2 +- cmd/cql-fuse/block_test.go | 3 +++ test/fuse/node_miner_0/config.yaml | 2 +- test/fuse/node_miner_1/config.yaml | 2 +- test/fuse/node_miner_2/config.yaml | 2 +- 6 files changed, 12 insertions(+), 6 deletions(-) diff --git a/client/tx.go b/client/tx.go index 047d1ccfe..578e3b8a9 100644 --- a/client/tx.go +++ b/client/tx.go @@ -21,6 +21,8 @@ import ( "context" "database/sql" "database/sql/driver" + + "github.com/pkg/errors" ) // ExecuteTx starts a transaction, and runs fn in it @@ -41,9 +43,10 @@ func ExecuteInTx(tx driver.Tx, fn func() error) (err error) { if err == nil { // Ignore commit errors. The tx has already been committed by RELEASE. err = tx.Commit() + if err != nil { + err = errors.Wrapf(err, "exec in tx") + } } else { - // We always need to execute a Rollback() so sql.DB releases the - // connection. _ = tx.Rollback() } return diff --git a/cmd/cql-fuse/block.go b/cmd/cql-fuse/block.go index 2454035c5..a71d208cf 100644 --- a/cmd/cql-fuse/block.go +++ b/cmd/cql-fuse/block.go @@ -40,7 +40,7 @@ import ( // BlockSize is the size of each data block. It must not // change throughout the lifetime of the filesystem. 
-const BlockSize = 4 << 10 // 4KB +const BlockSize = 128 << 10 // 128KB func min(a, b uint64) uint64 { if a < b { diff --git a/cmd/cql-fuse/block_test.go b/cmd/cql-fuse/block_test.go index 6bd917e62..08de77ee5 100644 --- a/cmd/cql-fuse/block_test.go +++ b/cmd/cql-fuse/block_test.go @@ -423,6 +423,9 @@ func TestShrinkGrow(t *testing.T) { if data, err = tryGrow(db, data, id, BlockSize*5); err != nil { log.Fatal(err) } + if data, err = tryGrow(db, data, id, BlockSize*999); err != nil { + log.Fatal(err) + } // Shrink it down to 0. if data, err = tryShrink(db, data, id, 0); err != nil { diff --git a/test/fuse/node_miner_0/config.yaml b/test/fuse/node_miner_0/config.yaml index 81ec69cfe..c40ddaad0 100644 --- a/test/fuse/node_miner_0/config.yaml +++ b/test/fuse/node_miner_0/config.yaml @@ -37,7 +37,7 @@ BlockProducer: Miner: IsTestMode: true RootDir: "./data" - MaxReqTimeGap: "2s" + MaxReqTimeGap: "60s" ProvideServiceInterval: "3s" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 diff --git a/test/fuse/node_miner_1/config.yaml b/test/fuse/node_miner_1/config.yaml index 84eefab7d..118567931 100644 --- a/test/fuse/node_miner_1/config.yaml +++ b/test/fuse/node_miner_1/config.yaml @@ -37,7 +37,7 @@ BlockProducer: Miner: IsTestMode: true RootDir: "./data" - MaxReqTimeGap: "2s" + MaxReqTimeGap: "60s" ProvideServiceInterval: "3s" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 diff --git a/test/fuse/node_miner_2/config.yaml b/test/fuse/node_miner_2/config.yaml index 34d8fd5ad..25a303475 100644 --- a/test/fuse/node_miner_2/config.yaml +++ b/test/fuse/node_miner_2/config.yaml @@ -37,7 +37,7 @@ BlockProducer: Miner: IsTestMode: true RootDir: "./data" - MaxReqTimeGap: "2s" + MaxReqTimeGap: "60s" ProvideServiceInterval: "3s" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 From af35d0e3e2975cd2fa2a57891fd25677663dae67 Mon Sep 17 00:00:00 2001 From: auxten Date: Mon, 21 Jan 2019 
11:26:59 +0800 Subject: [PATCH 160/302] Remove out of date comment --- client/tx.go | 1 - 1 file changed, 1 deletion(-) diff --git a/client/tx.go b/client/tx.go index 578e3b8a9..ecb8b8999 100644 --- a/client/tx.go +++ b/client/tx.go @@ -41,7 +41,6 @@ func ExecuteTx( func ExecuteInTx(tx driver.Tx, fn func() error) (err error) { err = fn() if err == nil { - // Ignore commit errors. The tx has already been committed by RELEASE. err = tx.Commit() if err != nil { err = errors.Wrapf(err, "exec in tx") From 3cfc793eab300b5d43e8652f3f3c3bdb16f1853b Mon Sep 17 00:00:00 2001 From: Ggicci Date: Mon, 21 Jan 2019 14:39:03 +0800 Subject: [PATCH 161/302] Refactor jsonrpc as a subpackage of rpc --- api/blocks.go | 6 +- api/jsonrpc.go | 86 ------------------ api/service.go | 136 +++++------------------------ api/service_test.go | 26 ++---- api/transactions.go | 6 +- cmd/cqld/bootstrap.go | 11 +-- rpc/jsonrpc/handler.go | 84 ++++++++++++++++++ {api => rpc/jsonrpc}/middleware.go | 4 +- rpc/jsonrpc/websocket.go | 70 +++++++++++++++ 9 files changed, 190 insertions(+), 239 deletions(-) delete mode 100644 api/jsonrpc.go create mode 100644 rpc/jsonrpc/handler.go rename {api => rpc/jsonrpc}/middleware.go (93%) create mode 100644 rpc/jsonrpc/websocket.go diff --git a/api/blocks.go b/api/blocks.go index dd10291eb..040e6be7d 100644 --- a/api/blocks.go +++ b/api/blocks.go @@ -9,9 +9,9 @@ import ( ) func init() { - registerMethod("bp_getBlockList", bpGetBlockList, bpGetBlockListParams{}) - registerMethod("bp_getBlockByHeight", bpGetBlockByHeight, bpGetBlockByHeightParams{}) - registerMethod("bp_getBlockByHash", bpGetBlockByHash, bpGetBlockByHashParams{}) + rpc.RegisterMethod("bp_getBlockList", bpGetBlockList, bpGetBlockListParams{}) + rpc.RegisterMethod("bp_getBlockByHeight", bpGetBlockByHeight, bpGetBlockByHeightParams{}) + rpc.RegisterMethod("bp_getBlockByHash", bpGetBlockByHash, bpGetBlockByHashParams{}) } type bpGetBlockListParams struct { diff --git a/api/jsonrpc.go b/api/jsonrpc.go 
deleted file mode 100644 index a2f9db986..000000000 --- a/api/jsonrpc.go +++ /dev/null @@ -1,86 +0,0 @@ -package api - -import ( - "context" - "fmt" - "reflect" - - "github.com/CovenantSQL/CovenantSQL/utils/log" - "github.com/pkg/errors" - "github.com/sourcegraph/jsonrpc2" -) - -var ( - jsonrpcHandler = NewJSONRPCHandler() -) - -type jsonrpcHandlerFunc func(context.Context, *jsonrpc2.Conn, *jsonrpc2.Request) (interface{}, error) - -func registerMethod(method string, handlerFunc jsonrpcHandlerFunc, paramsType interface{}) { - log.WithField("method", method).Debug("api: register rpc method") - - if paramsType == nil { - jsonrpcHandler.RegisterMethod(method, handlerFunc) - return - } - - // use a middleware component to pre-process params - typ := reflect.TypeOf(paramsType) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - } - - jsonrpcHandler.RegisterMethod(method, processParams(handlerFunc, typ)) -} - -// JSONRPCHandler is a handler handling JSON-RPC protocol. -type JSONRPCHandler struct { - methods map[string]jsonrpcHandlerFunc -} - -// NewJSONRPCHandler creates a new JSONRPCHandler. -func NewJSONRPCHandler() *JSONRPCHandler { - return &JSONRPCHandler{ - methods: make(map[string]jsonrpcHandlerFunc), - } -} - -// RegisterMethod register a method. -func (h *JSONRPCHandler) RegisterMethod(method string, handlerFunc jsonrpcHandlerFunc) { - h.methods[method] = handlerFunc -} - -// Handler returns a jsonrpc2.Handler. -func (h *JSONRPCHandler) Handler() jsonrpc2.Handler { - return jsonrpc2.HandlerWithError(h.handle) -} - -var methodNotFound = func(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) (result interface{}, err error) { - return nil, errors.Errorf("method not found: %q", req.Method) -} - -// Handle implements jsonrpc2.Handler. 
-func (h *JSONRPCHandler) handle(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) ( - result interface{}, err error, -) { - defer func() { - if p := recover(); p != nil { - switch p := p.(type) { - case error: - err = p - default: - err = fmt.Errorf("%v", p) - } - } - }() - - fn := h.methods[req.Method] - if fn == nil { - fn = methodNotFound - } else if req.Params == nil { - // pre-check req.Params not be nil - return nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams} - } - - return fn(ctx, conn, req) -} diff --git a/api/service.go b/api/service.go index 603387a80..b7153117d 100644 --- a/api/service.go +++ b/api/service.go @@ -1,135 +1,39 @@ package api import ( - "context" - "net" "net/http" - "os" - "os/signal" - "sync" - "syscall" "time" "github.com/CovenantSQL/CovenantSQL/api/models" - "github.com/CovenantSQL/CovenantSQL/utils/log" - "github.com/gorilla/websocket" + "github.com/CovenantSQL/CovenantSQL/rpc/jsonrpc" "github.com/pkg/errors" - "github.com/sourcegraph/jsonrpc2" - wsstream "github.com/sourcegraph/jsonrpc2/websocket" ) -// Service configs the API service. -type Service struct { - DBFile string // the path to the database in which stored indexed data - WebsocketAddr string // start a websocket server - ReadTimeout time.Duration - WriteTimeout time.Duration - - stopChan chan struct{} - stopped chan struct{} -} - -// NewService creates a new Service. -func NewService() *Service { - return &Service{ - stopChan: make(chan struct{}), - stopped: make(chan struct{}), - } -} - -// StartServers start API servers in a non-blocking way, fatal on errors. -func (s *Service) StartServers() { - go s.RunServers() -} - -// StopServers top API servers. -func (s *Service) StopServers() { - close(s.stopChan) -} - -// StopServersAndWait wait servers to stop. 
-func (s *Service) StopServersAndWait() { - s.StopServers() - <-s.stopped -} +var ( + rpc = jsonrpc.NewHandler() + server *jsonrpc.WebsocketServer +) -// RunServers start API servers in a blocking way, fatal on errors. -func (s *Service) RunServers() { +// Serve runs an API server on the specified address and database file. +func Serve(addr, dbFile string) error { // setup database - if err := models.InitModels(s.DBFile); err != nil { - log.WithError(err).Fatal("api: init models failed") - return + if err := models.InitModels(dbFile); err != nil { + return errors.WithMessage(err, "api: init models failed") } - wg := sync.WaitGroup{} - if s.WebsocketAddr != "" { - log.WithField("addr", s.WebsocketAddr).Info("api: start websocket server") - wg.Add(1) - go s.runWebsocketServer(&wg) + server = &jsonrpc.WebsocketServer{ + Server: http.Server{ + Addr: addr, + ReadTimeout: 30 * time.Second, + WriteTimeout: 60 * time.Second, + }, + RPCHandler: rpc, } - sigchan := make(chan os.Signal) - signal.Notify(sigchan, os.Interrupt, syscall.SIGTERM) - <-sigchan - close(s.stopChan) - wg.Wait() + return server.Serve() } -func (s *Service) runWebsocketServer(wg *sync.WaitGroup) { - defer wg.Done() - - var connOpts []jsonrpc2.ConnOpt - - mux := http.NewServeMux() - upgrader := websocket.Upgrader{CheckOrigin: func(r *http.Request) bool { return true }} - - mux.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) { - conn, err := upgrader.Upgrade(rw, r, nil) - if err != nil { - log.WithError(err).Error("api: upgrade http connection to websocket failed") - http.Error(rw, errors.WithMessage(err, "could not upgrade to websocket").Error(), http.StatusBadRequest) - return - } - defer conn.Close() - - // TODO: add metric for the connections - log.Debug("received incoming connection") - <-jsonrpc2.NewConn( - context.Background(), - wsstream.NewObjectStream(conn), - jsonrpcHandler.Handler(), - connOpts..., - ).DisconnectNotify() - log.Debug("connection closed") - }) - - addr := 
s.WebsocketAddr - listener, err := net.Listen("tcp", addr) - if err != nil { - log.WithField("addr", addr).WithError(err).Fatal("api: couldn't bind to address") - return - } - - httpServer := &http.Server{ - Handler: mux, - ReadTimeout: s.ReadTimeout, - WriteTimeout: s.WriteTimeout, - } - - go func() { - if err := httpServer.Serve(listener); err != nil { - log.WithError(err).Error("api: websocket server serve error") - } - }() - - <-s.stopChan - - log.Warn("api: shutdown websocket server") - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - if err := httpServer.Shutdown(ctx); err != nil { - log.WithError(err).Error("shutdown server") - } - cancel() - log.Warn("api: websocket server stopped") - close(s.stopped) +// StopService stops the API server. +func StopService() { + server.Stop() } diff --git a/api/service_test.go b/api/service_test.go index 9279a3741..8f22495af 100644 --- a/api/service_test.go +++ b/api/service_test.go @@ -5,7 +5,6 @@ import ( "fmt" "os" "path/filepath" - "strconv" "testing" "time" @@ -238,18 +237,12 @@ func TestJSONRPCService(t *testing.T) { defer os.Remove(testdb + "-wal") defer os.Remove(testdb) - port := 8546 // log.SetLevel(log.DebugLevel) - service := api.NewService() - service.DBFile = testdb - service.WebsocketAddr = ":" + strconv.Itoa(port) - service.StartServers() - defer service.StopServersAndWait() + go api.Serve(":8546", testdb) + defer api.StopService() var ( - addr = fmt.Sprintf("ws://localhost:%d", port) - callOpts []jsonrpc2.CallOption - + addr = "ws://localhost:8546" conveyBlock = func(convey C, item *models.Block, cp []interface{}) { if cp == nil { convey.So(item, ShouldBeNil) @@ -293,7 +286,7 @@ func TestJSONRPCService(t *testing.T) { Convey("call method should fail if method not found", func() { var result interface{} - err := rpc.Call(context.Background(), "method_NotFound", nil, &result, callOpts...) 
+ err := rpc.Call(context.Background(), "method_NotFound", nil, &result) So(err, ShouldNotBeNil) }) @@ -321,7 +314,7 @@ func TestJSONRPCService(t *testing.T) { for name, testCase := range testCases { Convey(name, func() { - err := rpc.Call(context.Background(), "bp_getBlockList", testCase, &result, callOpts...) + err := rpc.Call(context.Background(), "bp_getBlockList", testCase, &result) So(err, ShouldNotBeNil) }) } @@ -341,7 +334,7 @@ func TestJSONRPCService(t *testing.T) { for i, testCase := range testCases { Convey(fmt.Sprintf("case#%d: %s", i, testCase.String()), func(c C) { - err := rpc.Call(context.Background(), "bp_getBlockList", testCase.Params(), &result, callOpts...) + err := rpc.Call(context.Background(), "bp_getBlockList", testCase.Params(), &result) So(err, ShouldBeNil) So(len(result.Blocks), ShouldEqual, len(testCase.ExpectedResults)) So(result.Pagination, ShouldResemble, testCase.ExpectedPagination) @@ -369,7 +362,6 @@ func TestJSONRPCService(t *testing.T) { "bp_getBlockByHash", []interface{}{testCase.Hash}, &result, - callOpts..., ) So(err, ShouldBeNil) conveyBlock(c, result, testCase.ExpectedResult) @@ -394,7 +386,6 @@ func TestJSONRPCService(t *testing.T) { "bp_getBlockByHeight", []interface{}{testCase.Height}, &result, - callOpts..., ) So(err, ShouldBeNil) conveyBlock(c, result, testCase.ExpectedResult) @@ -430,7 +421,6 @@ func TestJSONRPCService(t *testing.T) { "bp_getTransactionList", testCase, &result, - callOpts..., ) So(err, ShouldNotBeNil) }) @@ -467,7 +457,6 @@ func TestJSONRPCService(t *testing.T) { "bp_getTransactionList", testCase.Params(), &result, - callOpts..., ) So(err, ShouldBeNil) So(len(result.Transactions), ShouldEqual, len(testCase.ExpectedResults)) @@ -496,7 +485,6 @@ func TestJSONRPCService(t *testing.T) { "bp_getTransactionListOfBlock", testCase, &result, - callOpts..., ) So(err, ShouldNotBeNil) }) @@ -529,7 +517,6 @@ func TestJSONRPCService(t *testing.T) { "bp_getTransactionListOfBlock", testCase.Params(), &result, - 
callOpts..., ) So(err, ShouldBeNil) So(len(result.Transactions), ShouldEqual, len(testCase.ExpectedResults)) @@ -559,7 +546,6 @@ func TestJSONRPCService(t *testing.T) { "bp_getTransactionByHash", []interface{}{testCase.Hash}, &result, - callOpts..., ) So(err, ShouldBeNil) conveyTransaction(c, result, testCase.ExpectedResult) diff --git a/api/transactions.go b/api/transactions.go index b43ca7728..cc88ecaae 100644 --- a/api/transactions.go +++ b/api/transactions.go @@ -10,9 +10,9 @@ import ( ) func init() { - registerMethod("bp_getTransactionList", bpGetTransactionList, bpGetTransactionListParams{}) - registerMethod("bp_getTransactionByHash", bpGetTransactionByHash, bpGetTransactionByHashParams{}) - registerMethod("bp_getTransactionListOfBlock", bpGetTransactionListOfBlock, bpGetTransactionListOfBlockParams{}) + rpc.RegisterMethod("bp_getTransactionList", bpGetTransactionList, bpGetTransactionListParams{}) + rpc.RegisterMethod("bp_getTransactionByHash", bpGetTransactionByHash, bpGetTransactionByHashParams{}) + rpc.RegisterMethod("bp_getTransactionListOfBlock", bpGetTransactionListOfBlock, bpGetTransactionListOfBlockParams{}) } type bpGetTransactionListParams struct { diff --git a/cmd/cqld/bootstrap.go b/cmd/cqld/bootstrap.go index f01a76c19..ac35e1b99 100644 --- a/cmd/cqld/bootstrap.go +++ b/cmd/cqld/bootstrap.go @@ -170,14 +170,8 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { //go periodicPingBlockProducer() // start json-rpc server - if wsapiAddr != "" { - jsonrpcServer := api.NewService() - jsonrpcServer.DBFile = conf.GConf.BP.ChainFileName - jsonrpcServer.WebsocketAddr = wsapiAddr - jsonrpcServer.ReadTimeout = 60 * time.Second - jsonrpcServer.WriteTimeout = 60 * time.Second - jsonrpcServer.StartServers() - } + log.Info("wsapi: start service") + go api.Serve(wsapiAddr, conf.GConf.BP.ChainFileName) signalCh := make(chan os.Signal, 1) signal.Notify( @@ -188,7 +182,6 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { 
signal.Ignore(syscall.SIGHUP, syscall.SIGTTIN, syscall.SIGTTOU) <-signalCh - return } diff --git a/rpc/jsonrpc/handler.go b/rpc/jsonrpc/handler.go new file mode 100644 index 000000000..d57a3c43f --- /dev/null +++ b/rpc/jsonrpc/handler.go @@ -0,0 +1,84 @@ +package jsonrpc + +import ( + "context" + "fmt" + "reflect" + + "github.com/sourcegraph/jsonrpc2" +) + +var ( + defaultHandler = NewHandler() +) + +// HandlerFunc is a function adapter to Handler. +type HandlerFunc func(context.Context, *jsonrpc2.Conn, *jsonrpc2.Request) (interface{}, error) + +// RegisterMethod register a method to the default handler. +func RegisterMethod(method string, handlerFunc HandlerFunc, paramsType interface{}) { + defaultHandler.RegisterMethod(method, handlerFunc, paramsType) +} + +// Handler is a handler handling JSON-RPC protocol. +type Handler struct { + methods map[string]HandlerFunc +} + +// NewHandler creates a new JSONRPCHandler. +func NewHandler() *Handler { + return &Handler{ + methods: make(map[string]HandlerFunc), + } +} + +// RegisterMethod register a method. +func (h *Handler) RegisterMethod(method string, handlerFunc HandlerFunc, paramsType interface{}) { + if _, ok := h.methods[method]; ok { + panic(fmt.Sprintf("method %q already registered", method)) + } + + if paramsType == nil { + h.methods[method] = handlerFunc + return + } + + // Pre-process rpc parameters with a middleware + typ := reflect.TypeOf(paramsType) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + + h.methods[method] = processParams(handlerFunc, typ) +} + +// Handle implements jsonrpc2.Handler. +func (h *Handler) Handle(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) { + jsonrpc2.HandlerWithError(h.handle).Handle(ctx, conn, req) +} + +// handle is a function to be used by jsonrpc2.Handler. 
+func (h *Handler) handle(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) ( + result interface{}, err error, +) { + defer func() { + if p := recover(); p != nil { + switch p := p.(type) { + case error: + err = p + default: + err = fmt.Errorf("%v", p) + } + } + }() + + fn := h.methods[req.Method] + if fn == nil { + return nil, &jsonrpc2.Error{Code: jsonrpc2.CodeMethodNotFound} + } else if req.Params == nil { + // pre-check req.Params not be nil + return nil, &jsonrpc2.Error{Code: jsonrpc2.CodeInvalidParams} + } + + return fn(ctx, conn, req) +} diff --git a/api/middleware.go b/rpc/jsonrpc/middleware.go similarity index 93% rename from api/middleware.go rename to rpc/jsonrpc/middleware.go index 352807c7f..378cadfa8 100644 --- a/api/middleware.go +++ b/rpc/jsonrpc/middleware.go @@ -1,4 +1,4 @@ -package api +package jsonrpc import ( "context" @@ -15,7 +15,7 @@ type Validator interface { } // middleware: unmarshal req.Params(JSON array) to pre-defined structures (Object) -func processParams(h jsonrpcHandlerFunc, paramsType reflect.Type) jsonrpcHandlerFunc { +func processParams(h HandlerFunc, paramsType reflect.Type) HandlerFunc { return func(ctx context.Context, conn *jsonrpc2.Conn, req *jsonrpc2.Request) ( result interface{}, err error, ) { diff --git a/rpc/jsonrpc/websocket.go b/rpc/jsonrpc/websocket.go new file mode 100644 index 000000000..ae02a62cf --- /dev/null +++ b/rpc/jsonrpc/websocket.go @@ -0,0 +1,70 @@ +package jsonrpc + +import ( + "context" + "net" + "net/http" + "time" + + "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/gorilla/websocket" + "github.com/pkg/errors" + "github.com/sourcegraph/jsonrpc2" + wsstream "github.com/sourcegraph/jsonrpc2/websocket" +) + +// WebsocketServer is a websocket server providing JSON-RPC API service. +type WebsocketServer struct { + http.Server + RPCHandler jsonrpc2.Handler +} + +// Serve accepts incoming connections and serve each. 
+func (ws *WebsocketServer) Serve() error { + var ( + mux = http.NewServeMux() + upgrader = websocket.Upgrader{CheckOrigin: func(r *http.Request) bool { return true }} + handler = ws.RPCHandler + ) + + if handler == nil { + handler = defaultHandler + } + + mux.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) { + conn, err := upgrader.Upgrade(rw, r, nil) + if err != nil { + log.WithError(err).Error("jsonrpc: upgrade http connection to websocket failed") + http.Error(rw, errors.WithMessage(err, "could not upgrade to websocket").Error(), http.StatusBadRequest) + return + } + defer conn.Close() + + // TODO: add metric for the connections + <-jsonrpc2.NewConn( + context.Background(), + wsstream.NewObjectStream(conn), + handler, + ).DisconnectNotify() + }) + + addr := ws.Addr + listener, err := net.Listen("tcp", addr) + if err != nil { + return errors.Wrapf(err, "couldn't bind to address %q", addr) + } + + ws.Handler = mux + return ws.Server.Serve(listener) +} + +// Stop stops the server and returns a channel indicating server is stopped. 
+func (ws *WebsocketServer) Stop() { + log.Warn("jsonrpc: shutdown server") + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + if err := ws.Server.Shutdown(ctx); err != nil { + log.WithError(err).Error("jsonrpc: shutdown server") + } + cancel() + log.Warn("jsonrpc: server stopped") +} From aabf443d1bc8a62e384ae67f5ce7e113785926eb Mon Sep 17 00:00:00 2001 From: Ggicci Date: Mon, 21 Jan 2019 14:45:11 +0800 Subject: [PATCH 162/302] Fix missing call to api.StopService --- cmd/cqld/bootstrap.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/cqld/bootstrap.go b/cmd/cqld/bootstrap.go index ac35e1b99..12a22bb03 100644 --- a/cmd/cqld/bootstrap.go +++ b/cmd/cqld/bootstrap.go @@ -172,6 +172,7 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { // start json-rpc server log.Info("wsapi: start service") go api.Serve(wsapiAddr, conf.GConf.BP.ChainFileName) + defer api.StopService() signalCh := make(chan os.Signal, 1) signal.Notify( From d11b0c90a305ee69c407905c3ba48b687fb14035 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Mon, 21 Jan 2019 15:42:54 +0800 Subject: [PATCH 163/302] Remove gotest -v option for gitlab CI --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 92fc37e5b..62e5afced 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -27,7 +27,7 @@ test-my-project: script: - make clean - make use_all_cores - - go test -v -race -failfast -parallel 16 -cpu 16 $(go list ./... | grep -v "/vendor/") -coverprofile cover.out + - go test -race -failfast -parallel 16 -cpu 16 $(go list ./... 
| grep -v "/vendor/") -coverprofile cover.out - go test -bench=^BenchmarkPersistentCaller_Call$ -run ^$ ./rpc/ - bash cleanupDB.sh || true - go test -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ From 0f00bc66cb31ee697565934b50494b6e9e2e747c Mon Sep 17 00:00:00 2001 From: Ggicci Date: Mon, 21 Jan 2019 15:53:57 +0800 Subject: [PATCH 164/302] Fix data race on api.server instance --- api/service.go | 19 ++++++++++--------- cmd/cqld/bootstrap.go | 1 - 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/api/service.go b/api/service.go index b7153117d..510a20462 100644 --- a/api/service.go +++ b/api/service.go @@ -14,22 +14,23 @@ var ( server *jsonrpc.WebsocketServer ) -// Serve runs an API server on the specified address and database file. -func Serve(addr, dbFile string) error { - // setup database - if err := models.InitModels(dbFile); err != nil { - return errors.WithMessage(err, "api: init models failed") - } - +func init() { server = &jsonrpc.WebsocketServer{ Server: http.Server{ - Addr: addr, ReadTimeout: 30 * time.Second, WriteTimeout: 60 * time.Second, }, - RPCHandler: rpc, } +} +// Serve runs an API server on the specified address and database file. 
+func Serve(addr, dbFile string) error { + // setup database + if err := models.InitModels(dbFile); err != nil { + return errors.WithMessage(err, "api: init models failed") + } + server.Addr = addr + server.RPCHandler = rpc return server.Serve() } diff --git a/cmd/cqld/bootstrap.go b/cmd/cqld/bootstrap.go index 12a22bb03..ac35e1b99 100644 --- a/cmd/cqld/bootstrap.go +++ b/cmd/cqld/bootstrap.go @@ -172,7 +172,6 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { // start json-rpc server log.Info("wsapi: start service") go api.Serve(wsapiAddr, conf.GConf.BP.ChainFileName) - defer api.StopService() signalCh := make(chan os.Signal, 1) signal.Notify( From a0749dd7bf0de96e319df470f565a38339eeb775 Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 15 Jan 2019 18:09:00 +0800 Subject: [PATCH 165/302] Add flag skipMasterKey in cql-utils for using empty master key --- cmd/cql-utils/main.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cmd/cql-utils/main.go b/cmd/cql-utils/main.go index 052f5b87c..ce94a3dd8 100644 --- a/cmd/cql-utils/main.go +++ b/cmd/cql-utils/main.go @@ -33,6 +33,7 @@ var ( publicKeyHex string privateKeyFile string configFile string + skipMasterKey bool showVersion bool ) @@ -45,6 +46,7 @@ func init() { flag.StringVar(&publicKeyHex, "public", "", "public key hex string to mine node id/nonce") flag.StringVar(&privateKeyFile, "private", "private.key", "private key file to generate/show") flag.StringVar(&configFile, "config", "config.yaml", "config file to use") + flag.BoolVar(&skipMasterKey, "skip-master-key", false, "use empty master key") flag.BoolVar(&showVersion, "version", false, "Show version information and exit") } @@ -100,6 +102,9 @@ func main() { } func readMasterKey() (string, error) { + if skipMasterKey { + return "", nil + } fmt.Println("Enter master key(press Enter for default: \"\"): ") bytePwd, err := terminal.ReadPassword(int(syscall.Stdin)) fmt.Println() From 4ed678006a5e34ceedc6f5890377e2d1b3091179 Mon Sep 17 
00:00:00 2001 From: laodouya Date: Fri, 18 Jan 2019 00:33:13 +0800 Subject: [PATCH 166/302] Add testnet create account test. --- cmd/cql/main.go | 1 + test/testnet_client/run.sh | 30 ++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+) create mode 100755 test/testnet_client/run.sh diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 07db14704..542ab60fa 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -335,6 +335,7 @@ func main() { } log.Infof("the newly created database is: %#v", dsn) + fmt.Printf(dsn) return } diff --git a/test/testnet_client/run.sh b/test/testnet_client/run.sh new file mode 100755 index 000000000..d2d8c9877 --- /dev/null +++ b/test/testnet_client/run.sh @@ -0,0 +1,30 @@ +#!/bin/bash -x + +TEST_WD=$(cd $(dirname $0)/; pwd) +PROJECT_DIR=$(cd ${TEST_WD}/../../; pwd) + +echo ${PROJECT_DIR} + +# Build +cd ${PROJECT_DIR} && make clean +cd ${PROJECT_DIR} && make use_all_cores + +cd ${TEST_WD} +${PROJECT_DIR}/bin/cql-utils -tool confgen -skip-master-key +${PROJECT_DIR}/bin/cql-utils -tool addrgen -private ./conf/private.key -skip-master-key | tee wallet.txt + +#get wallet addr +wallet=$(awk '{print $3}' wallet.txt) + +#transfer some coin to above address +${PROJECT_DIR}/bin/cql -config ${PROJECT_DIR}/conf/testnet/config.yaml -transfer \ + '{"addr":"'${wallet}'", "amount":"100 Particle"}' + +${PROJECT_DIR}/bin/cql -config conf/config.yaml -create 2 | tee dsn.txt + +#get dsn +dsn=$(cat dsn.txt) +${PROJECT_DIR}/bin/cql -config conf/config.yaml -get-balance +${PROJECT_DIR}/bin/cql -config conf/config.yaml -dsn ${dsn} \ + -command 'create table test_for_new_account(column1 int);' + From c0f2278a0f261b77e74580296a144e1d46093c7e Mon Sep 17 00:00:00 2001 From: laodouya Date: Fri, 18 Jan 2019 12:34:11 +0800 Subject: [PATCH 167/302] Increase Particle balance for testnet create db test, add sleep for transaction done. 
--- test/testnet_client/run.sh | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/test/testnet_client/run.sh b/test/testnet_client/run.sh index d2d8c9877..18c84df75 100755 --- a/test/testnet_client/run.sh +++ b/test/testnet_client/run.sh @@ -18,13 +18,22 @@ wallet=$(awk '{print $3}' wallet.txt) #transfer some coin to above address ${PROJECT_DIR}/bin/cql -config ${PROJECT_DIR}/conf/testnet/config.yaml -transfer \ - '{"addr":"'${wallet}'", "amount":"100 Particle"}' + '{"addr":"'${wallet}'", "amount":"100000000 Particle"}' + +sleep 60 ${PROJECT_DIR}/bin/cql -config conf/config.yaml -create 2 | tee dsn.txt #get dsn dsn=$(cat dsn.txt) + +sleep 60 + ${PROJECT_DIR}/bin/cql -config conf/config.yaml -get-balance ${PROJECT_DIR}/bin/cql -config conf/config.yaml -dsn ${dsn} \ -command 'create table test_for_new_account(column1 int);' +${PROJECT_DIR}/bin/cql -config conf/config.yaml -dsn ${dsn} \ + -command 'show tables;' | tee result.log + +grep "1 row" result.log From e065452e2cf337035fb13aaa76ca15df66138fce Mon Sep 17 00:00:00 2001 From: laodouya Date: Sun, 20 Jan 2019 22:14:46 +0800 Subject: [PATCH 168/302] Add testnet client test to gitlab ci --- .gitlab-ci.yml | 1 + test/testnet_client/run.sh | 8 +++++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 92fc37e5b..e3ef4ccf1 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -32,5 +32,6 @@ test-my-project: - bash cleanupDB.sh || true - go test -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ - go test -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ + - bash test/testnet_client/run.sh - gocovmerge cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f cover.out - bash <(curl -s https://codecov.io/bash) diff --git a/test/testnet_client/run.sh b/test/testnet_client/run.sh index 18c84df75..9bf63dbbd 100755 --- a/test/testnet_client/run.sh +++ b/test/testnet_client/run.sh @@ 
-1,16 +1,18 @@ #!/bin/bash -x +set -e + TEST_WD=$(cd $(dirname $0)/; pwd) PROJECT_DIR=$(cd ${TEST_WD}/../../; pwd) echo ${PROJECT_DIR} # Build -cd ${PROJECT_DIR} && make clean -cd ${PROJECT_DIR} && make use_all_cores +# cd ${PROJECT_DIR} && make clean +# cd ${PROJECT_DIR} && make use_all_cores cd ${TEST_WD} -${PROJECT_DIR}/bin/cql-utils -tool confgen -skip-master-key +echo -ne "y\n" | ${PROJECT_DIR}/bin/cql-utils -tool confgen -skip-master-key ${PROJECT_DIR}/bin/cql-utils -tool addrgen -private ./conf/private.key -skip-master-key | tee wallet.txt #get wallet addr From 1d4e980cfa480c657f1f46fa89bdecccb8f635ed Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 21 Jan 2019 10:20:53 +0800 Subject: [PATCH 169/302] Split gitlab ci test work to three sub jobs for parallel test. --- .gitlab-ci.yml | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index e3ef4ccf1..d8c48095c 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -21,17 +21,26 @@ before_script: - mkdir -p ~/bin/ && export PATH="~/bin/:$PATH" - ulimit -n 8192 # - curl -fSL https://github.com/haya14busa/reviewdog/releases/download/$REVIEWDOG_VERSION/reviewdog_linux_amd64 -o ~/bin/reviewdog && chmod +x ~/bin/reviewdog + - make clean + - make use_all_cores test-my-project: stage: test script: - - make clean - - make use_all_cores - go test -v -race -failfast -parallel 16 -cpu 16 $(go list ./... 
| grep -v "/vendor/") -coverprofile cover.out + - gocovmerge cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f cover.out + - bash <(curl -s https://codecov.io/bash) + +bench-my-project: + stage: test + script: - go test -bench=^BenchmarkPersistentCaller_Call$ -run ^$ ./rpc/ - bash cleanupDB.sh || true - go test -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ + - bash cleanupDB.sh || true + +compatibility-testnet: + stage: test + script: - go test -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ - bash test/testnet_client/run.sh - - gocovmerge cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f cover.out - - bash <(curl -s https://codecov.io/bash) From 1b6f20819ec630744e9ace2f6776ec6fc9c41ac9 Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 21 Jan 2019 10:36:22 +0800 Subject: [PATCH 170/302] Add gitlab ci build stage. --- .gitlab-ci.yml | 43 +++++++++++++++++++++++++------------------ 1 file changed, 25 insertions(+), 18 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index d8c48095c..59358b8eb 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -5,24 +5,31 @@ variables: REVIEWDOG_GITLAB_API_TOKEN: $REVIEWDOG_TOKEN CODECOV_TOKEN: $CODECOV_TOKEN -before_script: - # Setup dependency management tool -# - curl -L -s https://github.com/golang/dep/releases/download/v0.4.1/dep-linux-amd64 -o $GOPATH/bin/dep -# - chmod +x $GOPATH/bin/dep -# - go get github.com/mattn/goveralls -# - go get github.com/haya14busa/goverage -# - go get github.com/golang/lint/golint -# - go get github.com/haya14busa/reviewdog/cmd/reviewdog -# - go get github.com/wadey/gocovmerge - - mkdir -p $GOPATH/src/github.com/CovenantSQL - - cp -r /builds/thunderdb/CovenantSQL $GOPATH/src/github.com/CovenantSQL/ - - cd $GOPATH/src/github.com/CovenantSQL/CovenantSQL -# - dep ensure - - mkdir -p ~/bin/ && export PATH="~/bin/:$PATH" - - ulimit -n 8192 -# - curl -fSL 
https://github.com/haya14busa/reviewdog/releases/download/$REVIEWDOG_VERSION/reviewdog_linux_amd64 -o ~/bin/reviewdog && chmod +x ~/bin/reviewdog - - make clean - - make use_all_cores +stages: + - build + - test + +build: + stage: build + before_script: + # Setup dependency management tool + # - curl -L -s https://github.com/golang/dep/releases/download/v0.4.1/dep-linux-amd64 -o $GOPATH/bin/dep + # - chmod +x $GOPATH/bin/dep + # - go get github.com/mattn/goveralls + # - go get github.com/haya14busa/goverage + # - go get github.com/golang/lint/golint + # - go get github.com/haya14busa/reviewdog/cmd/reviewdog + # - go get github.com/wadey/gocovmerge + - mkdir -p $GOPATH/src/github.com/CovenantSQL + - cp -r /builds/thunderdb/CovenantSQL $GOPATH/src/github.com/CovenantSQL/ + - cd $GOPATH/src/github.com/CovenantSQL/CovenantSQL + # - dep ensure + - mkdir -p ~/bin/ && export PATH="~/bin/:$PATH" + - ulimit -n 8192 + # - curl -fSL https://github.com/haya14busa/reviewdog/releases/download/$REVIEWDOG_VERSION/reviewdog_linux_amd64 -o ~/bin/reviewdog && chmod +x ~/bin/reviewdog + script: + - make clean + - make use_all_cores test-my-project: stage: test From 79d0af4405a440fdd545600cc7d7d018d8d47900 Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 21 Jan 2019 10:41:37 +0800 Subject: [PATCH 171/302] Reset GOPATH for every job at gitlab ci. 
--- .gitlab-ci.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 59358b8eb..8b9b6e329 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -9,9 +9,7 @@ stages: - build - test -build: - stage: build - before_script: +before_script: # Setup dependency management tool # - curl -L -s https://github.com/golang/dep/releases/download/v0.4.1/dep-linux-amd64 -o $GOPATH/bin/dep # - chmod +x $GOPATH/bin/dep @@ -27,6 +25,9 @@ build: - mkdir -p ~/bin/ && export PATH="~/bin/:$PATH" - ulimit -n 8192 # - curl -fSL https://github.com/haya14busa/reviewdog/releases/download/$REVIEWDOG_VERSION/reviewdog_linux_amd64 -o ~/bin/reviewdog && chmod +x ~/bin/reviewdog + +build: + stage: build script: - make clean - make use_all_cores From 6f0b542f576d18bae94560b9584840a28b28e3ff Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 21 Jan 2019 11:34:29 +0800 Subject: [PATCH 172/302] Make local miner tests in one gitlabci jobs. --- .gitlab-ci.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 8b9b6e329..1464e3779 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -38,10 +38,6 @@ test-my-project: - go test -v -race -failfast -parallel 16 -cpu 16 $(go list ./... | grep -v "/vendor/") -coverprofile cover.out - gocovmerge cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f cover.out - bash <(curl -s https://codecov.io/bash) - -bench-my-project: - stage: test - script: - go test -bench=^BenchmarkPersistentCaller_Call$ -run ^$ ./rpc/ - bash cleanupDB.sh || true - go test -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ From 9f1a9655b8dd5e57c99fbe6445cea26c84cf1cc9 Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 21 Jan 2019 12:11:32 +0800 Subject: [PATCH 173/302] Copy gitlabci build stage bins to test stage. 
--- .gitlab-ci.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 1464e3779..4467362e8 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -31,9 +31,15 @@ build: script: - make clean - make use_all_cores + artifacts: + untracked: true + paths: + - $GOPATH/src/github.com/CovenantSQL/CovenantSQL/bin/ test-my-project: stage: test + dependencies: + - build script: - go test -v -race -failfast -parallel 16 -cpu 16 $(go list ./... | grep -v "/vendor/") -coverprofile cover.out - gocovmerge cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f cover.out @@ -45,6 +51,8 @@ test-my-project: compatibility-testnet: stage: test + dependencies: + - build script: - go test -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ - bash test/testnet_client/run.sh From 8ae2f3d2131259dcabb6423a28c1c5fae21dae5d Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 21 Jan 2019 15:36:53 +0800 Subject: [PATCH 174/302] Fix gitlab ci artifacts path wrong --- .gitlab-ci.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 4467362e8..4c8271185 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -31,10 +31,11 @@ build: script: - make clean - make use_all_cores + - cp -r $GOPATH/src/github.com/CovenantSQL/CovenantSQL/bin /builds/thunderdb/CovenantSQL/ artifacts: untracked: true paths: - - $GOPATH/src/github.com/CovenantSQL/CovenantSQL/bin/ + - bin/ test-my-project: stage: test From a488ab0d26595bb95a81a06e972521f070cb17f9 Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 21 Jan 2019 16:01:22 +0800 Subject: [PATCH 175/302] Change gitlab ci stages from artifacts to cache --- .gitlab-ci.yml | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 4c8271185..059f87d68 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -26,21 +26,20 @@ before_script: - ulimit -n 8192 # - curl -fSL 
https://github.com/haya14busa/reviewdog/releases/download/$REVIEWDOG_VERSION/reviewdog_linux_amd64 -o ~/bin/reviewdog && chmod +x ~/bin/reviewdog +cache: + untracked: true + paths: + - bin/ + build: stage: build script: - make clean - make use_all_cores - cp -r $GOPATH/src/github.com/CovenantSQL/CovenantSQL/bin /builds/thunderdb/CovenantSQL/ - artifacts: - untracked: true - paths: - - bin/ test-my-project: stage: test - dependencies: - - build script: - go test -v -race -failfast -parallel 16 -cpu 16 $(go list ./... | grep -v "/vendor/") -coverprofile cover.out - gocovmerge cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f cover.out @@ -52,8 +51,6 @@ test-my-project: compatibility-testnet: stage: test - dependencies: - - build script: - go test -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ - bash test/testnet_client/run.sh From 6df0f8332d246b7de81fba1e9963c2b634ad4110 Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 21 Jan 2019 16:19:33 +0800 Subject: [PATCH 176/302] Add git commit id as gitlabci cache key. --- .gitlab-ci.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 059f87d68..7bab48ab3 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -28,6 +28,7 @@ before_script: cache: untracked: true + key: "$CI_COMMIT_REF_SLUG" paths: - bin/ @@ -41,6 +42,7 @@ build: test-my-project: stage: test script: + - cp -r /cache/bin $GOPATH/src/github.com/CovenantSQL/CovenantSQL/ - go test -v -race -failfast -parallel 16 -cpu 16 $(go list ./... 
| grep -v "/vendor/") -coverprofile cover.out - gocovmerge cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f cover.out - bash <(curl -s https://codecov.io/bash) @@ -52,5 +54,6 @@ test-my-project: compatibility-testnet: stage: test script: + - cp -r /cache/bin $GOPATH/src/github.com/CovenantSQL/CovenantSQL/ - go test -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ - bash test/testnet_client/run.sh From 422b78f5d7d9825bbef7c7c3e86207404d0bc2e7 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Mon, 21 Jan 2019 16:33:15 +0800 Subject: [PATCH 177/302] Add -coverpkg=all option to go test --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 62e5afced..8a6bde966 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -27,7 +27,7 @@ test-my-project: script: - make clean - make use_all_cores - - go test -race -failfast -parallel 16 -cpu 16 $(go list ./... | grep -v "/vendor/") -coverprofile cover.out + - go test -race -failfast -parallel 16 -cpu 16 $(go list ./... | grep -v "/vendor/") -coverpkg=all -coverprofile cover.out - go test -bench=^BenchmarkPersistentCaller_Call$ -run ^$ ./rpc/ - bash cleanupDB.sh || true - go test -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ From 157be0c2ec337da80141de3b3e405b5fa4e2b12d Mon Sep 17 00:00:00 2001 From: Ggicci Date: Mon, 21 Jan 2019 16:54:44 +0800 Subject: [PATCH 178/302] Tweak -coverpkg option for gotest --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 8a6bde966..d2c7887df 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -27,7 +27,7 @@ test-my-project: script: - make clean - make use_all_cores - - go test -race -failfast -parallel 16 -cpu 16 $(go list ./... | grep -v "/vendor/") -coverpkg=all -coverprofile cover.out + - go test -race -failfast -parallel 16 -cpu 16 $(go list ./... 
| grep -v "/vendor/") -coverpkg=github.com/CovenantSQL/CovenantSQL/... -coverprofile cover.out - go test -bench=^BenchmarkPersistentCaller_Call$ -run ^$ ./rpc/ - bash cleanupDB.sh || true - go test -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ From 4aedb891b6de9b766289c0b3ce85e42b5f49fee1 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Mon, 21 Jan 2019 17:01:34 +0800 Subject: [PATCH 179/302] Add parameter for eventual consistency --- cmd/cql-minerd/bench.sh | 8 +++- cmd/cql-minerd/integration_test.go | 60 +++++++++++++++++++++++++----- 2 files changed, 57 insertions(+), 11 deletions(-) diff --git a/cmd/cql-minerd/bench.sh b/cmd/cql-minerd/bench.sh index c36b31bb6..837b8d9eb 100755 --- a/cmd/cql-minerd/bench.sh +++ b/cmd/cql-minerd/bench.sh @@ -8,4 +8,10 @@ go test -bench=^BenchmarkMinerOneNoSign$ -benchtime=10s -run ^$ && \ go test -bench=^BenchmarkMinerTwo$ -benchtime=10s -run ^$ && \ go test -bench=^BenchmarkMinerTwoNoSign$ -benchtime=10s -run ^$ && \ go test -bench=^BenchmarkMinerThree$ -benchtime=10s -run ^$ && \ -go test -bench=^BenchmarkMinerThreeNoSign$ -benchtime=10s -run ^$ +go test -bench=^BenchmarkMinerThreeNoSign$ -benchtime=10s -run ^$ && \ +go test -bench=^BenchmarkMinerOneWithEventualConsistency$ -benchtime=10s -run ^$ && \ +go test -bench=^BenchmarkMinerOneNoSignWithEventualConsistency$ -benchtime=10s -run ^$ && \ +go test -bench=^BenchmarkMinerTwoWithEventualConsistency$ -benchtime=10s -run ^$ && \ +go test -bench=^BenchmarkMinerTwoNoSignWithEventualConsistency$ -benchtime=10s -run ^$ && \ +go test -bench=^BenchmarkMinerThreeWithEventualConsistency$ -benchtime=10s -run ^$ && \ +go test -bench=^BenchmarkMinerThreeNoSignWithEventualConsistency$ -benchtime=10s -run ^$ diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index a802c5942..86b83fb41 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -715,7 +715,7 @@ func benchDB(b *testing.B, db *sql.DB, 
createDB bool) { So(err, ShouldBeNil) } -func benchMiner(b *testing.B, minerCount uint16, bypassSign bool) { +func benchMiner(b *testing.B, minerCount uint16, bypassSign bool, useEventualConsistency bool) { log.Warnf("benchmark for %d Miners, BypassSignature: %v", minerCount, bypassSign) asymmetric.BypassSignature = bypassSign if minerCount > 0 { @@ -751,8 +751,12 @@ func benchMiner(b *testing.B, minerCount uint16, bypassSign bool) { var dsn string if minerCount > 0 { // create - meta := client.ResourceMeta{} - meta.Node = minerCount + meta := client.ResourceMeta{ + ResourceMeta: types.ResourceMeta{ + Node: minerCount, + UseEventualConsistency: useEventualConsistency, + }, + } // wait for chain service var ctx1, cancel1 = context.WithTimeout(context.Background(), 1*time.Minute) defer cancel1() @@ -900,43 +904,79 @@ func benchOutsideMinerWithTargetMinerList( func BenchmarkMinerOneNoSign(b *testing.B) { Convey("bench single node", b, func() { - benchMiner(b, 1, true) + benchMiner(b, 1, true, false) }) } func BenchmarkMinerTwoNoSign(b *testing.B) { Convey("bench two node", b, func() { - benchMiner(b, 2, true) + benchMiner(b, 2, true, false) }) } func BenchmarkMinerThreeNoSign(b *testing.B) { Convey("bench three node", b, func() { - benchMiner(b, 3, true) + benchMiner(b, 3, true, false) }) } func BenchmarkMinerOne(b *testing.B) { Convey("bench single node", b, func() { - benchMiner(b, 1, false) + benchMiner(b, 1, false, false) }) } func BenchmarkMinerTwo(b *testing.B) { Convey("bench two node", b, func() { - benchMiner(b, 2, false) + benchMiner(b, 2, false, false) }) } func BenchmarkMinerThree(b *testing.B) { Convey("bench three node", b, func() { - benchMiner(b, 3, false) + benchMiner(b, 3, false, false) + }) +} + +func BenchmarkMinerOneNoSignWithEventualConsistency(b *testing.B) { + Convey("bench single node", b, func() { + benchMiner(b, 1, true, true) + }) +} + +func BenchmarkMinerTwoNoSignWithEventualConsistency(b *testing.B) { + Convey("bench two node", b, func() 
{ + benchMiner(b, 2, true, true) + }) +} + +func BenchmarkMinerThreeNoSignWithEventualConsistency(b *testing.B) { + Convey("bench three node", b, func() { + benchMiner(b, 3, true, true) + }) +} + +func BenchmarkMinerOneWithEventualConsistency(b *testing.B) { + Convey("bench single node", b, func() { + benchMiner(b, 1, false, true) + }) +} + +func BenchmarkMinerTwoWithEventualConsistency(b *testing.B) { + Convey("bench two node", b, func() { + benchMiner(b, 2, false, true) + }) +} + +func BenchmarkMinerThreeWithEventualConsistency(b *testing.B) { + Convey("bench three node", b, func() { + benchMiner(b, 3, false, true) }) } func BenchmarkClientOnly(b *testing.B) { Convey("bench three node", b, func() { - benchMiner(b, 0, false) + benchMiner(b, 0, false, false) }) } From 51d49530d320cc0e4c1f152425276d4e3ce03d08 Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 21 Jan 2019 16:27:32 +0800 Subject: [PATCH 180/302] Remove gitlab ci cache. Remove stage for gitlabci. Split make in different jobs --- .gitlab-ci.yml | 23 ++++------------------- 1 file changed, 4 insertions(+), 19 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 7bab48ab3..d45bef29e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -5,10 +5,6 @@ variables: REVIEWDOG_GITLAB_API_TOKEN: $REVIEWDOG_TOKEN CODECOV_TOKEN: $CODECOV_TOKEN -stages: - - build - - test - before_script: # Setup dependency management tool # - curl -L -s https://github.com/golang/dep/releases/download/v0.4.1/dep-linux-amd64 -o $GOPATH/bin/dep @@ -26,23 +22,11 @@ before_script: - ulimit -n 8192 # - curl -fSL https://github.com/haya14busa/reviewdog/releases/download/$REVIEWDOG_VERSION/reviewdog_linux_amd64 -o ~/bin/reviewdog && chmod +x ~/bin/reviewdog -cache: - untracked: true - key: "$CI_COMMIT_REF_SLUG" - paths: - - bin/ - -build: - stage: build - script: - - make clean - - make use_all_cores - - cp -r $GOPATH/src/github.com/CovenantSQL/CovenantSQL/bin /builds/thunderdb/CovenantSQL/ - test-my-project: stage: test script: - 
- cp -r /cache/bin $GOPATH/src/github.com/CovenantSQL/CovenantSQL/ + - make clean + - make -j6 bp miner observer - go test -v -race -failfast -parallel 16 -cpu 16 $(go list ./... | grep -v "/vendor/") -coverprofile cover.out - gocovmerge cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f cover.out - bash <(curl -s https://codecov.io/bash) @@ -54,6 +38,7 @@ test-my-project: compatibility-testnet: stage: test script: - - cp -r /cache/bin $GOPATH/src/github.com/CovenantSQL/CovenantSQL/ + - make clean + - make -j8 client - go test -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ - bash test/testnet_client/run.sh From de0c01b28d6105342c7974e5727d33c47469c7a3 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Mon, 21 Jan 2019 17:58:20 +0800 Subject: [PATCH 181/302] Fix go test build error, split tests --- .gitlab-ci.yml | 21 +++++++++++---------- Makefile | 2 ++ 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index d2c7887df..82f4ce39f 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -7,17 +7,17 @@ variables: before_script: # Setup dependency management tool -# - curl -L -s https://github.com/golang/dep/releases/download/v0.4.1/dep-linux-amd64 -o $GOPATH/bin/dep -# - chmod +x $GOPATH/bin/dep -# - go get github.com/mattn/goveralls -# - go get github.com/haya14busa/goverage -# - go get github.com/golang/lint/golint -# - go get github.com/haya14busa/reviewdog/cmd/reviewdog -# - go get github.com/wadey/gocovmerge + # - curl -L -s https://github.com/golang/dep/releases/download/v0.4.1/dep-linux-amd64 -o $GOPATH/bin/dep + # - chmod +x $GOPATH/bin/dep + # - go get github.com/mattn/goveralls + # - go get github.com/haya14busa/goverage + # - go get github.com/golang/lint/golint + # - go get github.com/haya14busa/reviewdog/cmd/reviewdog + # - go get github.com/wadey/gocovmerge - mkdir -p $GOPATH/src/github.com/CovenantSQL - cp -r /builds/thunderdb/CovenantSQL 
$GOPATH/src/github.com/CovenantSQL/ - cd $GOPATH/src/github.com/CovenantSQL/CovenantSQL -# - dep ensure + # - dep ensure - mkdir -p ~/bin/ && export PATH="~/bin/:$PATH" - ulimit -n 8192 # - curl -fSL https://github.com/haya14busa/reviewdog/releases/download/$REVIEWDOG_VERSION/reviewdog_linux_amd64 -o ~/bin/reviewdog && chmod +x ~/bin/reviewdog @@ -27,10 +27,11 @@ test-my-project: script: - make clean - make use_all_cores - - go test -race -failfast -parallel 16 -cpu 16 $(go list ./... | grep -v "/vendor/") -coverpkg=github.com/CovenantSQL/CovenantSQL/... -coverprofile cover.out + - go test -race -failfast -parallel 16 -cpu 16 -coverpkg="github.com/CovenantSQL/CovenantSQL/..." -coverprofile "common.cover.out" $(go list ./... | grep -v "/vendor/" | grep -v "github.com/CovenantSQL/CovenantSQL/cmd/") + - for package in $(ls cmd/); do go test -race -failfast -parallel 16 -cpu 16 -coverpkg="github.com/CovenantSQL/CovenantSQL/..." -coverprofile "integ.${package}.cover.out" "github.com/CovenantSQL/CovenantSQL/cmd/${package}"; done - go test -bench=^BenchmarkPersistentCaller_Call$ -run ^$ ./rpc/ - bash cleanupDB.sh || true - go test -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ - go test -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ - - gocovmerge cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f cover.out + - gocovmerge *.cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f *.cover.out - bash <(curl -s https://codecov.io/bash) diff --git a/Makefile b/Makefile index f46233beb..54ab5bef7 100644 --- a/Makefile +++ b/Makefile @@ -228,6 +228,8 @@ all: bp miner observer client clean: rm -rf bin/cql* + rm -f *.cover.out + rm -f coverage.txt .PHONY: status start stop logs push push_testnet clean \ bin/cqld.test bin/cqld bin/cql-minerd.test bin/cql-minerd bin/cql-utils bin/cql-observer bin/cql-observer.test \ From 28f38bf9bda9e9b45d2bd004807ec22915dede7e Mon Sep 
17 00:00:00 2001 From: Ggicci Date: Mon, 21 Jan 2019 18:20:56 +0800 Subject: [PATCH 182/302] Fix msgpack occasionally decode error --- utils/msgpack.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/utils/msgpack.go b/utils/msgpack.go index f443c9184..29dc9a6a1 100644 --- a/utils/msgpack.go +++ b/utils/msgpack.go @@ -39,18 +39,16 @@ func RegisterInterfaceToMsgPack(intf, impl reflect.Type) (err error) { // DecodeMsgPack reverses the encode operation on a byte slice input. func DecodeMsgPack(buf []byte, out interface{}) error { - r := bytes.NewBuffer(buf) - dec := codec.NewDecoder(r, msgPackHandle) + dec := codec.NewDecoder(bytes.NewReader(buf), msgPackHandle) return dec.Decode(out) } // DecodeMsgPackPlain reverses the encode operation on a byte slice input without RawToString setting. func DecodeMsgPackPlain(buf []byte, out interface{}) error { - r := bytes.NewBuffer(buf) hd := &codec.MsgpackHandle{ WriteExt: true, } - dec := codec.NewDecoder(r, hd) + dec := codec.NewDecoder(bytes.NewReader(buf), hd) return dec.Decode(out) } From 789dbdd8054a0535bd4b8a01c22991bedeb63519 Mon Sep 17 00:00:00 2001 From: zeqing-guo Date: Mon, 21 Jan 2019 18:46:56 +0800 Subject: [PATCH 183/302] Fix bug to avoid ack DDoS and add timeout for connect db --- cmd/cql/main.go | 7 ++++++- sqlchain/chain.go | 8 ++++---- worker/dbms.go | 9 +++++++++ 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 05225b3e4..a218c393e 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -30,6 +30,7 @@ import ( "runtime" "strconv" "strings" + "time" "github.com/CovenantSQL/CovenantSQL/client" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" @@ -170,7 +171,9 @@ func init() { log.Infof("connecting to %#v", url.DSN) // wait for database to become ready - if err = client.WaitDBCreation(context.Background(), dsn); err != nil { + ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + if err = 
client.WaitDBCreation(ctx, dsn); err != nil { return } @@ -440,8 +443,10 @@ func main() { bindings = append(bindings, name) } log.Infof("available drivers are: %#v", bindings) + os.Exit(-1) return } + os.Exit(-1) } } diff --git a/sqlchain/chain.go b/sqlchain/chain.go index 8cd3eba85..b41deea32 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -550,13 +550,13 @@ func (c *Chain) pushAckedQuery(ack *types.SignedAckHeader) (err error) { tdbKey := utils.ConcatAll(metaAckIndex[:], k, ack.Hash().AsBytes()) - if err = c.tdb.Put(tdbKey, enc.Bytes(), nil); err != nil { - err = errors.Wrapf(err, "put ack %d %s", h, ack.Hash().String()) + if err = c.register(ack); err != nil { + err = errors.Wrapf(err, "register ack %v at height %d", ack.Hash(), h) return } - if err = c.register(ack); err != nil { - err = errors.Wrapf(err, "register ack %v at height %d", ack.Hash(), h) + if err = c.tdb.Put(tdbKey, enc.Bytes(), nil); err != nil { + err = errors.Wrapf(err, "put ack %d %s", h, ack.Hash().String()) return } diff --git a/worker/dbms.go b/worker/dbms.go index 9329ea6d1..a476e3e19 100644 --- a/worker/dbms.go +++ b/worker/dbms.go @@ -463,6 +463,15 @@ func (dbms *DBMS) Ack(ack *types.Ack) (err error) { var db *Database var exists bool + // check permission + addr, err := crypto.PubKeyHash(ack.Header.Signee) + if err != nil { + return + } + err = dbms.checkPermission(addr, ack.Header.Response.Request.DatabaseID, types.ReadQuery) + if err != nil { + return + } // find database if db, exists = dbms.getMeta(ack.Header.Response.Request.DatabaseID); !exists { err = ErrNotExists From 19aa18b41e503c93de2e36e9d652f948c5fce4de Mon Sep 17 00:00:00 2001 From: zeqing-guo Date: Mon, 21 Jan 2019 18:53:11 +0800 Subject: [PATCH 184/302] Prune useless code --- cmd/cql/main.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/cmd/cql/main.go b/cmd/cql/main.go index cd43df540..156c283df 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -464,8 +464,6 @@ func main() { bindings = 
append(bindings, name) } log.Infof("available drivers are: %#v", bindings) - os.Exit(-1) - return } os.Exit(-1) } From 198f1c5dfd5e6d94350550a0104c1d3a7fa949d6 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Mon, 21 Jan 2019 22:00:46 +0800 Subject: [PATCH 185/302] Upgrade gitlab CI config, use alltest.sh --- .gitlab-ci.yml | 12 +----------- alltest.sh | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+), 11 deletions(-) create mode 100755 alltest.sh diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 82f4ce39f..138ae4c6b 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -24,14 +24,4 @@ before_script: test-my-project: stage: test - script: - - make clean - - make use_all_cores - - go test -race -failfast -parallel 16 -cpu 16 -coverpkg="github.com/CovenantSQL/CovenantSQL/..." -coverprofile "common.cover.out" $(go list ./... | grep -v "/vendor/" | grep -v "github.com/CovenantSQL/CovenantSQL/cmd/") - - for package in $(ls cmd/); do go test -race -failfast -parallel 16 -cpu 16 -coverpkg="github.com/CovenantSQL/CovenantSQL/..." 
-coverprofile "integ.${package}.cover.out" "github.com/CovenantSQL/CovenantSQL/cmd/${package}"; done - - go test -bench=^BenchmarkPersistentCaller_Call$ -run ^$ ./rpc/ - - bash cleanupDB.sh || true - - go test -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ - - go test -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ - - gocovmerge *.cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f *.cover.out - - bash <(curl -s https://codecov.io/bash) + script: ./alltest.sh diff --git a/alltest.sh b/alltest.sh new file mode 100755 index 000000000..4ee8d5938 --- /dev/null +++ b/alltest.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +set -o errexit +set -o pipefail +set -o nounset + +test::package() { + local package="${1:-notset}" + + if [[ "${package}" == "notset" ]]; then + &>2 echo "empty package name" + exit 1 + fi + + local coverage_file="${package//\//.}.cover.out" + echo "[TEST] package=${package}, coverage=${coverage_file}" + go test -race -failfast -parallel 16 -cpu 16 -coverpkg="github.com/CovenantSQL/CovenantSQL/..." -coverprofile "${coverage_file}" "${package}" +} + +main() { + make clean + make use_all_cores + + # test package by package + for package in $(go list ./... | grep -v "/vendor/"); do + test::package "${package}" + done + + # some benchmarks + go test -bench=^BenchmarkPersistentCaller_Call$ -run ^$ ./rpc/ + bash cleanupDB.sh || true + go test -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ + go test -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ + gocovmerge *.cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f *.cover.out + bash <(curl -s https://codecov.io/bash) +} + +main "$@" + From cc299f3eab513ca9b94b267a22bae864edbe0c74 Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 22 Jan 2019 12:01:44 +0800 Subject: [PATCH 186/302] Fix testnet config missing params. 
add wait-tx-confirm in transfer token test. --- conf/testnet/config.yaml | 33 +++++++++++++++++++++++++++++++++ test/testnet_client/run.sh | 4 +--- 2 files changed, 34 insertions(+), 3 deletions(-) diff --git a/conf/testnet/config.yaml b/conf/testnet/config.yaml index 2b4df6adf..6716e0b7c 100644 --- a/conf/testnet/config.yaml +++ b/conf/testnet/config.yaml @@ -26,6 +26,30 @@ BlockProducer: b: 0 c: 0 d: 6148914694092305796 + ChainFileName: chain.db + BPGenesisInfo: + Version: 1 + Producer: "0000000000000000000000000000000000000000000000000000000000000001" + MerkleRoot: "0000000000000000000000000000000000000000000000000000000000000001" + ParentHash: "0000000000000000000000000000000000000000000000000000000000000001" + Timestamp: 2019-01-02T13:33:00Z + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + BaseAccounts: + - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 58aceaf4b730b54bf00c0fb3f7b14886de470767f313c2d108968cd8bf0794b7 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 KnownNodes: - ID: 00000000000589366268c274fdc11ec8bdb17e668d2f619555a2e9c1a29c91d8 Nonce: @@ -90,3 +114,12 @@ KnownNodes: Addr: "127.0.0.1:4661" PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 Role: Client +QPS: 1000 +ChainBusPeriod: 0s +BillingBlockCount: 60 +BPPeriod: 10s +BPTick: 3s 
+SQLChainPeriod: 1m0s +SQLChainTick: 10s +SQLChainTTL: 10 +MinProviderDeposit: 1000000 diff --git a/test/testnet_client/run.sh b/test/testnet_client/run.sh index 9bf63dbbd..eda9ede95 100755 --- a/test/testnet_client/run.sh +++ b/test/testnet_client/run.sh @@ -20,9 +20,7 @@ wallet=$(awk '{print $3}' wallet.txt) #transfer some coin to above address ${PROJECT_DIR}/bin/cql -config ${PROJECT_DIR}/conf/testnet/config.yaml -transfer \ - '{"addr":"'${wallet}'", "amount":"100000000 Particle"}' - -sleep 60 + '{"addr":"'${wallet}'", "amount":"100000000 Particle"}' -wait-tx-confirm ${PROJECT_DIR}/bin/cql -config conf/config.yaml -create 2 | tee dsn.txt From 5caa487ada8d7aeb93915ba26fb6ab67d956a595 Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 22 Jan 2019 12:02:56 +0800 Subject: [PATCH 187/302] Increase waitTxConfirmationMaxDuration to 20 times BPPeriod. Add waitTxConfirmation in createdb. --- cmd/cql/main.go | 13 ++++++++++++- test/testnet_client/run.sh | 7 +++---- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 542ab60fa..e4b7874b2 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -246,7 +246,7 @@ func main() { // TODO(leventeliu): discover more specific confirmation duration from config. We don't have // enough informations from config to do that currently, so just use a fixed and long enough // duration. 
- waitTxConfirmationMaxDuration = 10 * conf.GConf.BPPeriod + waitTxConfirmationMaxDuration = 20 * conf.GConf.BPPeriod if getBalance { var stableCoinBalance, covenantCoinBalance uint64 @@ -334,6 +334,17 @@ func main() { return } + if waitTxConfirmation { + var ctx, cancel = context.WithTimeout(context.Background(), waitTxConfirmationMaxDuration) + defer cancel() + err = client.WaitDBCreation(ctx, dsn) + if err != nil { + log.WithError(err).Error("create database failed durating creation") + os.Exit(-1) + return + } + } + log.Infof("the newly created database is: %#v", dsn) fmt.Printf(dsn) return diff --git a/test/testnet_client/run.sh b/test/testnet_client/run.sh index eda9ede95..13b71f7b8 100755 --- a/test/testnet_client/run.sh +++ b/test/testnet_client/run.sh @@ -22,14 +22,13 @@ wallet=$(awk '{print $3}' wallet.txt) ${PROJECT_DIR}/bin/cql -config ${PROJECT_DIR}/conf/testnet/config.yaml -transfer \ '{"addr":"'${wallet}'", "amount":"100000000 Particle"}' -wait-tx-confirm -${PROJECT_DIR}/bin/cql -config conf/config.yaml -create 2 | tee dsn.txt +${PROJECT_DIR}/bin/cql -config conf/config.yaml -get-balance + +${PROJECT_DIR}/bin/cql -config conf/config.yaml -create 2 -wait-tx-confirm | tee dsn.txt #get dsn dsn=$(cat dsn.txt) -sleep 60 - -${PROJECT_DIR}/bin/cql -config conf/config.yaml -get-balance ${PROJECT_DIR}/bin/cql -config conf/config.yaml -dsn ${dsn} \ -command 'create table test_for_new_account(column1 int);' From 12f28afd24f3dcdb2c8aef7e90aa11bf286ac248 Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 22 Jan 2019 12:22:26 +0800 Subject: [PATCH 188/302] Add a param 'fast' for GNTE test. 
--- cmd/cql-minerd/benchGNTE.sh | 39 +++++++++++++++++++++++-------------- test/GNTE/run.sh | 13 +++++++++---- 2 files changed, 33 insertions(+), 19 deletions(-) diff --git a/cmd/cql-minerd/benchGNTE.sh b/cmd/cql-minerd/benchGNTE.sh index d7895b6a3..c11f14ce5 100755 --- a/cmd/cql-minerd/benchGNTE.sh +++ b/cmd/cql-minerd/benchGNTE.sh @@ -1,21 +1,30 @@ #!/bin/bash +param=$1 + #make -C ../../ clean && \ #make -C ../../ use_all_cores && \ -go test -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ |tee gnte.log -go test -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -bench=^BenchmarkMinerGNTE4$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -bench=^BenchmarkMinerGNTE8$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=4 -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=4 -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=4 -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=4 -bench=^BenchmarkMinerGNTE4$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=4 -bench=^BenchmarkMinerGNTE8$ -benchtime=10s -run ^$ |tee -a gnte.log +if [ "fast" == "$param" ]; then + go test -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ |tee gnte.log + go test -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ |tee -a gnte.log + go test -cpu=1 -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ |tee -a gnte.log +else + go test -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ |tee gnte.log + go test -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ |tee -a gnte.log + go test -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ |tee -a gnte.log + go test -bench=^BenchmarkMinerGNTE4$ -benchtime=10s -run ^$ |tee -a gnte.log + go test -bench=^BenchmarkMinerGNTE8$ -benchtime=10s -run ^$ |tee -a gnte.log + + go test -cpu=4 -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ |tee 
-a gnte.log + go test -cpu=4 -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ |tee -a gnte.log + go test -cpu=4 -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ |tee -a gnte.log + go test -cpu=4 -bench=^BenchmarkMinerGNTE4$ -benchtime=10s -run ^$ |tee -a gnte.log + go test -cpu=4 -bench=^BenchmarkMinerGNTE8$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=1 -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=1 -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=1 -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=1 -bench=^BenchmarkMinerGNTE4$ -benchtime=10s -run ^$ |tee -a gnte.log -go test -cpu=1 -bench=^BenchmarkMinerGNTE8$ -benchtime=10s -run ^$ |tee -a gnte.log + go test -cpu=1 -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ |tee -a gnte.log + go test -cpu=1 -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ |tee -a gnte.log + go test -cpu=1 -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ |tee -a gnte.log + go test -cpu=1 -bench=^BenchmarkMinerGNTE4$ -benchtime=10s -run ^$ |tee -a gnte.log + go test -cpu=1 -bench=^BenchmarkMinerGNTE8$ -benchtime=10s -run ^$ |tee -a gnte.log +fi diff --git a/test/GNTE/run.sh b/test/GNTE/run.sh index 1912ef866..650874de9 100755 --- a/test/GNTE/run.sh +++ b/test/GNTE/run.sh @@ -1,8 +1,13 @@ #!/bin/bash -x -yaml=( - ./scripts/gnte_{0,0.2,5,20,100}ms.yaml -) +param=$1 +if [ "fast" == "$param" ]; then + yaml=./scripts/gnte_0ms.yaml +else + yaml=( + ./scripts/gnte_{0,0.2,5,20,100}ms.yaml + ) +fi TEST_WD=$(cd $(dirname $0)/; pwd) PROJECT_DIR=$(cd ${TEST_WD}/../../; pwd) @@ -44,7 +49,7 @@ do # Bench GNTE cd ${PROJECT_DIR}/cmd/cql-minerd/ - bash -x ./benchGNTE.sh + bash -x ./benchGNTE.sh $param echo "${gnte_yaml}" >> ${tmp_file} grep BenchmarkMinerGNTE gnte.log >> ${tmp_file} echo "" >> ${tmp_file} From d6721a57b2dfdeb8c8b8d149bbd81fe4857708bf Mon Sep 17 00:00:00 2001 From: auxten Date: Mon, 21 Jan 2019 11:31:51 +0800 
Subject: [PATCH 189/302] Remove useless cql client mode --- cmd/cql-minerd/main.go | 8 +-- cmd/cqld/client.go | 145 ----------------------------------------- cmd/cqld/main.go | 20 +----- 3 files changed, 7 insertions(+), 166 deletions(-) delete mode 100644 cmd/cqld/client.go diff --git a/cmd/cql-minerd/main.go b/cmd/cql-minerd/main.go index 4736f4d3e..5cb1d055d 100644 --- a/cmd/cql-minerd/main.go +++ b/cmd/cql-minerd/main.go @@ -105,8 +105,8 @@ func init() { flag.StringVar(&logLevel, "log-level", "", "service log level") flag.Usage = func() { - fmt.Fprintf(os.Stderr, "\n%s\n\n", desc) - fmt.Fprintf(os.Stderr, "Usage: %s [arguments]\n", name) + _, _ = fmt.Fprintf(os.Stderr, "\n%s\n\n", desc) + _, _ = fmt.Fprintf(os.Stderr, "Usage: %s [arguments]\n", name) flag.PrintDefaults() } } @@ -160,7 +160,7 @@ func main() { } // init profile, if cpuProfile, memProfile length is 0, nothing will be done - utils.StartProfile(cpuProfile, memProfile) + _ = utils.StartProfile(cpuProfile, memProfile) // set generate key pair config conf.GConf.GenerateKeyPair = genKeyPair @@ -218,7 +218,7 @@ func main() { server.Serve() }() defer func() { - server.Listener.Close() + _ = server.Listener.Close() server.Stop() }() diff --git a/cmd/cqld/client.go b/cmd/cqld/client.go deleted file mode 100644 index 39a18c47c..000000000 --- a/cmd/cqld/client.go +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package main - -import ( - "flag" - "fmt" - "net" - "path/filepath" - "strings" - "syscall" - "time" - - "github.com/CovenantSQL/CovenantSQL/conf" - "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/rpc" - "github.com/CovenantSQL/CovenantSQL/utils/log" - "golang.org/x/crypto/ssh/terminal" -) - -func runClient(nodeID proto.NodeID) (err error) { - var idx int - for i, n := range conf.GConf.KnownNodes { - if n.ID == nodeID { - idx = i - break - } - } - - rootPath := conf.GConf.WorkingRoot - pubKeyStorePath := filepath.Join(rootPath, conf.GConf.PubKeyStoreFile) - privateKeyPath := filepath.Join(rootPath, conf.GConf.PrivateKeyFile) - - // read master key - var masterKey []byte - if !conf.GConf.IsTestMode { - fmt.Print("Type in Master key to continue: ") - masterKey, err = terminal.ReadPassword(syscall.Stdin) - if err != nil { - fmt.Printf("Failed to read Master Key: %v", err) - } - fmt.Println("") - } - - err = kms.InitLocalKeyPair(privateKeyPath, masterKey) - if err != nil { - log.WithError(err).Error("init local key pair failed") - return - } - - conf.GConf.KnownNodes[idx].PublicKey, err = kms.GetLocalPublicKey() - if err != nil { - log.WithError(err).Error("get local public key failed") - return - } - //nodeInfo := asymmetric.GetPubKeyNonce(AllNodes[idx].PublicKey, 20, 500*time.Millisecond, nil) - //log.Debugf("client pubkey:\n%x", AllNodes[idx].PublicKey.Serialize()) - //log.Debugf("client nonce:\n%v", nodeInfo) - - // init nodes - log.Info("init peers") - _, _, _, err = initNodePeers(nodeID, pubKeyStorePath) - if err != nil { - return - } - - // do client request - if err = clientRequest(clientOperation, flag.Arg(0)); err != nil { - return - } - - return -} - -func clientRequest(reqType string, sql string) (err error) { - log.SetLevel(log.DebugLevel) - leaderNodeID := kms.BP.NodeID - var conn net.Conn - var client *rpc.Client - - if len(reqType) > 0 && 
strings.Title(reqType[:1]) == "P" { - if conn, err = rpc.DialToNode(leaderNodeID, rpc.GetSessionPoolInstance(), false); err != nil { - return - } - if client, err = rpc.InitClientConn(conn); err != nil { - return - } - reqType = "Ping" - node1 := proto.NewNode() - node1.InitNodeCryptoInfo(100 * time.Millisecond) - - reqA := &proto.PingReq{ - Node: *node1, - } - - respA := new(proto.PingResp) - log.Debugf("req %#v: %#v", reqType, reqA) - err = client.Call("DHT."+reqType, reqA, respA) - if err != nil { - log.Fatal(err) - } - log.Debugf("resp %#v: %#v", reqType, respA) - } else { - for _, bp := range conf.GConf.KnownNodes { - if bp.Role == proto.Leader || bp.Role == proto.Follower { - if conn, err = rpc.DialToNode(bp.ID, rpc.GetSessionPoolInstance(), false); err != nil { - return - } - if client, err = rpc.InitClientConn(conn); err != nil { - return - } - log.WithField("bp", bp.ID).Debug("calling BP") - reqType = "FindNeighbor" - req := &proto.FindNeighborReq{ - ID: proto.NodeID(flag.Arg(0)), - Count: 10, - } - resp := new(proto.FindNeighborResp) - log.Debugf("req %#v: %#v", reqType, req) - err = client.Call("DHT."+reqType, req, resp) - if err != nil { - log.Fatal(err) - } - log.Debugf("resp %#v: %#v", reqType, resp) - } - } - } - - return -} diff --git a/cmd/cqld/main.go b/cmd/cqld/main.go index c1c117ac0..37ccf977c 100644 --- a/cmd/cqld/main.go +++ b/cmd/cqld/main.go @@ -56,9 +56,6 @@ var ( showVersion bool configFile string - clientMode bool - clientOperation string - mode string // "normal", "api" logLevel string ) @@ -76,14 +73,12 @@ func init() { flag.StringVar(&cpuProfile, "cpu-profile", "", "Path to file for CPU profiling information") flag.StringVar(&memProfile, "mem-profile", "", "Path to file for memory profiling information") - flag.BoolVar(&clientMode, "client", false, "run as client") - flag.StringVar(&clientOperation, "operation", "FindNeighbor", "client operation") flag.StringVar(&mode, "mode", "normal", "run mode, e.g. 
normal, api") flag.StringVar(&logLevel, "log-level", "", "service log level") flag.Usage = func() { - fmt.Fprintf(os.Stderr, "\n%s\n\n", desc) - fmt.Fprintf(os.Stderr, "Usage: %s [arguments]\n", name) + _, _ = fmt.Fprintf(os.Stderr, "\n%s\n\n", desc) + _, _ = fmt.Fprintf(os.Stderr, "Usage: %s [arguments]\n", name) flag.PrintDefaults() } } @@ -129,18 +124,9 @@ func main() { } // init profile, if cpuProfile, memProfile length is 0, nothing will be done - utils.StartProfile(cpuProfile, memProfile) + _ = utils.StartProfile(cpuProfile, memProfile) defer utils.StopProfile() - if clientMode { - if err := runClient(conf.GConf.ThisNodeID); err != nil { - log.WithError(err).Fatal("run client failed") - } else { - log.Info("run client success") - } - return - } - if err := runNode(conf.GConf.ThisNodeID, conf.GConf.ListenAddr); err != nil { log.WithError(err).Fatal("run kayak failed") } From 3fe218fcd1f76faf93a989fb2676effc3de00839 Mon Sep 17 00:00:00 2001 From: auxten Date: Mon, 21 Jan 2019 16:50:41 +0800 Subject: [PATCH 190/302] Add package metric missing files --- metric/loadavg.go | 3 +- metric/metric.go | 17 ---- metric/nodemetricmap.go | 142 ++++++++++++++++++++++++++++++++ metric/nodemetricmap_test.go | 97 ++++++++++++++++++++++ metric/rpc.go | 152 +++++++++++++++++++++++++++++++++++ metric/rpc_test.go | 82 +++++++++++++++++++ 6 files changed, 475 insertions(+), 18 deletions(-) create mode 100644 metric/nodemetricmap.go create mode 100644 metric/nodemetricmap_test.go create mode 100644 metric/rpc.go create mode 100644 metric/rpc_test.go diff --git a/metric/loadavg.go b/metric/loadavg.go index 8519a2e52..4e333dfd2 100644 --- a/metric/loadavg.go +++ b/metric/loadavg.go @@ -19,8 +19,9 @@ package metric import ( "fmt" - "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/prometheus/client_golang/prometheus" + + "github.com/CovenantSQL/CovenantSQL/utils/log" ) type loadavgCollector struct { diff --git a/metric/metric.go b/metric/metric.go index 36a55eb29..76b21dbfd 
100644 --- a/metric/metric.go +++ b/metric/metric.go @@ -24,23 +24,6 @@ import ( "github.com/prometheus/common/version" ) -const ( - // KB is 1024 Bytes - KB int64 = 1024 - // MB is 1024 KB - MB int64 = KB * 1024 - // GB is 1024 MB - GB int64 = MB * 1024 - // TB is 1024 GB - TB int64 = GB * 1024 - // PB is 1024 TB - PB int64 = TB * 1024 - // EB is 1024 PB - EB int64 = TB * 1024 - // ZB is 1024 EB - ZB int64 = TB * 1024 -) - func init() { prometheus.MustRegister(version.NewCollector("CovenantSQL")) } diff --git a/metric/nodemetricmap.go b/metric/nodemetricmap.go new file mode 100644 index 000000000..8d8875570 --- /dev/null +++ b/metric/nodemetricmap.go @@ -0,0 +1,142 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package metric + +import ( + "sync" + + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils/log" + dto "github.com/prometheus/client_model/go" +) + +// MetricMap is map from metric name to MetricFamily. +type MetricMap map[string]*dto.MetricFamily + +// NodeCrucialMetricMap is map[NodeID][MetricName]Value +type NodeCrucialMetricMap map[proto.NodeID]map[string]float64 + +// FilterFunc is a function that knows how to filter a specific node +// that match the metric limits. if node picked return true else false. +type FilterFunc func(key proto.NodeID, value MetricMap) bool + +// NodeMetricMap is sync.Map version of map[proto.NodeID]MetricMap. 
+type NodeMetricMap struct { + sync.Map // map[proto.NodeID]MetricMap +} + +// FilterNode return node id slice make filterFunc return true. +func (nmm *NodeMetricMap) FilterNode(filterFunc FilterFunc) (ret []proto.NodeID) { + nodePicker := func(key, value interface{}) bool { + id, ok := key.(proto.NodeID) + if !ok { + return true // continue iteration + } + metrics, ok := value.(MetricMap) + if !ok { + return true // continue iteration + } + if filterFunc(id, metrics) { + ret = append(ret, id) + } + return true + } + nmm.Range(nodePicker) + return +} + +// GetMetrics returns nodes metrics. +func (nmm *NodeMetricMap) GetMetrics(nodes []proto.NodeID) (metrics map[proto.NodeID]MetricMap) { + metrics = make(map[proto.NodeID]MetricMap) + + for _, node := range nodes { + var ok bool + var rawNodeMetrics interface{} + + if rawNodeMetrics, ok = nmm.Load(node); !ok { + continue + } + + var nodeMetrics MetricMap + + if nodeMetrics, ok = rawNodeMetrics.(MetricMap); !ok { + continue + } + + metrics[node] = nodeMetrics + } + + return +} + +// FilterCrucialMetrics filters crucial metrics and also add cpu_count +func (mfm *MetricMap) FilterCrucialMetrics() (ret map[string]float64) { + crucialMetricNameMap := map[string]string{ + "node_memory_MemAvailable_bytes": "mem_avail", + "node_load1": "load1", + "node_load5": "load5", + "node_load15": "load15", + "node_ntp_offset_seconds": "ntp_offset", + "node_filesystem_free_bytes": "fs_avail", + "node_cpu_count": "cpu_count", + } + ret = make(map[string]float64) + for _, v := range *mfm { + if newName, ok := crucialMetricNameMap[*v.Name]; ok { + var metricVal float64 + switch v.GetType() { + case dto.MetricType_GAUGE: + metricVal = v.GetMetric()[0].GetGauge().GetValue() + case dto.MetricType_COUNTER: + metricVal = v.GetMetric()[0].GetCounter().GetValue() + case dto.MetricType_HISTOGRAM: + metricVal = v.GetMetric()[0].GetHistogram().GetBucket()[0].GetUpperBound() + case dto.MetricType_SUMMARY: + metricVal = 
v.GetMetric()[0].GetSummary().GetQuantile()[0].GetValue() + case dto.MetricType_UNTYPED: + metricVal = v.GetMetric()[0].GetUntyped().GetValue() + default: + continue + } + ret[newName] = metricVal + } + } + log.Debugf("crucial Metric added: %v", ret) + + return +} + +// GetCrucialMetrics gets NodeCrucialMetricMap from NodeMetricMap +func (nmm *NodeMetricMap) GetCrucialMetrics() (ret NodeCrucialMetricMap) { + ret = make(NodeCrucialMetricMap) + metricsPicker := func(key, value interface{}) bool { + nodeID, ok := key.(proto.NodeID) + if !ok { + return true // continue iteration + } + mfm, ok := value.(MetricMap) + if !ok { + return true // continue iteration + } + + ret[nodeID] = mfm.FilterCrucialMetrics() + return true // continue iteration + } + nmm.Range(metricsPicker) + + return +} diff --git a/metric/nodemetricmap_test.go b/metric/nodemetricmap_test.go new file mode 100644 index 000000000..d3dce7c8e --- /dev/null +++ b/metric/nodemetricmap_test.go @@ -0,0 +1,97 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package metric + +import ( + "testing" + + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils" + "github.com/CovenantSQL/CovenantSQL/utils/log" + dto "github.com/prometheus/client_model/go" + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestCollectServer_FilterNode(t *testing.T) { + log.SetLevel(log.DebugLevel) + filterTrue := func(key proto.NodeID, value MetricMap) bool { + log.Debugf("key: %s, value: %#v", key, value) + return true + } + filterFalse := func(key proto.NodeID, value MetricMap) bool { + log.Debugf("key: %s, value: %#v", key, value) + return false + } + filterMem1MB := func(key proto.NodeID, value MetricMap) bool { + log.Debugf("key: %s, value: %#v", key, value) + var v *dto.MetricFamily + v, ok := value["node_memory_bytes_total"] + if !ok { + v, ok = value["node_memory_MemTotal_bytes"] + } + if ok && len(v.Metric) > 0 && + v.Metric[0].GetGauge() != nil && + v.Metric[0].GetGauge().Value != nil && + *v.Metric[0].GetGauge().Value > float64(1*utils.MB) { + log.Debugf("has memory: %fGB", *v.Metric[0].GetGauge().Value/float64(utils.GB)) + return true + } + + return false + } + Convey("filter node", t, func() { + cc := NewCollectClient() + mfs, _ := cc.Registry.Gather() + mm := make(MetricMap, 0) + for _, mf := range mfs { + mm[*mf.Name] = mf + log.Debugf("gathered node: %v", mf) + } + nmm := NodeMetricMap{} + nmm.Store(proto.NodeID("node1"), mm) + nmm.Store(proto.NodeID("node2"), nil) + nmm.Store(proto.NodeID("node3"), mm) + So(len(mm), ShouldEqual, len(mfs)) + So(len(mm), ShouldBeGreaterThan, 2) + + ids := nmm.FilterNode(filterTrue) + So(len(ids), ShouldEqual, 2) + + ids1 := nmm.FilterNode(filterMem1MB) + So(len(ids1), ShouldEqual, 2) + + ids2 := nmm.FilterNode(filterFalse) + So(len(ids2), ShouldEqual, 0) + }) + Convey("filter metrics", t, func() { + cc := NewCollectClient() + mfs, _ := cc.Registry.Gather() + mm := make(MetricMap, 0) + for _, mf := range mfs { + mm[*mf.Name] = mf + log.Debugf("gathered node: %v", mf) + } + nmm := NodeMetricMap{} + nmm.Store(proto.NodeID("node1"), mm) + nmm.Store(proto.NodeID("node2"), nil) + + cmm := nmm.GetCrucialMetrics() + So(len(cmm), ShouldEqual, 1) + So(len(cmm["node1"]), 
ShouldBeGreaterThanOrEqualTo, 6) + }) + +} diff --git a/metric/rpc.go b/metric/rpc.go new file mode 100644 index 000000000..78aeac315 --- /dev/null +++ b/metric/rpc.go @@ -0,0 +1,152 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package metric + +import ( + "bytes" + "errors" + "fmt" + + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/route" + "github.com/CovenantSQL/CovenantSQL/rpc" + "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/common/expfmt" +) + +// MetricServiceName is the RPC name +const MetricServiceName = "Metric" + +// CollectClient is the Metric Collect Client +type CollectClient struct { + Registry *prometheus.Registry +} + +// NewCollectClient returns a new CollectClient +func NewCollectClient() *CollectClient { + reg := StartMetricCollector() + if reg == nil { + log.Fatal("StartMetricCollector failed") + } + + return &CollectClient{ + Registry: reg, + } +} + +// CollectServer is the Metric receiver side +type CollectServer struct { + NodeMetric NodeMetricMap // map[proto.NodeID]MetricMap +} + +// NewCollectServer returns a new CollectServer +func NewCollectServer() *CollectServer { + return &CollectServer{ + NodeMetric: NodeMetricMap{}, + } +} + +// UploadMetrics RPC uploads metric info +func (cs *CollectServer) UploadMetrics(req *proto.UploadMetricsReq, resp 
*proto.UploadMetricsResp) (err error) { + reqNodeID := req.GetNodeID().ToNodeID() + if reqNodeID.IsEmpty() { + err = errors.New("empty node id") + log.Error(err) + return + } + if !route.IsPermitted(&req.Envelope, route.MetricUploadMetrics) { + err = fmt.Errorf("calling from node %s is not permitted", reqNodeID) + log.Error(err) + return + } + + mfm := make(MetricMap, len(req.MFBytes)) + log.Debugf("RPC received MFS len %d", len(req.MFBytes)) + for _, mf := range req.MFBytes[:] { + bufReader := bytes.NewReader(mf) + //mf := new(dto.MetricFamily) + //dec := expfmt.NewDecoder(bufReader, expfmt.FmtProtoCompact) + //err = dec.Decode(mf) + tp := expfmt.TextParser{} + mf, err := tp.TextToMetricFamilies(bufReader) + if err != nil { + log.Warnf("decode MetricFamily failed: %s", err) + continue + } + //log.Debugf("RPC received MF: %#v", mf) + for k, v := range mf { + mfm[k] = v + } + } + //log.Debugf("MetricFamily uploaded: %v, %v", reqNodeID, mfm) + if len(mfm) > 0 { + cs.NodeMetric.Store(reqNodeID, mfm) + } else { + err = errors.New("no valid metric received") + log.Error(err) + } + return +} + +// GatherMetricBytes gathers the registered metric info and encode it to [][]byte +func (cc *CollectClient) GatherMetricBytes() (mfb [][]byte, err error) { + mfs, err := cc.Registry.Gather() + if err != nil { + log.Errorf("gather metrics failed: %s", err) + return + } + mfb = make([][]byte, 0, len(mfs)) + for _, mf := range mfs[:] { + //log.Debugf("mf: %s", mf.String()) + buf := new(bytes.Buffer) + //enc := expfmt.NewEncoder(buf, expfmt.FmtProtoCompact) + //err = enc.Encode(mf) + _, err := expfmt.MetricFamilyToText(buf, mf) + if err != nil { + log.Warnf("encode MetricFamily failed: %s", err) + continue + } + mfb = append(mfb, buf.Bytes()) + } + if len(mfb) == 0 { + err = errors.New("no valid metric gathered") + } + + return +} + +// UploadMetrics calls RPC UploadMetrics to upload its metric info +func (cc *CollectClient) UploadMetrics(BPNodeID proto.NodeID) (err error) { + mfb, 
err := cc.GatherMetricBytes() + if err != nil { + log.Errorf("GatherMetricBytes failed: %s", err) + return + } + log.Debugf("calling BP: %s", BPNodeID) + reqType := MetricServiceName + ".UploadMetrics" + req := &proto.UploadMetricsReq{ + MFBytes: mfb, + } + resp := new(proto.UploadMetricsResp) + err = rpc.NewCaller().CallNode(BPNodeID, reqType, req, resp) + if err != nil { + log.Errorf("calling RPC %s failed: %s", reqType, err) + } + log.Debugf("resp %s: %v", reqType, resp) + return +} diff --git a/metric/rpc_test.go b/metric/rpc_test.go new file mode 100644 index 000000000..90c7d7af3 --- /dev/null +++ b/metric/rpc_test.go @@ -0,0 +1,82 @@ +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package metric + +import ( + "os" + "testing" + "time" + + "github.com/CovenantSQL/CovenantSQL/consistent" + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/kms" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/route" + "github.com/CovenantSQL/CovenantSQL/rpc" + "github.com/CovenantSQL/CovenantSQL/utils/log" + . 
"github.com/smartystreets/goconvey/convey" +) + +const PubKeyStorePath = "./public.keystore" + +func TestCollectClient_UploadMetrics(t *testing.T) { + defer os.Remove(PubKeyStorePath) + log.SetLevel(log.DebugLevel) + addr := "127.0.0.1:0" + masterKey := []byte("abc") + + cc := NewCollectClient() + cs := NewCollectServer() + + server, err := rpc.NewServerWithService(rpc.ServiceMap{MetricServiceName: cs}) + if err != nil { + log.Fatal(err) + } + + route.NewDHTService(PubKeyStorePath, new(consistent.KMSStorage), false) + server.InitRPCServer(addr, "../keys/test.key", masterKey) + go server.Serve() + + publicKey, err := kms.GetLocalPublicKey() + nonce := asymmetric.GetPubKeyNonce(publicKey, 10, 100*time.Millisecond, nil) + serverNodeID := proto.NodeID(nonce.Hash.String()) + kms.SetPublicKey(serverNodeID, nonce.Nonce, publicKey) + kms.SetLocalNodeIDNonce(nonce.Hash.CloneBytes(), &nonce.Nonce) + route.SetNodeAddrCache(&proto.RawNodeID{Hash: nonce.Hash}, server.Listener.Addr().String()) + + Convey("get metric and upload by RPC", t, func() { + err = cc.UploadMetrics(serverNodeID) + v, ok := cs.NodeMetric.Load(serverNodeID) + So(ok, ShouldBeTrue) + //log.Debugf("NodeMetric:%#v", v) + + m, _ := v.(MetricMap) + mfb, err := cc.GatherMetricBytes() + So(err, ShouldBeNil) + So(len(m), ShouldEqual, len(mfb)) + So(len(m), ShouldBeGreaterThan, 2) + }) + + Convey("get metric and upload by simply called without node id", t, func() { + req := &proto.UploadMetricsReq{ + MFBytes: nil, + Envelope: proto.Envelope{}, + } + err = cs.UploadMetrics(req, &proto.UploadMetricsResp{}) + So(err, ShouldNotBeNil) + }) +} From 8e56201d7c30415384f5a7d27889181405bd27cc Mon Sep 17 00:00:00 2001 From: auxten Date: Mon, 21 Jan 2019 16:51:19 +0800 Subject: [PATCH 191/302] Add MetricUploadMetrics RPC back, but not to upload metric --- route/acl.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/route/acl.go b/route/acl.go index 2e71fe144..8f1245c58 100644 --- a/route/acl.go +++ 
b/route/acl.go @@ -41,6 +41,9 @@ ACLs: Miner -> Miner, Kayak.Call(): ACL: Open to Miner Leader. + Miner -> BP, Metric.UploadMetrics(): + ACL: Open to Registered Miner + BP -> BP, Exchange NodeInfo, Kayak.Call(): ACL: Open to BP @@ -66,6 +69,8 @@ const ( DHTFindNode // KayakCall is used by BP for data consistency KayakCall + // MetricUploadMetrics uploads node metrics + MetricUploadMetrics // DBSQuery is used by client to read/write database DBSQuery // DBSAck is used by client to send acknowledge to the query response @@ -138,6 +143,8 @@ func (s RemoteFunc) String() string { return "DHT.FindNeighbor" case DHTFindNode: return "DHT.FindNode" + case MetricUploadMetrics: + return "Metric.UploadMetrics" case KayakCall: return "Kayak.Call" case DBSQuery: @@ -213,7 +220,7 @@ func IsPermitted(callerEnvelope *proto.Envelope, funcName RemoteFunc) (ok bool) // non BP switch funcName { // DHT related - case DHTPing, DHTFindNode, DHTFindNeighbor: + case DHTPing, DHTFindNode, DHTFindNeighbor, MetricUploadMetrics: return true // Kayak related case KayakCall: From d11544aef1a78d2df404cb7b25d2618121bdad52 Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 22 Jan 2019 15:13:23 +0800 Subject: [PATCH 192/302] Add metric web in metric pkg --- metric/metricweb.go | 93 ++++++++++++++++++++++++++++++++++++++++ metric/metricweb_test.go | 31 ++++++++++++++ 2 files changed, 124 insertions(+) create mode 100644 metric/metricweb.go create mode 100644 metric/metricweb_test.go diff --git a/metric/metricweb.go b/metric/metricweb.go new file mode 100644 index 000000000..97318356a --- /dev/null +++ b/metric/metricweb.go @@ -0,0 +1,93 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package metric + +import ( + "expvar" + "net/http" + "runtime" + "time" + + "github.com/pkg/errors" + mw "github.com/zserge/metric" + + "github.com/CovenantSQL/CovenantSQL/utils" + "github.com/CovenantSQL/CovenantSQL/utils/log" +) + +func collect(cc *CollectClient) (err error) { + mfs, err := cc.Registry.Gather() + if err != nil { + err = errors.Wrap(err, "gathering node metrics failed") + return + } + mm := make(MetricMap, 0) + for _, mf := range mfs { + mm[*mf.Name] = mf + log.Debugf("gathered node: %v", mf) + } + crucialMetrics := mm.FilterCrucialMetrics() + for k, v := range crucialMetrics { + var val expvar.Var + if val = expvar.Get(k); val == nil { + expvar.Publish(k, mw.NewGauge("1h1m")) + val = expvar.Get(k) + } + val.(mw.Metric).Add(v) + } + + return +} + +// InitMetricWeb initializes the /debug/metrics web +func InitMetricWeb(metricWeb string) (err error) { + // Some Go internal metrics + expvar.Publish("go:numgoroutine", mw.NewGauge("1m1s", "5m5s", "1h1m")) + expvar.Publish("go:numcgocall", mw.NewGauge("1m1s", "5m5s", "1h1m")) + expvar.Publish("go:alloc", mw.NewGauge("1m1s", "5m5s", "1h1m")) + expvar.Publish("go:alloctotal", mw.NewGauge("1m1s", "5m5s", "1h1m")) + + // start period provide service transaction generator + // start prometheus collector + cc := NewCollectClient() + err = collect(cc) + if err != nil { + return + } + + go func() { + for range time.Tick(time.Minute) { + _ = collect(cc) + } + }() + + go func() { + for range time.Tick(5 * time.Second) { + m := &runtime.MemStats{} + runtime.ReadMemStats(m) + 
expvar.Get("go:numgoroutine").(mw.Metric).Add(float64(runtime.NumGoroutine())) + expvar.Get("go:numcgocall").(mw.Metric).Add(float64(runtime.NumCgoCall())) + expvar.Get("go:alloc").(mw.Metric).Add(float64(m.Alloc) / float64(utils.MB)) + expvar.Get("go:alloctotal").(mw.Metric).Add(float64(m.TotalAlloc) / float64(utils.MB)) + } + }() + http.Handle("/debug/metrics", mw.Handler(mw.Exposed)) + go func() { + _ = http.ListenAndServe(metricWeb, nil) + }() + return +} diff --git a/metric/metricweb_test.go b/metric/metricweb_test.go new file mode 100644 index 000000000..1213960ed --- /dev/null +++ b/metric/metricweb_test.go @@ -0,0 +1,31 @@ +package metric + +import ( + "fmt" + "net/http" + "testing" + "time" + + . "github.com/smartystreets/goconvey/convey" + + "github.com/CovenantSQL/CovenantSQL/utils" +) + +func TestInitMetricWeb(t *testing.T) { + Convey("init metric web", t, func() { + ports, err := utils.GetRandomPorts("127.0.0.1", 1025, 60000, 1) + So(err, ShouldBeNil) + addr := fmt.Sprintf("127.0.0.1:%d", ports[0]) + err = InitMetricWeb(addr) + So(err, ShouldBeNil) + time.Sleep(7 * time.Second) + resp, err := http.Get("http://" + addr + "/debug/metrics") + So(err, ShouldBeNil) + buf := make([]byte, 40960) + _, err = resp.Body.Read(buf) + So(err, ShouldBeNil) + So(string(buf), ShouldContainSubstring, "cpu_count") + So(string(buf), ShouldContainSubstring, "fs_avail") + So(string(buf), ShouldContainSubstring, "go:alloc") + }) +} From 689e1e744dbb820a38e8fa7f16cef500d6b5c998 Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 22 Jan 2019 15:13:56 +0800 Subject: [PATCH 193/302] Capital all flag message --- cmd/cql/main.go | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 07db14704..956a1a8de 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -203,27 +203,27 @@ func init() { Override: "", }) - flag.StringVar(&dsn, "dsn", "", "database url") - flag.StringVar(&command, "command", "", 
"run only single command (SQL or usql internal command) and exit") - flag.StringVar(&fileName, "file", "", "execute commands from file and exit") + flag.StringVar(&dsn, "dsn", "", "Database url") + flag.StringVar(&command, "command", "", "Run only single command (SQL or usql internal command) and exit") + flag.StringVar(&fileName, "file", "", "Execute commands from file and exit") flag.BoolVar(&showVersion, "version", false, "Show version information and exit") - flag.BoolVar(&noRC, "no-rc", false, "do not read start up file") + flag.BoolVar(&noRC, "no-rc", false, "Do not read start up file") flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, "Disable signature sign and verify, for testing") - flag.StringVar(&outFile, "out", "", "output file") - flag.StringVar(&configFile, "config", "config.yaml", "config file for covenantsql") - flag.StringVar(&password, "password", "", "master key password for covenantsql") - flag.BoolVar(&singleTransaction, "single-transaction", false, "execute as a single transaction (if non-interactive)") - flag.Var(&variables, "variable", "set variable") + flag.StringVar(&outFile, "out", "", "Record stdout to file") + flag.StringVar(&configFile, "config", "config.yaml", "Config file for covenantsql") + flag.StringVar(&password, "password", "", "Master key password for covenantsql") + flag.BoolVar(&singleTransaction, "single-transaction", false, "Execute as a single transaction (if non-interactive)") + flag.Var(&variables, "variable", "Set variable") // DML flags - flag.StringVar(&createDB, "create", "", "create database, argument can be instance requirement json or simply a node count requirement") - flag.StringVar(&dropDB, "drop", "", "drop database, argument should be a database id (without covenantsql:// scheme is acceptable)") - flag.StringVar(&updatePermission, "update-perm", "", "update user's permission on specific sqlchain") - flag.StringVar(&transferToken, "transfer", "", "transfer token to target account") - 
flag.BoolVar(&getBalance, "get-balance", false, "get balance of current account") - flag.StringVar(&getBalanceWithTokenName, "token-balance", "", "get specific token's balance of current account, e.g. Particle, Wave, and etc.") - flag.BoolVar(&waitTxConfirmation, "wait-tx-confirm", false, "wait for transaction confirmation") + flag.StringVar(&createDB, "create", "", "Create database, argument can be instance requirement json or simply a node count requirement") + flag.StringVar(&dropDB, "drop", "", "Drop database, argument should be a database id (without covenantsql:// scheme is acceptable)") + flag.StringVar(&updatePermission, "update-perm", "", "Update user's permission on specific sqlchain") + flag.StringVar(&transferToken, "transfer", "", "Transfer token to target account") + flag.BoolVar(&getBalance, "get-balance", false, "Get balance of current account") + flag.StringVar(&getBalanceWithTokenName, "token-balance", "", "Get specific token's balance of current account, e.g. Particle, Wave, and etc.") + flag.BoolVar(&waitTxConfirmation, "wait-tx-confirm", false, "Wait for transaction confirmation") } func main() { From e4ea4e6b454f650812b9655a4c06c66b07c42c2f Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 22 Jan 2019 15:15:40 +0800 Subject: [PATCH 194/302] Add metric-web flag --- cmd/cql-minerd/integration_test.go | 6 ++++++ cmd/cql-minerd/main.go | 15 +++++++++++++-- cmd/cqld/bootstrap.go | 1 - cmd/cqld/main.go | 19 +++++++++++++++---- 4 files changed, 34 insertions(+), 7 deletions(-) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index 7746d8384..f275e376b 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -87,6 +87,7 @@ func startNodes() { FJ(baseDir, "./bin/cqld.test"), []string{"-config", FJ(testWorkingDir, "./integration/node_0/config.yaml"), "-test.coverprofile", FJ(baseDir, "./cmd/cql-minerd/leader.cover.out"), + "-metric-web", "0.0.0.0:13122", }, "leader", testWorkingDir, 
logDir, true, ); err == nil { @@ -98,6 +99,7 @@ func startNodes() { FJ(baseDir, "./bin/cqld.test"), []string{"-config", FJ(testWorkingDir, "./integration/node_1/config.yaml"), "-test.coverprofile", FJ(baseDir, "./cmd/cql-minerd/follower1.cover.out"), + "-metric-web", "0.0.0.0:13121", }, "follower1", testWorkingDir, logDir, false, ); err == nil { @@ -109,6 +111,7 @@ func startNodes() { FJ(baseDir, "./bin/cqld.test"), []string{"-config", FJ(testWorkingDir, "./integration/node_2/config.yaml"), "-test.coverprofile", FJ(baseDir, "./cmd/cql-minerd/follower2.cover.out"), + "-metric-web", "0.0.0.0:13120", }, "follower2", testWorkingDir, logDir, false, ); err == nil { @@ -149,6 +152,7 @@ func startNodes() { FJ(baseDir, "./bin/cql-minerd.test"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_0/config.yaml"), "-test.coverprofile", FJ(baseDir, "./cmd/cql-minerd/miner0.cover.out"), + "-metric-web", "0.0.0.0:12144", }, "miner0", testWorkingDir, logDir, true, ); err == nil { @@ -162,6 +166,7 @@ func startNodes() { FJ(baseDir, "./bin/cql-minerd.test"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_1/config.yaml"), "-test.coverprofile", FJ(baseDir, "./cmd/cql-minerd/miner1.cover.out"), + "-metric-web", "0.0.0.0:12145", }, "miner1", testWorkingDir, logDir, false, ); err == nil { @@ -175,6 +180,7 @@ func startNodes() { FJ(baseDir, "./bin/cql-minerd.test"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_2/config.yaml"), "-test.coverprofile", FJ(baseDir, "./cmd/cql-minerd/miner2.cover.out"), + "-metric-web", "0.0.0.0:12146", }, "miner2", testWorkingDir, logDir, false, ); err == nil { diff --git a/cmd/cql-minerd/main.go b/cmd/cql-minerd/main.go index 5cb1d055d..62e9f1739 100644 --- a/cmd/cql-minerd/main.go +++ b/cmd/cql-minerd/main.go @@ -70,6 +70,7 @@ var ( configFile string genKeyPair bool metricLog bool + metricWeb string // profile cpuProfile string @@ -101,8 +102,10 @@ func init() { flag.StringVar(&cpuProfile, "cpu-profile", 
"", "Path to file for CPU profiling information") flag.StringVar(&memProfile, "mem-profile", "", "Path to file for memory profiling information") flag.StringVar(&metricGraphite, "metric-graphite-server", "", "Metric graphite server to push metrics") - flag.StringVar(&traceFile, "trace-file", "", "trace profile") - flag.StringVar(&logLevel, "log-level", "", "service log level") + flag.StringVar(&metricWeb, "metric-web", "", "Address and port to get internal metrics") + + flag.StringVar(&traceFile, "trace-file", "", "Trace profile") + flag.StringVar(&logLevel, "log-level", "", "Service log level") flag.Usage = func() { _, _ = fmt.Fprintf(os.Stderr, "\n%s\n\n", desc) @@ -186,6 +189,14 @@ func main() { }() } + if len(metricWeb) > 0 { + err = metric.InitMetricWeb(metricWeb) + if err != nil { + log.Errorf("start metric web server on %s failed: %v", metricWeb, err) + os.Exit(-1) + } + } + // start period provide service transaction generator go func() { // start prometheus collector diff --git a/cmd/cqld/bootstrap.go b/cmd/cqld/bootstrap.go index 420f8a710..1281da5ef 100644 --- a/cmd/cqld/bootstrap.go +++ b/cmd/cqld/bootstrap.go @@ -156,7 +156,6 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { defer chain.Stop() log.Info(conf.StartSucceedMessage) - //go periodicPingBlockProducer() signalCh := make(chan os.Signal, 1) signal.Notify( diff --git a/cmd/cqld/main.go b/cmd/cqld/main.go index 37ccf977c..bcf5fca26 100644 --- a/cmd/cqld/main.go +++ b/cmd/cqld/main.go @@ -27,6 +27,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" + "github.com/CovenantSQL/CovenantSQL/metric" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" ) @@ -50,6 +51,7 @@ var ( // profile cpuProfile string memProfile string + metricWeb string // other noLogo bool @@ -72,9 +74,10 @@ func init() { flag.StringVar(&cpuProfile, "cpu-profile", "", 
"Path to file for CPU profiling information") flag.StringVar(&memProfile, "mem-profile", "", "Path to file for memory profiling information") + flag.StringVar(&metricWeb, "metric-web", "", "Address and port to get internal metrics") - flag.StringVar(&mode, "mode", "normal", "run mode, e.g. normal, api") - flag.StringVar(&logLevel, "log-level", "", "service log level") + flag.StringVar(&mode, "mode", "normal", "Run mode, e.g. normal, api") + flag.StringVar(&logLevel, "log-level", "", "Service log level") flag.Usage = func() { _, _ = fmt.Fprintf(os.Stderr, "\n%s\n\n", desc) @@ -85,7 +88,8 @@ func init() { func initLogs() { log.Infof("%#v starting, version %#v, commit %#v, branch %#v", name, version, commit, branch) - log.Infof("%#v, target architecture is %#v, operating system target is %#v", runtime.Version(), runtime.GOARCH, runtime.GOOS) + log.Infof("%#v, target architecture is %#v, operating system target is %#v", + runtime.Version(), runtime.GOARCH, runtime.GOOS) log.Infof("role: %#v", conf.RoleTag) } @@ -113,7 +117,7 @@ func main() { kms.InitBP() log.Debugf("config:\n%#v", conf.GConf) - // BP DO NOT Generate new key pair + // BP Never Generate new key pair conf.GConf.GenerateKeyPair = false // init log @@ -123,6 +127,13 @@ func main() { fmt.Print(logo) } + if len(metricWeb) > 0 { + err = metric.InitMetricWeb(metricWeb) + if err != nil { + log.Errorf("start metric web server on %s failed: %v", metricWeb, err) + os.Exit(-1) + } + } // init profile, if cpuProfile, memProfile length is 0, nothing will be done _ = utils.StartProfile(cpuProfile, memProfile) defer utils.StopProfile() From e1532792650f9feb11e588ef42cc6ffec11d99e4 Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 22 Jan 2019 15:16:27 +0800 Subject: [PATCH 195/302] Add block height to expvar --- blockproducer/chain.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/blockproducer/chain.go b/blockproducer/chain.go index be25dd3b2..a1516c91d 100644 --- a/blockproducer/chain.go +++ 
b/blockproducer/chain.go @@ -18,12 +18,15 @@ package blockproducer import ( "context" + "expvar" "fmt" "math" "os" "sync" "time" + mw "github.com/zserge/metric" + pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/chainbus" "github.com/CovenantSQL/CovenantSQL/conf" @@ -80,6 +83,11 @@ type Chain struct { // NewChain creates a new blockchain. func NewChain(cfg *Config) (c *Chain, err error) { + // Normally, NewChain() should only be called once in app. + // So, we just check expvar without a lock + if expvar.Get("height") == nil { + expvar.Publish("height", mw.NewGauge("5m1s")) + } return NewChainWithContext(context.Background(), cfg) } @@ -361,6 +369,7 @@ func (c *Chain) advanceNextHeight(now time.Time, d time.Duration) { }).Warn("too much time elapsed in the new period, skip this block") return } + expvar.Get("height").(mw.Metric).Add(float64(c.getNextHeight())) log.WithField("height", c.getNextHeight()).Info("producing a new block") if err := c.produceBlock(now); err != nil { log.WithField("now", now.Format(time.RFC3339Nano)).WithError(err).Errorln( From 7c9a429bbff98011568e0cfd2a291cd446e08fc5 Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 22 Jan 2019 15:16:57 +0800 Subject: [PATCH 196/302] Add RPC call metric expvar --- rpc/rpcutil.go | 49 ++++++++++++++++++++++++++++++++++++++++++++++++- utils/big.go | 17 +++++++++++++++++ 2 files changed, 65 insertions(+), 1 deletion(-) diff --git a/rpc/rpcutil.go b/rpc/rpcutil.go index 4873ea8aa..113d643d4 100644 --- a/rpc/rpcutil.go +++ b/rpc/rpcutil.go @@ -18,12 +18,14 @@ package rpc import ( "context" + "expvar" "io" "math/rand" "net" "net/rpc" "strings" "sync" + "time" "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/proto" @@ -31,6 +33,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/pkg/errors" mux "github.com/xtaci/smux" + mw "github.com/zserge/metric" ) var ( @@ -41,6 +44,8 @@ var ( currentBP proto.NodeID // 
currentBPLock represents the chief block producer access lock. currentBPLock sync.Mutex + // callRPCExpvarLock is the lock of RPC Call Publish lock + callRPCExpvarLock sync.Mutex ) // PersistentCaller is a wrapper for session pooling and RPC calling. @@ -84,6 +89,11 @@ func (c *PersistentCaller) initClient(isAnonymous bool) (err error) { // Call invokes the named function, waits for it to complete, and returns its error status. func (c *PersistentCaller) Call(method string, args interface{}, reply interface{}) (err error) { + startTime := time.Now() + defer func() { + recordRPCCost(startTime, method, err) + }() + err = c.initClient(method == route.DHTPing.String()) if err != nil { err = errors.Wrap(err, "init PersistentCaller client failed") @@ -124,7 +134,7 @@ func (c *PersistentCaller) CloseStream() { if c.client.Conn != nil { stream, ok := c.client.Conn.(*mux.Stream) if ok { - stream.Close() + _ = stream.Close() } } c.client.Close() @@ -155,9 +165,46 @@ func (c *Caller) CallNode( return c.CallNodeWithContext(context.Background(), node, method, args, reply) } +func recordRPCCost(startTime time.Time, method string, err error) { + var ( + name, name_c string + val, val_c expvar.Var + ) + costTime := time.Since(startTime) + if err == nil { + name = "t_succ:" + method + name_c = "c_succ:" + method + } else { + name = "t_fail:" + method + name_c = "c_fail:" + method + } + // Optimistically, val will not be nil except the first Call of method + // expvar uses sync.Map + // So, we try it first without lock + if val = expvar.Get(name); val == nil { + callRPCExpvarLock.Lock() + val = expvar.Get(name) + if val == nil { + expvar.Publish(name, mw.NewHistogram("10s1s", "1m5s", "1h1m")) + expvar.Publish(name_c, mw.NewCounter("10s1s", "1h1m")) + } + callRPCExpvarLock.Unlock() + val = expvar.Get(name) + } + val.(mw.Metric).Add(costTime.Seconds()) + val_c = expvar.Get(name_c) + val_c.(mw.Metric).Add(1) + return +} + // CallNodeWithContext invokes the named function, waits for it 
to complete or context timeout, and returns its error status. func (c *Caller) CallNodeWithContext( ctx context.Context, node proto.NodeID, method string, args interface{}, reply interface{}) (err error) { + startTime := time.Now() + defer func() { + recordRPCCost(startTime, method, err) + }() + conn, err := DialToNode(node, c.pool, method == route.DHTPing.String()) if err != nil { err = errors.Wrapf(err, "dial to node %s failed", node) diff --git a/utils/big.go b/utils/big.go index 7c4556073..f1e84e9b8 100644 --- a/utils/big.go +++ b/utils/big.go @@ -22,6 +22,23 @@ import ( "math/big" ) +const ( + // KB is 1024 Bytes + KB int64 = 1024 + // MB is 1024 KB + MB int64 = KB * 1024 + // GB is 1024 MB + GB int64 = MB * 1024 + // TB is 1024 GB + TB int64 = GB * 1024 + // PB is 1024 TB + PB int64 = TB * 1024 + // EB is 1024 PB + EB int64 = PB * 1024 + // ZB is 1024 EB + ZB int64 = EB * 1024 +) + // Various big integer limit values. var ( tt255 = BigPow(2, 255) From 7b8db5f22ec8da3268895f9b62baebc97e58a27b Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 22 Jan 2019 15:22:01 +0800 Subject: [PATCH 197/302] Add vendor/github.com/zserge/metric@master --- Gopkg.lock | 10 + Gopkg.toml | 4 + vendor/github.com/zserge/metric/LICENSE | 21 ++ vendor/github.com/zserge/metric/README.md | 57 +++ vendor/github.com/zserge/metric/go.mod | 1 + vendor/github.com/zserge/metric/handler.go | 197 +++++++++++ vendor/github.com/zserge/metric/metric.go | 385 +++++++++++++++++++++ 7 files changed, 675 insertions(+) create mode 100644 vendor/github.com/zserge/metric/LICENSE create mode 100644 vendor/github.com/zserge/metric/README.md create mode 100644 vendor/github.com/zserge/metric/go.mod create mode 100644 vendor/github.com/zserge/metric/handler.go create mode 100644 vendor/github.com/zserge/metric/metric.go diff --git a/Gopkg.lock b/Gopkg.lock index a6d747eb8..665093ca8 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -593,6 +593,14 @@ revision = "94e385923345495c4add5c23df1efc6d66964479" version = 
"v1.0" +[[projects]] + branch = "master" + digest = "1:95be927b2ec224dfd5357bdd9a8f588299779812525d9aeb0740a3713a5a2560" + name = "github.com/zserge/metric" + packages = ["."] + pruneopts = "UT" + revision = "5a5d84c90520d706b4805b4a5cca4c57998868e0" + [[projects]] branch = "master" digest = "1:cfd661f1a52594117f2a753bb640a86d4dbf3e0d778c2641bfbc750e6a1c8be7" @@ -672,6 +680,7 @@ "github.com/pkg/errors", "github.com/prometheus/client_golang/prometheus", "github.com/prometheus/client_model/go", + "github.com/prometheus/common/expfmt", "github.com/prometheus/common/version", "github.com/prometheus/procfs", "github.com/rcrowley/go-metrics", @@ -693,6 +702,7 @@ "github.com/xo/usql/rline", "github.com/xo/usql/text", "github.com/xtaci/smux", + "github.com/zserge/metric", "golang.org/x/crypto/ed25519", "golang.org/x/crypto/ssh/terminal", "golang.org/x/sys/unix", diff --git a/Gopkg.toml b/Gopkg.toml index 9df96a495..0f7048f56 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -61,6 +61,10 @@ name = "github.com/CovenantSQL/xurls" branch = "master" +[[override]] + name = "github.com/zserge/metric" + branch = "master" + [[override]] name = "github.com/xtaci/smux" branch = "master" diff --git a/vendor/github.com/zserge/metric/LICENSE b/vendor/github.com/zserge/metric/LICENSE new file mode 100644 index 000000000..79a518d45 --- /dev/null +++ b/vendor/github.com/zserge/metric/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Serge Zaitsev + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial 
portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/zserge/metric/README.md b/vendor/github.com/zserge/metric/README.md new file mode 100644 index 000000000..bbfac3e92 --- /dev/null +++ b/vendor/github.com/zserge/metric/README.md @@ -0,0 +1,57 @@ +# metric + +[![Build Status](https://travis-ci.org/zserge/metric.svg?branch=master)](https://travis-ci.org/zserge/metric) +[![GoDoc](https://godoc.org/github.com/zserge/metric?status.svg)](https://godoc.org/github.com/zserge/metric) +[![Go Report Card](https://goreportcard.com/badge/github.com/zserge/metric)](https://goreportcard.com/report/github.com/zserge/metric) + +Package provides simple uniform interface for metrics such as counters, +gauges and histograms. It keeps track of metrics in runtime and can be used for +some basic web service instrumentation in Go, where complex tools such as +Prometheus or InfluxDB are not required. + +It is compatible with [expvar](https://golang.org/pkg/expvar/) package, that is +also commonly used for monitoring. + +## Usage + +```go +// Create new metric. All metrics may take time frames if you want them to keep +// history. If no time frames are given the metric only keeps track of a single +// current value. 
+c := metric.NewCounter("15m10s") // 15 minutes of history with 10 second precision +// Increment counter +c.Add(1) +// Return JSON with all recorded counter values +c.String() // Or json.Marshal(c) + +// With expvar + +// Register a metric +expvar.Publish("latency", metric.NewHistogram("5m1s", "15m30s", "1h1m")) +// Register HTTP handler to visualize metrics +http.Handle("/debug/metrics", metric.Handler(metric.Exposed)) + +// Measure time and update the metric +start := time.Now() +... +expvar.Get("latency").(metric.Metric).Add(time.Since(start).Seconds()) +``` + +Metrics are thread-safe and can be updated from background goroutines. + +## Web UI + +Nothing fancy, really, but still better than reading plain JSON. No javascript, +only good old HTML, CSS and SVG. + +![web ui](example/screenshot.png) + +Of course you may customize a list of metrics to show in the web UI. + +If you need precise values - you may use `/debug/vars` HTTP endpoint provided +by `expvar`. + +## License + +Code is distributed under MIT license, feel free to use it in your proprietary +projects as well. diff --git a/vendor/github.com/zserge/metric/go.mod b/vendor/github.com/zserge/metric/go.mod new file mode 100644 index 000000000..0e4037d36 --- /dev/null +++ b/vendor/github.com/zserge/metric/go.mod @@ -0,0 +1 @@ +module github.com/zserge/metric diff --git a/vendor/github.com/zserge/metric/handler.go b/vendor/github.com/zserge/metric/handler.go new file mode 100644 index 000000000..b83632cbb --- /dev/null +++ b/vendor/github.com/zserge/metric/handler.go @@ -0,0 +1,197 @@ +package metric + +import ( + "encoding/json" + "expvar" + "fmt" + "net/http" + "sort" + "strings" + + "html/template" +) + +var ( + page = template.Must(template.New(""). + Funcs(template.FuncMap{"path": path, "duration": duration}). + Parse(` + + +Metrics report + + + +
+

    __          __
+.--------..-----.|  |_ .----.|__|.----..-----.
+|        ||  -__||   _||   _||  ||  __||__ --|
+|__|__|__||_____||____||__|  |__||____||_____|
+
+
+

+{{ range . }} +
+

{{ .name }}

+
+ {{ if .type }} +
+ {{ template "table" . }} +
+
+ {{ else if .interval }} +
{{ template "timeseries" . }}
+ {{ else if .metrics}} + {{ range .metrics }} +
+ {{ template "timeseries" . }} +
+ {{ end }} + {{ end }} +
+
+{{ end }} +
+ + +{{ define "table" }} +
+ {{ if eq .type "c" }} + + {{ else if eq .type "g" }} + + + {{ else if eq .type "h" }} + + + {{ end }} +
count
{{ printf "%.2g" .count }}
meanminmax
{{printf "%.2g" .mean}}{{printf "%.2g" .min}}{{printf "%.2g" .max}}
P.50P.90P.99
{{printf "%.2g" .p50}}{{printf "%.2g" .p90}}{{printf "%.2g" .p99}}
+{{ end }} +{{ define "timeseries" }} + {{ template "table" .total }} +
+
+
{{ duration .samples .interval }}
+ + {{ if eq (index (index .samples 0) "type") "c" }} + {{ range (path .samples "count") }}{{end}} + {{ else if eq (index (index .samples 0) "type") "g" }} + {{ range (path .samples "min" "max" "mean" ) }}{{end}} + {{ else if eq (index (index .samples 0) "type") "h" }} + {{ range (path .samples "p50" "p90" "p99") }}{{end}} + {{ end }} + +
+
+{{ end }} +`)) +) + +func path(samples []interface{}, keys ...string) []string { + var min, max float64 + paths := make([]string, len(keys), len(keys)) + for i := 0; i < len(samples); i++ { + s := samples[i].(map[string]interface{}) + for _, k := range keys { + x := s[k].(float64) + if i == 0 || x < min { + min = x + } + if i == 0 || x > max { + max = x + } + } + } + for i := 0; i < len(samples); i++ { + s := samples[i].(map[string]interface{}) + for j, k := range keys { + v := s[k].(float64) + x := float64(i+1) / float64(len(samples)) + y := (v - min) / (max - min) + if max == min { + y = 0 + } + if i == 0 { + paths[j] = fmt.Sprintf("M%f %f", 0.0, (1-y)*18+1) + } + paths[j] += fmt.Sprintf(" L%f %f", x*100, (1-y)*18+1) + } + } + return paths +} + +func duration(samples []interface{}, n float64) string { + n = n * float64(len(samples)) + if n < 60 { + return fmt.Sprintf("%d sec", int(n)) + } else if n < 60*60 { + return fmt.Sprintf("%d min", int(n/60)) + } else if n < 24*60*60 { + return fmt.Sprintf("%d hrs", int(n/60/60)) + } + return fmt.Sprintf("%d days", int(n/24/60/60)) +} + +// Handler returns an http.Handler that renders web UI for all provided metrics. +func Handler(snapshot func() map[string]Metric) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + type h map[string]interface{} + metrics := []h{} + for name, metric := range snapshot() { + m := h{} + b, _ := json.Marshal(metric) + json.Unmarshal(b, &m) + m["name"] = name + metrics = append(metrics, m) + } + sort.Slice(metrics, func(i, j int) bool { + n1 := metrics[i]["name"].(string) + n2 := metrics[j]["name"].(string) + return strings.Compare(n1, n2) < 0 + }) + page.Execute(w, metrics) + }) +} + +// Exposed returns a map of exposed metrics (see expvar package). 
+func Exposed() map[string]Metric { + m := map[string]Metric{} + expvar.Do(func(kv expvar.KeyValue) { + if metric, ok := kv.Value.(Metric); ok { + m[kv.Key] = metric + } + }) + return m +} diff --git a/vendor/github.com/zserge/metric/metric.go b/vendor/github.com/zserge/metric/metric.go new file mode 100644 index 000000000..658fef09c --- /dev/null +++ b/vendor/github.com/zserge/metric/metric.go @@ -0,0 +1,385 @@ +package metric + +import ( + "encoding/json" + "fmt" + "math" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" +) + +// To mock time in tests +var now = time.Now + +// Metric is a single meter (counter, gauge or histogram, optionally - with history) +type Metric interface { + Add(n float64) + String() string +} + +// metric is an extended private interface with some additional internal +// methods used by timeseries. Counters, gauges and histograms implement it. +type metric interface { + Metric + Reset() + Aggregate(roll int, samples []metric) +} + +var _, _, _ metric = &counter{}, &gauge{}, &histogram{} + +// NewCounter returns a counter metric that increments the value with each +// incoming number. +func NewCounter(frames ...string) Metric { + return newMetric(func() metric { return &counter{} }, frames...) +} + +// NewGauge returns a gauge metric that sums up the incoming values and returns +// mean/min/max of the resulting distribution. +func NewGauge(frames ...string) Metric { + return newMetric(func() metric { return &gauge{} }, frames...) +} + +// NewHistogram returns a histogram metric that calculates 50%, 90% and 99% +// percentiles of the incoming numbers. +func NewHistogram(frames ...string) Metric { + return newMetric(func() metric { return &histogram{} }, frames...) 
+} + +type timeseries struct { + sync.Mutex + now time.Time + size int + interval time.Duration + total metric + samples []metric +} + +func (ts *timeseries) Reset() { + ts.total.Reset() + for _, s := range ts.samples { + s.Reset() + } +} + +func (ts *timeseries) roll() { + t := now() + roll := int((t.Round(ts.interval).Sub(ts.now.Round(ts.interval))) / ts.interval) + ts.now = t + n := len(ts.samples) + if roll <= 0 { + return + } + if roll >= len(ts.samples) { + ts.Reset() + } else { + for i := 0; i < roll; i++ { + tmp := ts.samples[n-1] + for j := n - 1; j > 0; j-- { + ts.samples[j] = ts.samples[j-1] + } + ts.samples[0] = tmp + ts.samples[0].Reset() + } + ts.total.Aggregate(roll, ts.samples) + } +} + +func (ts *timeseries) Add(n float64) { + ts.Lock() + defer ts.Unlock() + ts.roll() + ts.total.Add(n) + ts.samples[0].Add(n) +} + +func (ts *timeseries) MarshalJSON() ([]byte, error) { + ts.Lock() + defer ts.Unlock() + ts.roll() + return json.Marshal(struct { + Interval float64 `json:"interval"` + Total Metric `json:"total"` + Samples []metric `json:"samples"` + }{float64(ts.interval) / float64(time.Second), ts.total, ts.samples}) +} + +func (ts *timeseries) String() string { + ts.Lock() + defer ts.Unlock() + ts.roll() + return ts.total.String() +} + +type multimetric []*timeseries + +func (mm multimetric) Add(n float64) { + for _, m := range mm { + m.Add(n) + } +} + +func (mm multimetric) MarshalJSON() ([]byte, error) { + b := []byte(`{"metrics":[`) + for i, m := range mm { + if i != 0 { + b = append(b, ',') + } + x, _ := json.Marshal(m) + b = append(b, x...) 
+ } + b = append(b, ']', '}') + return b, nil +} + +func (mm multimetric) String() string { + return mm[len(mm)-1].String() +} + +type counter struct { + count uint64 +} + +func (c *counter) String() string { return strconv.FormatFloat(c.value(), 'g', -1, 64) } +func (c *counter) Reset() { atomic.StoreUint64(&c.count, math.Float64bits(0)) } +func (c *counter) value() float64 { return math.Float64frombits(atomic.LoadUint64(&c.count)) } +func (c *counter) Add(n float64) { + for { + old := math.Float64frombits(atomic.LoadUint64(&c.count)) + new := old + n + if atomic.CompareAndSwapUint64(&c.count, math.Float64bits(old), math.Float64bits(new)) { + return + } + } +} +func (c *counter) MarshalJSON() ([]byte, error) { + return json.Marshal(struct { + Type string `json:"type"` + Count float64 `json:"count"` + }{"c", c.value()}) +} + +func (c *counter) Aggregate(roll int, samples []metric) { + c.Reset() + for _, s := range samples { + c.Add(s.(*counter).value()) + } +} + +type gauge struct { + sync.Mutex + value float64 + sum float64 + min float64 + max float64 + count int +} + +func (g *gauge) String() string { return strconv.FormatFloat(g.value, 'g', -1, 64) } +func (g *gauge) Reset() { + g.Lock() + defer g.Unlock() + g.value, g.count, g.sum, g.min, g.max = 0, 0, 0, 0, 0 +} +func (g *gauge) Add(n float64) { + g.Lock() + defer g.Unlock() + if n < g.min || g.count == 0 { + g.min = n + } + if n > g.max || g.count == 0 { + g.max = n + } + g.value = n + g.sum += n + g.count++ +} +func (g *gauge) MarshalJSON() ([]byte, error) { + g.Lock() + defer g.Unlock() + return json.Marshal(struct { + Type string `json:"type"` + Value float64 `json:"value"` + Mean float64 `json:"mean"` + Min float64 `json:"min"` + Max float64 `json:"max"` + }{"g", g.value, g.mean(), g.min, g.max}) +} +func (g *gauge) mean() float64 { + if g.count == 0 { + return 0 + } + return g.sum / float64(g.count) +} +func (g *gauge) Aggregate(roll int, samples []metric) { + g.Reset() + g.Lock() + defer g.Unlock() + 
for i := len(samples) - 1; i >= 0; i-- { + s := samples[i].(*gauge) + s.Lock() + if s.count == 0 { + s.Unlock() + continue + } + if g.min > s.min || g.count == 0 { + g.min = s.min + } + if g.max < s.max || g.count == 0 { + g.max = s.max + } + g.count += s.count + g.sum += s.sum + g.value = s.value + s.Unlock() + } +} + +const maxBins = 100 + +type bin struct { + value float64 + count float64 +} + +type histogram struct { + sync.Mutex + bins []bin + total float64 +} + +func (h *histogram) String() string { + return fmt.Sprintf(`{"p50":%g,"p90":%g,"p99":%g}`, h.quantile(0.5), h.quantile(0.9), h.quantile(0.99)) +} + +func (h *histogram) Reset() { + h.Lock() + defer h.Unlock() + h.bins = nil + h.total = 0 +} + +func (h *histogram) Add(n float64) { + h.Lock() + defer h.Unlock() + defer h.trim() + h.total = h.total + 1 + newbin := bin{value: n, count: 1} + for i := range h.bins { + if h.bins[i].value > n { + h.bins = append(h.bins[:i], append([]bin{newbin}, h.bins[i:]...)...) + return + } + } + + h.bins = append(h.bins, newbin) +} + +func (h *histogram) MarshalJSON() ([]byte, error) { + h.Lock() + defer h.Unlock() + return json.Marshal(struct { + Type string `json:"type"` + P50 float64 `json:"p50"` + P90 float64 `json:"p90"` + P99 float64 `json:"p99"` + }{"h", h.quantile(0.5), h.quantile(0.9), h.quantile(0.99)}) +} + +func (h *histogram) trim() { + for len(h.bins) > maxBins { + d := float64(0) + i := 0 + for j := 1; j < len(h.bins); j++ { + if dv := h.bins[j].value - h.bins[j-1].value; dv < d || j == 1 { + d = dv + i = j + } + } + count := h.bins[i-1].count + h.bins[i].count + merged := bin{ + value: (h.bins[i-1].value*h.bins[i-1].count + h.bins[i].value*h.bins[i].count) / count, + count: count, + } + h.bins = append(h.bins[:i-1], h.bins[i:]...) 
+ h.bins[i-1] = merged + } +} + +func (h *histogram) bin(q float64) bin { + count := q * h.total + for i := range h.bins { + count -= float64(h.bins[i].count) + if count <= 0 { + return h.bins[i] + } + } + return bin{} +} + +func (h *histogram) quantile(q float64) float64 { + return h.bin(q).value +} + +func (h *histogram) Aggregate(roll int, samples []metric) { + h.Lock() + defer h.Unlock() + alpha := 2 / float64(len(samples)+1) + h.total = 0 + for i := range h.bins { + h.bins[i].count = h.bins[i].count * math.Pow(1-alpha, float64(roll)) + h.total = h.total + h.bins[i].count + } +} + +func newTimeseries(builder func() metric, frame string) *timeseries { + var ( + totalNum, intervalNum int + totalUnit, intervalUnit rune + ) + units := map[rune]time.Duration{ + 's': time.Second, + 'm': time.Minute, + 'h': time.Hour, + 'd': time.Hour * 24, + 'w': time.Hour * 24 * 7, + 'M': time.Hour * 24 * 7 * 30, + 'y': time.Hour * 24 * 7 * 365, + } + fmt.Sscanf(frame, "%d%c%d%c", &totalNum, &totalUnit, &intervalNum, &intervalUnit) + interval := units[intervalUnit] * time.Duration(intervalNum) + if interval == 0 { + interval = time.Minute + } + totalDuration := units[totalUnit] * time.Duration(totalNum) + if totalDuration == 0 { + totalDuration = interval * 15 + } + n := int(totalDuration / interval) + samples := make([]metric, n, n) + for i := 0; i < n; i++ { + samples[i] = builder() + } + totalMetric := builder() + return ×eries{interval: interval, total: totalMetric, samples: samples} +} + +func newMetric(builder func() metric, frames ...string) Metric { + if len(frames) == 0 { + return builder() + } + if len(frames) == 1 { + return newTimeseries(builder, frames[0]) + } + mm := multimetric{} + for _, frame := range frames { + mm = append(mm, newTimeseries(builder, frame)) + } + sort.Slice(mm, func(i, j int) bool { + a, b := mm[i], mm[j] + return a.interval.Seconds()*float64(len(a.samples)) < b.interval.Seconds()*float64(len(b.samples)) + }) + return mm +} From 
3705eb30e36539827027c413ebeecd311cf2c356 Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 22 Jan 2019 15:34:53 +0800 Subject: [PATCH 198/302] Make golint happy --- metric/metricweb.go | 2 +- metric/nodemetricmap.go | 24 ++++++++++++------------ metric/nodemetricmap_test.go | 10 +++++----- metric/rpc.go | 4 ++-- metric/rpc_test.go | 2 +- rpc/rpcutil.go | 14 +++++++------- 6 files changed, 28 insertions(+), 28 deletions(-) diff --git a/metric/metricweb.go b/metric/metricweb.go index 97318356a..cca54f4b0 100644 --- a/metric/metricweb.go +++ b/metric/metricweb.go @@ -35,7 +35,7 @@ func collect(cc *CollectClient) (err error) { err = errors.Wrap(err, "gathering node metrics failed") return } - mm := make(MetricMap, 0) + mm := make(metricMap, 0) for _, mf := range mfs { mm[*mf.Name] = mf log.Debugf("gathered node: %v", mf) diff --git a/metric/nodemetricmap.go b/metric/nodemetricmap.go index 8d8875570..8405c79b3 100644 --- a/metric/nodemetricmap.go +++ b/metric/nodemetricmap.go @@ -24,19 +24,19 @@ import ( dto "github.com/prometheus/client_model/go" ) -// MetricMap is map from metric name to MetricFamily. -type MetricMap map[string]*dto.MetricFamily +// metricMap is map from metric name to MetricFamily. +type metricMap map[string]*dto.MetricFamily // NodeCrucialMetricMap is map[NodeID][MetricName]Value type NodeCrucialMetricMap map[proto.NodeID]map[string]float64 // FilterFunc is a function that knows how to filter a specific node // that match the metric limits. if node picked return true else false. -type FilterFunc func(key proto.NodeID, value MetricMap) bool +type FilterFunc func(key proto.NodeID, value metricMap) bool -// NodeMetricMap is sync.Map version of map[proto.NodeID]MetricMap. +// NodeMetricMap is sync.Map version of map[proto.NodeID]metricMap. type NodeMetricMap struct { - sync.Map // map[proto.NodeID]MetricMap + sync.Map // map[proto.NodeID]metricMap } // FilterNode return node id slice make filterFunc return true. 
@@ -46,7 +46,7 @@ func (nmm *NodeMetricMap) FilterNode(filterFunc FilterFunc) (ret []proto.NodeID) if !ok { return true // continue iteration } - metrics, ok := value.(MetricMap) + metrics, ok := value.(metricMap) if !ok { return true // continue iteration } @@ -60,8 +60,8 @@ func (nmm *NodeMetricMap) FilterNode(filterFunc FilterFunc) (ret []proto.NodeID) } // GetMetrics returns nodes metrics. -func (nmm *NodeMetricMap) GetMetrics(nodes []proto.NodeID) (metrics map[proto.NodeID]MetricMap) { - metrics = make(map[proto.NodeID]MetricMap) +func (nmm *NodeMetricMap) GetMetrics(nodes []proto.NodeID) (metrics map[proto.NodeID]metricMap) { + metrics = make(map[proto.NodeID]metricMap) for _, node := range nodes { var ok bool @@ -71,9 +71,9 @@ func (nmm *NodeMetricMap) GetMetrics(nodes []proto.NodeID) (metrics map[proto.No continue } - var nodeMetrics MetricMap + var nodeMetrics metricMap - if nodeMetrics, ok = rawNodeMetrics.(MetricMap); !ok { + if nodeMetrics, ok = rawNodeMetrics.(metricMap); !ok { continue } @@ -84,7 +84,7 @@ func (nmm *NodeMetricMap) GetMetrics(nodes []proto.NodeID) (metrics map[proto.No } // FilterCrucialMetrics filters crucial metrics and also add cpu_count -func (mfm *MetricMap) FilterCrucialMetrics() (ret map[string]float64) { +func (mfm *metricMap) FilterCrucialMetrics() (ret map[string]float64) { crucialMetricNameMap := map[string]string{ "node_memory_MemAvailable_bytes": "mem_avail", "node_load1": "load1", @@ -128,7 +128,7 @@ func (nmm *NodeMetricMap) GetCrucialMetrics() (ret NodeCrucialMetricMap) { if !ok { return true // continue iteration } - mfm, ok := value.(MetricMap) + mfm, ok := value.(metricMap) if !ok { return true // continue iteration } diff --git a/metric/nodemetricmap_test.go b/metric/nodemetricmap_test.go index d3dce7c8e..59ce5e4cb 100644 --- a/metric/nodemetricmap_test.go +++ b/metric/nodemetricmap_test.go @@ -28,15 +28,15 @@ import ( func TestCollectServer_FilterNode(t *testing.T) { log.SetLevel(log.DebugLevel) - filterTrue := 
func(key proto.NodeID, value MetricMap) bool { + filterTrue := func(key proto.NodeID, value metricMap) bool { log.Debugf("key: %s, value: %#v", key, value) return true } - filterFalse := func(key proto.NodeID, value MetricMap) bool { + filterFalse := func(key proto.NodeID, value metricMap) bool { log.Debugf("key: %s, value: %#v", key, value) return false } - filterMem1MB := func(key proto.NodeID, value MetricMap) bool { + filterMem1MB := func(key proto.NodeID, value metricMap) bool { log.Debugf("key: %s, value: %#v", key, value) var v *dto.MetricFamily v, ok := value["node_memory_bytes_total"] @@ -56,7 +56,7 @@ func TestCollectServer_FilterNode(t *testing.T) { Convey("filter node", t, func() { cc := NewCollectClient() mfs, _ := cc.Registry.Gather() - mm := make(MetricMap, 0) + mm := make(metricMap, 0) for _, mf := range mfs { mm[*mf.Name] = mf log.Debugf("gathered node: %v", mf) @@ -80,7 +80,7 @@ func TestCollectServer_FilterNode(t *testing.T) { Convey("filter metrics", t, func() { cc := NewCollectClient() mfs, _ := cc.Registry.Gather() - mm := make(MetricMap, 0) + mm := make(metricMap, 0) for _, mf := range mfs { mm[*mf.Name] = mf log.Debugf("gathered node: %v", mf) diff --git a/metric/rpc.go b/metric/rpc.go index 78aeac315..bab29ac66 100644 --- a/metric/rpc.go +++ b/metric/rpc.go @@ -51,7 +51,7 @@ func NewCollectClient() *CollectClient { // CollectServer is the Metric receiver side type CollectServer struct { - NodeMetric NodeMetricMap // map[proto.NodeID]MetricMap + NodeMetric NodeMetricMap // map[proto.NodeID]metricMap } // NewCollectServer returns a new CollectServer @@ -75,7 +75,7 @@ func (cs *CollectServer) UploadMetrics(req *proto.UploadMetricsReq, resp *proto. 
return } - mfm := make(MetricMap, len(req.MFBytes)) + mfm := make(metricMap, len(req.MFBytes)) log.Debugf("RPC received MFS len %d", len(req.MFBytes)) for _, mf := range req.MFBytes[:] { bufReader := bytes.NewReader(mf) diff --git a/metric/rpc_test.go b/metric/rpc_test.go index 90c7d7af3..a0ed6fdc3 100644 --- a/metric/rpc_test.go +++ b/metric/rpc_test.go @@ -64,7 +64,7 @@ func TestCollectClient_UploadMetrics(t *testing.T) { So(ok, ShouldBeTrue) //log.Debugf("NodeMetric:%#v", v) - m, _ := v.(MetricMap) + m, _ := v.(metricMap) mfb, err := cc.GatherMetricBytes() So(err, ShouldBeNil) So(len(m), ShouldEqual, len(mfb)) diff --git a/rpc/rpcutil.go b/rpc/rpcutil.go index 113d643d4..10b7a4650 100644 --- a/rpc/rpcutil.go +++ b/rpc/rpcutil.go @@ -167,16 +167,16 @@ func (c *Caller) CallNode( func recordRPCCost(startTime time.Time, method string, err error) { var ( - name, name_c string - val, val_c expvar.Var + name, nameC string + val, valC expvar.Var ) costTime := time.Since(startTime) if err == nil { name = "t_succ:" + method - name_c = "c_succ:" + method + nameC = "c_succ:" + method } else { name = "t_fail:" + method - name_c = "c_fail:" + method + nameC = "c_fail:" + method } // Optimistically, val will not be nil except the first Call of method // expvar uses sync.Map @@ -186,14 +186,14 @@ func recordRPCCost(startTime time.Time, method string, err error) { val = expvar.Get(name) if val == nil { expvar.Publish(name, mw.NewHistogram("10s1s", "1m5s", "1h1m")) - expvar.Publish(name_c, mw.NewCounter("10s1s", "1h1m")) + expvar.Publish(nameC, mw.NewCounter("10s1s", "1h1m")) } callRPCExpvarLock.Unlock() val = expvar.Get(name) } val.(mw.Metric).Add(costTime.Seconds()) - val_c = expvar.Get(name_c) - val_c.(mw.Metric).Add(1) + valC = expvar.Get(nameC) + valC.(mw.Metric).Add(1) return } From 6d9029526f307d8704aa5b5b9ad852c9d031dce4 Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 22 Jan 2019 15:41:45 +0800 Subject: [PATCH 199/302] Make golint happy --- metric/metricweb.go | 2 +- 
metric/nodemetricmap.go | 24 ++++++++++++------------ metric/nodemetricmap_test.go | 10 +++++----- metric/rpc.go | 4 ++-- metric/rpc_test.go | 2 +- 5 files changed, 21 insertions(+), 21 deletions(-) diff --git a/metric/metricweb.go b/metric/metricweb.go index cca54f4b0..8bbc092d1 100644 --- a/metric/metricweb.go +++ b/metric/metricweb.go @@ -35,7 +35,7 @@ func collect(cc *CollectClient) (err error) { err = errors.Wrap(err, "gathering node metrics failed") return } - mm := make(metricMap, 0) + mm := make(SimpleMetricMap, 0) for _, mf := range mfs { mm[*mf.Name] = mf log.Debugf("gathered node: %v", mf) diff --git a/metric/nodemetricmap.go b/metric/nodemetricmap.go index 8405c79b3..5f0757918 100644 --- a/metric/nodemetricmap.go +++ b/metric/nodemetricmap.go @@ -24,19 +24,19 @@ import ( dto "github.com/prometheus/client_model/go" ) -// metricMap is map from metric name to MetricFamily. -type metricMap map[string]*dto.MetricFamily +// SimpleMetricMap is map from metric name to MetricFamily. +type SimpleMetricMap map[string]*dto.MetricFamily // NodeCrucialMetricMap is map[NodeID][MetricName]Value type NodeCrucialMetricMap map[proto.NodeID]map[string]float64 // FilterFunc is a function that knows how to filter a specific node // that match the metric limits. if node picked return true else false. -type FilterFunc func(key proto.NodeID, value metricMap) bool +type FilterFunc func(key proto.NodeID, value SimpleMetricMap) bool -// NodeMetricMap is sync.Map version of map[proto.NodeID]metricMap. +// NodeMetricMap is sync.Map version of map[proto.NodeID]SimpleMetricMap. type NodeMetricMap struct { - sync.Map // map[proto.NodeID]metricMap + sync.Map // map[proto.NodeID]SimpleMetricMap } // FilterNode return node id slice make filterFunc return true. 
@@ -46,7 +46,7 @@ func (nmm *NodeMetricMap) FilterNode(filterFunc FilterFunc) (ret []proto.NodeID) if !ok { return true // continue iteration } - metrics, ok := value.(metricMap) + metrics, ok := value.(SimpleMetricMap) if !ok { return true // continue iteration } @@ -60,8 +60,8 @@ func (nmm *NodeMetricMap) FilterNode(filterFunc FilterFunc) (ret []proto.NodeID) } // GetMetrics returns nodes metrics. -func (nmm *NodeMetricMap) GetMetrics(nodes []proto.NodeID) (metrics map[proto.NodeID]metricMap) { - metrics = make(map[proto.NodeID]metricMap) +func (nmm *NodeMetricMap) GetMetrics(nodes []proto.NodeID) (metrics map[proto.NodeID]SimpleMetricMap) { + metrics = make(map[proto.NodeID]SimpleMetricMap) for _, node := range nodes { var ok bool @@ -71,9 +71,9 @@ func (nmm *NodeMetricMap) GetMetrics(nodes []proto.NodeID) (metrics map[proto.No continue } - var nodeMetrics metricMap + var nodeMetrics SimpleMetricMap - if nodeMetrics, ok = rawNodeMetrics.(metricMap); !ok { + if nodeMetrics, ok = rawNodeMetrics.(SimpleMetricMap); !ok { continue } @@ -84,7 +84,7 @@ func (nmm *NodeMetricMap) GetMetrics(nodes []proto.NodeID) (metrics map[proto.No } // FilterCrucialMetrics filters crucial metrics and also add cpu_count -func (mfm *metricMap) FilterCrucialMetrics() (ret map[string]float64) { +func (mfm *SimpleMetricMap) FilterCrucialMetrics() (ret map[string]float64) { crucialMetricNameMap := map[string]string{ "node_memory_MemAvailable_bytes": "mem_avail", "node_load1": "load1", @@ -128,7 +128,7 @@ func (nmm *NodeMetricMap) GetCrucialMetrics() (ret NodeCrucialMetricMap) { if !ok { return true // continue iteration } - mfm, ok := value.(metricMap) + mfm, ok := value.(SimpleMetricMap) if !ok { return true // continue iteration } diff --git a/metric/nodemetricmap_test.go b/metric/nodemetricmap_test.go index 59ce5e4cb..bb3d78bd8 100644 --- a/metric/nodemetricmap_test.go +++ b/metric/nodemetricmap_test.go @@ -28,15 +28,15 @@ import ( func TestCollectServer_FilterNode(t *testing.T) { 
log.SetLevel(log.DebugLevel) - filterTrue := func(key proto.NodeID, value metricMap) bool { + filterTrue := func(key proto.NodeID, value SimpleMetricMap) bool { log.Debugf("key: %s, value: %#v", key, value) return true } - filterFalse := func(key proto.NodeID, value metricMap) bool { + filterFalse := func(key proto.NodeID, value SimpleMetricMap) bool { log.Debugf("key: %s, value: %#v", key, value) return false } - filterMem1MB := func(key proto.NodeID, value metricMap) bool { + filterMem1MB := func(key proto.NodeID, value SimpleMetricMap) bool { log.Debugf("key: %s, value: %#v", key, value) var v *dto.MetricFamily v, ok := value["node_memory_bytes_total"] @@ -56,7 +56,7 @@ func TestCollectServer_FilterNode(t *testing.T) { Convey("filter node", t, func() { cc := NewCollectClient() mfs, _ := cc.Registry.Gather() - mm := make(metricMap, 0) + mm := make(SimpleMetricMap, 0) for _, mf := range mfs { mm[*mf.Name] = mf log.Debugf("gathered node: %v", mf) @@ -80,7 +80,7 @@ func TestCollectServer_FilterNode(t *testing.T) { Convey("filter metrics", t, func() { cc := NewCollectClient() mfs, _ := cc.Registry.Gather() - mm := make(metricMap, 0) + mm := make(SimpleMetricMap, 0) for _, mf := range mfs { mm[*mf.Name] = mf log.Debugf("gathered node: %v", mf) diff --git a/metric/rpc.go b/metric/rpc.go index bab29ac66..30977a5bf 100644 --- a/metric/rpc.go +++ b/metric/rpc.go @@ -51,7 +51,7 @@ func NewCollectClient() *CollectClient { // CollectServer is the Metric receiver side type CollectServer struct { - NodeMetric NodeMetricMap // map[proto.NodeID]metricMap + NodeMetric NodeMetricMap // map[proto.NodeID]SimpleMetricMap } // NewCollectServer returns a new CollectServer @@ -75,7 +75,7 @@ func (cs *CollectServer) UploadMetrics(req *proto.UploadMetricsReq, resp *proto. 
return } - mfm := make(metricMap, len(req.MFBytes)) + mfm := make(SimpleMetricMap, len(req.MFBytes)) log.Debugf("RPC received MFS len %d", len(req.MFBytes)) for _, mf := range req.MFBytes[:] { bufReader := bytes.NewReader(mf) diff --git a/metric/rpc_test.go b/metric/rpc_test.go index a0ed6fdc3..d90f185e8 100644 --- a/metric/rpc_test.go +++ b/metric/rpc_test.go @@ -64,7 +64,7 @@ func TestCollectClient_UploadMetrics(t *testing.T) { So(ok, ShouldBeTrue) //log.Debugf("NodeMetric:%#v", v) - m, _ := v.(metricMap) + m, _ := v.(SimpleMetricMap) mfb, err := cc.GatherMetricBytes() So(err, ShouldBeNil) So(len(m), ShouldEqual, len(mfb)) From 926273855b489136948c6f85f926a15ba61039e0 Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 22 Jan 2019 15:53:25 +0800 Subject: [PATCH 200/302] Format tab --- route/acl.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/route/acl.go b/route/acl.go index 8f1245c58..b21d6470c 100644 --- a/route/acl.go +++ b/route/acl.go @@ -41,8 +41,8 @@ ACLs: Miner -> Miner, Kayak.Call(): ACL: Open to Miner Leader. - Miner -> BP, Metric.UploadMetrics(): - ACL: Open to Registered Miner + Miner -> BP, Metric.UploadMetrics(): + ACL: Open to Registered Miner BP -> BP, Exchange NodeInfo, Kayak.Call(): ACL: Open to BP From d694942dd8ae2db8709403dc635db5ca248aa6f0 Mon Sep 17 00:00:00 2001 From: zeqing-guo Date: Tue, 22 Jan 2019 16:11:56 +0800 Subject: [PATCH 201/302] Modify wait interval --- cmd/cql/main.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 431577e2d..887489d65 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -109,6 +109,11 @@ func init() { // set command name of usql text.CommandName = "covenantsql" + // TODO(leventeliu): discover more specific confirmation duration from config. We don't have + // enough informations from config to do that currently, so just use a fixed and long enough + // duration. 
+ waitTxConfirmationMaxDuration = 20 * conf.GConf.BPPeriod + // register SQLite3 database drivers.Register("sqlite3", drivers.Driver{ AllowMultilineComments: true, @@ -177,7 +182,7 @@ func init() { log.Infof("connecting to %#v", url.DSN) // wait for database to become ready - ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) + ctx, cancel := context.WithTimeout(context.Background(), waitTxConfirmationMaxDuration) defer cancel() if err = client.WaitDBCreation(ctx, dsn); err != nil { return @@ -245,11 +250,6 @@ func main() { return } - // TODO(leventeliu): discover more specific confirmation duration from config. We don't have - // enough informations from config to do that currently, so just use a fixed and long enough - // duration. - waitTxConfirmationMaxDuration = 20 * conf.GConf.BPPeriod - if getBalance { var stableCoinBalance, covenantCoinBalance uint64 From fda0fb6c7edb50f55647b22831fa9bb9b0d6a13a Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 22 Jan 2019 16:27:04 +0800 Subject: [PATCH 202/302] Add -metric-web "${METRIC_WEB_ADDR}" in docker-compose --- bin/docker-entry.sh | 6 +++--- docker-compose.yml | 12 ++++++++++++ 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/bin/docker-entry.sh b/bin/docker-entry.sh index 4e760879b..13d43b7ce 100755 --- a/bin/docker-entry.sh +++ b/bin/docker-entry.sh @@ -1,15 +1,15 @@ #!/bin/sh -echo nameserver 1.1.1.1 > /etc/resolv.conf +#echo nameserver 1.1.1.1 > /etc/resolv.conf [ -s "${COVENANT_ALERT}" ] && [ -x "${COVENANT_ALERT}" ] && (eval "${COVENANT_ALERT}") case "${COVENANT_ROLE}" in miner) - exec /app/cql-minerd -config "${COVENANT_CONF}" "${@}" + exec /app/cql-minerd -config "${COVENANT_CONF}" -metric-web "${METRIC_WEB_ADDR}" "${@}" ;; blockproducer) - exec /app/cqld -config "${COVENANT_CONF}" "${@}" + exec /app/cqld -config "${COVENANT_CONF}" -metric-web "${METRIC_WEB_ADDR}" "${@}" ;; observer) MAGIC_DOLLAR='$' envsubst < /etc/nginx/conf.d/servers/explorer.conf.template > 
/etc/nginx/conf.d/default.conf diff --git a/docker-compose.yml b/docker-compose.yml index 592c67902..028a4deba 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -7,9 +7,11 @@ services: restart: always ports: - "11099:4661" + - "12099:4665" environment: COVENANT_ROLE: blockproducer COVENANT_CONF: ./node_0/config.yaml + METRIC_WEB_ADDR: "0.0.0.0:4665" volumes: - ./test/service/node_0/:/app/node_0/ networks: @@ -26,9 +28,11 @@ services: restart: always ports: - "11100:4661" + - "12100:4665" environment: COVENANT_ROLE: blockproducer COVENANT_CONF: ./node_1/config.yaml + METRIC_WEB_ADDR: "0.0.0.0:4665" volumes: - ./test/service/node_1/:/app/node_1/ networks: @@ -45,9 +49,11 @@ services: restart: always ports: - "11101:4661" + - "12101:4665" environment: COVENANT_ROLE: blockproducer COVENANT_CONF: ./node_2/config.yaml + METRIC_WEB_ADDR: "0.0.0.0:4665" volumes: - ./test/service/node_2/:/app/node_2/ networks: @@ -64,9 +70,11 @@ services: restart: always ports: - "11102:4661" + - "12102:4665" environment: COVENANT_ROLE: miner COVENANT_CONF: ./node_miner_0/config.yaml + METRIC_WEB_ADDR: "0.0.0.0:4665" volumes: - ./test/service/node_miner_0/:/app/node_miner_0/ networks: @@ -83,9 +91,11 @@ services: restart: always ports: - "11103:4661" + - "12103:4665" environment: COVENANT_ROLE: miner COVENANT_CONF: ./node_miner_1/config.yaml + METRIC_WEB_ADDR: "0.0.0.0:4665" volumes: - ./test/service/node_miner_1/:/app/node_miner_1/ networks: @@ -102,9 +112,11 @@ services: restart: always ports: - "11104:4661" + - "12104:4665" environment: COVENANT_ROLE: miner COVENANT_CONF: ./node_miner_2/config.yaml + METRIC_WEB_ADDR: "0.0.0.0:4665" volumes: - ./test/service/node_miner_2/:/app/node_miner_2/ networks: From 817397b037d72ef63373ff89d1e45e72755f6610 Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 22 Jan 2019 16:31:50 +0800 Subject: [PATCH 203/302] Move usql register to independent func. 
--- cmd/cql/main.go | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 887489d65..11dcea29a 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -105,15 +105,10 @@ func (v *varsFlag) Set(value string) error { return nil } -func init() { +func usqlRegister() { // set command name of usql text.CommandName = "covenantsql" - // TODO(leventeliu): discover more specific confirmation duration from config. We don't have - // enough informations from config to do that currently, so just use a fixed and long enough - // duration. - waitTxConfirmationMaxDuration = 20 * conf.GConf.BPPeriod - // register SQLite3 database drivers.Register("sqlite3", drivers.Driver{ AllowMultilineComments: true, @@ -209,6 +204,9 @@ func init() { Aliases: []string{}, Override: "", }) +} + +func init() { flag.StringVar(&dsn, "dsn", "", "database url") flag.StringVar(&command, "command", "", "run only single command (SQL or usql internal command) and exit") @@ -250,6 +248,13 @@ func main() { return } + // TODO(leventeliu): discover more specific confirmation duration from config. We don't have + // enough informations from config to do that currently, so just use a fixed and long enough + // duration. + waitTxConfirmationMaxDuration = 20 * conf.GConf.BPPeriod + + usqlRegister() + if getBalance { var stableCoinBalance, covenantCoinBalance uint64 From 770108c13323c8c765116b83c836f338a2162057 Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 22 Jan 2019 17:13:21 +0800 Subject: [PATCH 204/302] Use iota for KB, MB... 
--- utils/big.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/utils/big.go b/utils/big.go index f1e84e9b8..2aa8bb2b0 100644 --- a/utils/big.go +++ b/utils/big.go @@ -23,20 +23,21 @@ import ( ) const ( + _ = iota // KB is 1024 Bytes - KB int64 = 1024 + KB int64 = 1 << (10 * iota) // MB is 1024 KB - MB int64 = KB * 1024 + MB // GB is 1024 MB - GB int64 = MB * 1024 + GB // TB is 1024 GB - TB int64 = GB * 1024 + TB // PB is 1024 TB - PB int64 = TB * 1024 + PB // EB is 1024 PB - EB int64 = TB * 1024 + EB // ZB is 1024 EB - ZB int64 = TB * 1024 + ZB ) // Various big integer limit values. From 310c17e9b710377d1e4489e338faa704da7d3abe Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 22 Jan 2019 17:23:25 +0800 Subject: [PATCH 205/302] Fix ZB which will overflow int64 --- utils/big.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/utils/big.go b/utils/big.go index 2aa8bb2b0..0504b2596 100644 --- a/utils/big.go +++ b/utils/big.go @@ -36,8 +36,6 @@ const ( PB // EB is 1024 PB EB - // ZB is 1024 EB - ZB ) // Various big integer limit values. From 23ca23b763016296244064df4bf2ab44aa52d8ed Mon Sep 17 00:00:00 2001 From: Ggicci Date: Tue, 22 Jan 2019 17:48:25 +0800 Subject: [PATCH 206/302] Upgrade .gitlab-ci.yml --- .gitlab-ci.yml | 12 +----------- alltest.sh | 9 +++++---- 2 files changed, 6 insertions(+), 15 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index bfa109f6d..a8ac0214e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -24,17 +24,7 @@ before_script: test-my-project: stage: test - script: - # - ./alltest.sh - - make clean - - make -j6 bp miner observer - - go test -v -race -failfast -parallel 16 -cpu 16 $(go list ./... 
| grep -v "/vendor/") -coverprofile cover.out - - gocovmerge cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f cover.out - - bash <(curl -s https://codecov.io/bash) - - go test -bench=^BenchmarkPersistentCaller_Call$ -run ^$ ./rpc/ - - bash cleanupDB.sh || true - - go test -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ - - bash cleanupDB.sh || true + script: ./alltest.sh compatibility-testnet: stage: test diff --git a/alltest.sh b/alltest.sh index 4ee8d5938..b2f74d45f 100755 --- a/alltest.sh +++ b/alltest.sh @@ -19,20 +19,21 @@ test::package() { main() { make clean - make use_all_cores + make -j6 bp miner observer # test package by package for package in $(go list ./... | grep -v "/vendor/"); do test::package "${package}" done + gocovmerge *.cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f *.cover.out + bash <(curl -s https://codecov.io/bash) + # some benchmarks go test -bench=^BenchmarkPersistentCaller_Call$ -run ^$ ./rpc/ bash cleanupDB.sh || true go test -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ - go test -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ - gocovmerge *.cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f *.cover.out - bash <(curl -s https://codecov.io/bash) + bash cleanupDB.sh || true } main "$@" From aa3d6846288577b470a32b30236cd5f6c7cdf913 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Tue, 22 Jan 2019 17:49:29 +0800 Subject: [PATCH 207/302] Format gocode --- cmd/cqld/main.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/cqld/main.go b/cmd/cqld/main.go index a73597ad3..75205e5f1 100644 --- a/cmd/cqld/main.go +++ b/cmd/cqld/main.go @@ -29,10 +29,10 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + 
"github.com/CovenantSQL/CovenantSQL/metric" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" - "github.com/CovenantSQL/CovenantSQL/metric" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/pkg/errors" @@ -64,10 +64,10 @@ var ( showVersion bool configFile string - wsapiAddr string - mode string // "normal", "api" + wsapiAddr string + mode string // "normal", "api" - logLevel string + logLevel string ) const name = `cqld` From 8db435d8047d2c4b5f24a98f68c05312d6ac3060 Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 22 Jan 2019 18:31:23 +0800 Subject: [PATCH 208/302] Use 114 DNS for default, docker default DNS will make runtime crash in net._C2func_getaddrinfo. Dig it later --- bin/docker-entry.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/docker-entry.sh b/bin/docker-entry.sh index 13d43b7ce..cb7619ed8 100755 --- a/bin/docker-entry.sh +++ b/bin/docker-entry.sh @@ -1,6 +1,6 @@ #!/bin/sh -#echo nameserver 1.1.1.1 > /etc/resolv.conf +echo nameserver 114.114.114.114 > /etc/resolv.conf [ -s "${COVENANT_ALERT}" ] && [ -x "${COVENANT_ALERT}" ] && (eval "${COVENANT_ALERT}") From 095d424d3ede972813e39ae15849ffaa8e1c5ad3 Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 22 Jan 2019 20:44:19 +0800 Subject: [PATCH 209/302] GetCurrentBP also return BP follower --- rpc/rpcutil.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/rpc/rpcutil.go b/rpc/rpcutil.go index 10b7a4650..2c4763e0c 100644 --- a/rpc/rpcutil.go +++ b/rpc/rpcutil.go @@ -356,8 +356,7 @@ func GetCurrentBP() (bpNodeID proto.NodeID, err error) { ID: localNodeID, Roles: []proto.ServerRole{ proto.Leader, - // only leader is capable of allocating database in current implementation - //proto.Follower, + proto.Follower, }, Count: 1, } From 811f161cf7f3f6863f56642a4aa380caca933ef2 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Wed, 23 Jan 2019 16:21:42 
+0800 Subject: [PATCH 210/302] Remove mode flag and other redundant code 1. remove mode flag from cqld command; 2. type define bp.RunMode, don't use string representation; 3. move registerNodeToBP to rpcutils.go; 4. remove redundant calls to registerNodeToBP. --- blockproducer/chain.go | 6 ++--- blockproducer/config.go | 16 ++++++++++-- cmd/cql-minerd/node.go | 56 +-------------------------------------- cmd/cqld/bootstrap.go | 21 +++++++++++---- cmd/cqld/main.go | 58 +---------------------------------------- rpc/rpcutil.go | 52 ++++++++++++++++++++++++++++++++++++ 6 files changed, 87 insertions(+), 122 deletions(-) diff --git a/blockproducer/chain.go b/blockproducer/chain.go index 2f676f3a2..8b8def50a 100644 --- a/blockproducer/chain.go +++ b/blockproducer/chain.go @@ -78,7 +78,7 @@ type Chain struct { headBranch *branch branches []*branch txPool map[hash.Hash]pi.Transaction - mode string + mode RunMode } // NewChain creates a new blockchain. @@ -191,7 +191,7 @@ func NewChainWithContext(ctx context.Context, cfg *Config) (c *Chain, err error) } // Setup peer list - if localBPInfo, bpInfos, err = buildBlockProducerInfos(cfg.NodeID, cfg.Peers, cfg.Mode == "api"); err != nil { + if localBPInfo, bpInfos, err = buildBlockProducerInfos(cfg.NodeID, cfg.Peers, cfg.Mode == APINodeMode); err != nil { return } if t = cfg.ConfirmThreshold; t <= 0.0 { @@ -356,7 +356,7 @@ func (c *Chain) advanceNextHeight(now time.Time, d time.Duration) { defer c.increaseNextHeight() // Skip if it's not my turn - if c.mode == "api" || !c.isMyTurn() { + if c.mode == APINodeMode || !c.isMyTurn() { return } // Normally, a block producing should start right after the new period, but more time may also diff --git a/blockproducer/config.go b/blockproducer/config.go index 1cb6b03dc..d6f5befb7 100644 --- a/blockproducer/config.go +++ b/blockproducer/config.go @@ -24,9 +24,21 @@ import ( "github.com/CovenantSQL/CovenantSQL/types" ) +// RunMode defines modes that a bp can run as. 
+type RunMode int + +const ( + // BPMode is the default and normal mode. + BPMode RunMode = iota + + // APINodeMode makes the bp behaviour like an API gateway. It becomes an API + // node, who syncs data from the bp network and exposes JSON-RPC API to users. + APINodeMode +) + // Config is the main chain configuration. type Config struct { - Mode string + Mode RunMode Genesis *types.BPBlock DataFile string @@ -46,7 +58,7 @@ func NewConfig(genesis *types.BPBlock, dataFile string, server *rpc.Server, peers *proto.Peers, nodeID proto.NodeID, period time.Duration, tick time.Duration) *Config { config := Config{ - Mode: "normal", + Mode: BPMode, Genesis: genesis, DataFile: dataFile, Server: server, diff --git a/cmd/cql-minerd/node.go b/cmd/cql-minerd/node.go index a859440ff..dd6580523 100644 --- a/cmd/cql-minerd/node.go +++ b/cmd/cql-minerd/node.go @@ -19,18 +19,14 @@ package main import ( "fmt" "os" - "strings" "syscall" "time" "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - kt "github.com/CovenantSQL/CovenantSQL/kayak/types" - "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/utils/log" - "github.com/pkg/errors" "golang.org/x/crypto/ssh/terminal" ) @@ -56,7 +52,7 @@ func initNode() (server *rpc.Server, err error) { // init kms routing route.InitKMS(conf.GConf.PubKeyStoreFile) - err = registerNodeToBP(30 * time.Second) + err = rpc.RegisterNodeToBP(30 * time.Second) if err != nil { log.Fatalf("register node to BP failed: %v", err) } @@ -83,53 +79,3 @@ func createServer(privateKeyPath, pubKeyStorePath string, masterKey []byte, list return } - -func registerNodeToBP(timeout time.Duration) (err error) { - // get local node id - localNodeID, err := kms.GetLocalNodeID() - if err != nil { - err = errors.Wrap(err, "register node to BP") - return - } - - // get local node info - localNodeInfo, err := 
kms.GetNodeInfo(localNodeID) - if err != nil { - err = errors.Wrap(err, "register node to BP") - return - } - - log.WithField("node", localNodeInfo).Debug("construct local node info") - - pingWaitCh := make(chan proto.NodeID) - bpNodeIDs := route.GetBPs() - for _, bpNodeID := range bpNodeIDs { - go func(ch chan proto.NodeID, id proto.NodeID) { - for { - err := rpc.PingBP(localNodeInfo, id) - if err == nil { - log.Infof("ping BP succeed: %v", localNodeInfo) - ch <- id - return - } - if strings.Contains(err.Error(), kt.ErrNotLeader.Error()) { - log.Debug("stop ping non leader BP node") - return - } - - log.Warnf("ping BP failed: %v", err) - time.Sleep(3 * time.Second) - } - }(pingWaitCh, bpNodeID) - } - - select { - case bp := <-pingWaitCh: - close(pingWaitCh) - log.WithField("BP", bp).Infof("ping BP succeed") - case <-time.After(timeout): - return errors.New("ping BP timeout") - } - - return -} diff --git a/cmd/cqld/bootstrap.go b/cmd/cqld/bootstrap.go index 1e15f12dc..b93eb8617 100644 --- a/cmd/cqld/bootstrap.go +++ b/cmd/cqld/bootstrap.go @@ -79,8 +79,13 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { return } - if mode == "api" { - if err = registerNodeToBP(30 * time.Second); err != nil { + mode := bp.BPMode + if wsapiAddr != "" { + mode = bp.APINodeMode + } + + if mode == bp.APINodeMode { + if err = rpc.RegisterNodeToBP(30 * time.Second); err != nil { log.WithError(err).Fatal("register node to BP") return } @@ -105,7 +110,7 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { server.Stop() }() - if mode == "normal" { + if mode == bp.BPMode { // init storage log.Info("init storage") var st *LocalStorage @@ -169,8 +174,14 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { log.Info(conf.StartSucceedMessage) // start json-rpc server - log.Info("wsapi: start service") - go api.Serve(wsapiAddr, conf.GConf.BP.ChainFileName) + if mode == bp.APINodeMode { + log.Info("wsapi: start service") + go func() { + if err 
:= api.Serve(wsapiAddr, conf.GConf.BP.ChainFileName); err != nil { + log.WithError(err).Error("wsapi: start service") + } + }() + } signalCh := make(chan os.Signal, 1) signal.Notify( diff --git a/cmd/cqld/main.go b/cmd/cqld/main.go index 75205e5f1..68c0a19f5 100644 --- a/cmd/cqld/main.go +++ b/cmd/cqld/main.go @@ -22,20 +22,14 @@ import ( "math/rand" "os" "runtime" - "strings" "time" "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" - kt "github.com/CovenantSQL/CovenantSQL/kayak/types" "github.com/CovenantSQL/CovenantSQL/metric" - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/route" - "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" - "github.com/pkg/errors" ) const logo = ` @@ -65,7 +59,6 @@ var ( configFile string wsapiAddr string - mode string // "normal", "api" logLevel string ) @@ -84,8 +77,7 @@ func init() { flag.StringVar(&memProfile, "mem-profile", "", "Path to file for memory profiling information") flag.StringVar(&metricWeb, "metric-web", "", "Address and port to get internal metrics") - flag.StringVar(&wsapiAddr, "wsapi", "", "Address of the websocket JSON-RPC API") - flag.StringVar(&mode, "mode", "normal", "Run mode, e.g. 
normal, api") + flag.StringVar(&wsapiAddr, "wsapi", "", "Address of the websocket JSON-RPC API, run as API Node") flag.StringVar(&logLevel, "log-level", "", "Service log level") flag.Usage = func() { @@ -153,51 +145,3 @@ func main() { log.Info("server stopped") } - -func registerNodeToBP(timeout time.Duration) (err error) { - // get local node id - localNodeID, err := kms.GetLocalNodeID() - if err != nil { - return errors.WithMessage(err, "get local node id") - } - - // get local node info - localNodeInfo, err := kms.GetNodeInfo(localNodeID) - if err != nil { - return errors.WithMessage(err, "get local node info") - } - - log.WithField("node", localNodeInfo).Debug("construct local node info") - - pingWaitCh := make(chan proto.NodeID) - bpNodeIDs := route.GetBPs() - for _, bpNodeID := range bpNodeIDs { - go func(ch chan proto.NodeID, id proto.NodeID) { - for { - err := rpc.PingBP(localNodeInfo, id) - if err == nil { - log.WithField("node", localNodeInfo).Info("ping BP node") - ch <- id - return - } - if strings.Contains(err.Error(), kt.ErrNotLeader.Error()) { - log.Debug("stop ping non-leader BP node") - return - } - - log.WithField("node", localNodeInfo).WithError(err).Error("ping BP node") - time.Sleep(3 * time.Second) - } - }(pingWaitCh, bpNodeID) - } - - select { - case bp := <-pingWaitCh: - close(pingWaitCh) - log.WithField("BP", bp).Infof("ping BP node") - case <-time.After(timeout): - return errors.New("ping BP timeout") - } - - return -} diff --git a/rpc/rpcutil.go b/rpc/rpcutil.go index 10b7a4650..147a32726 100644 --- a/rpc/rpcutil.go +++ b/rpc/rpcutil.go @@ -28,6 +28,7 @@ import ( "time" "github.com/CovenantSQL/CovenantSQL/crypto/kms" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/utils/log" @@ -401,3 +402,54 @@ func RequestBP(method string, req interface{}, resp interface{}) (err error) { } return NewCaller().CallNode(bp, 
method, req, resp) } + +// RegisterNodeToBP registers the current node to bp network. +func RegisterNodeToBP(timeout time.Duration) (err error) { + // get local node id + localNodeID, err := kms.GetLocalNodeID() + if err != nil { + err = errors.Wrap(err, "register node to BP") + return + } + + // get local node info + localNodeInfo, err := kms.GetNodeInfo(localNodeID) + if err != nil { + err = errors.Wrap(err, "register node to BP") + return + } + + log.WithField("node", localNodeInfo).Debug("construct local node info") + + pingWaitCh := make(chan proto.NodeID) + bpNodeIDs := route.GetBPs() + for _, bpNodeID := range bpNodeIDs { + go func(ch chan proto.NodeID, id proto.NodeID) { + for { + err := PingBP(localNodeInfo, id) + if err == nil { + log.Infof("ping BP succeed: %v", localNodeInfo) + ch <- id + return + } + if strings.Contains(err.Error(), kt.ErrNotLeader.Error()) { + log.Debug("stop ping non leader BP node") + return + } + + log.Warnf("ping BP failed: %v", err) + time.Sleep(3 * time.Second) + } + }(pingWaitCh, bpNodeID) + } + + select { + case bp := <-pingWaitCh: + close(pingWaitCh) + log.WithField("BP", bp).Infof("ping BP succeed") + case <-time.After(timeout): + return errors.New("ping BP timeout") + } + + return +} From 7e8cde11a3896677419511b50000f6553fd0f3ae Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 23 Jan 2019 16:27:47 +0800 Subject: [PATCH 211/302] Add osusergo build tag --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 54ab5bef7..34a2a0e4a 100644 --- a/Makefile +++ b/Makefile @@ -126,8 +126,8 @@ endif version := $(branch)-$(GIT_COMMIT)-$(builddate) -tags := $(platform) sqlite_omit_load_extension -testtags := $(platform) sqlite_omit_load_extension testbinary +tags := $(platform) sqlite_omit_load_extension osusergo +testtags := $(tags) testbinary test_flags := -coverpkg github.com/CovenantSQL/CovenantSQL/... 
-cover -race -c ldflags_role_bp := -X main.version=$(version) -X github.com/CovenantSQL/CovenantSQL/conf.RoleTag=B $$GOLDFLAGS From 15ea0a0df68f92574dae1619cd51dbaf4776c3db Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 23 Jan 2019 16:40:08 +0800 Subject: [PATCH 212/302] Add HomeDirExpand func in utils for home dir(~) expand --- utils/path.go | 14 ++++++++++++++ utils/path_test.go | 17 +++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/utils/path.go b/utils/path.go index 3b2e44192..624d7dab8 100644 --- a/utils/path.go +++ b/utils/path.go @@ -19,7 +19,9 @@ package utils import ( "io" "os" + "os/user" "path/filepath" + "strings" ) // CopyFile copies from src to dst until either EOF is reached @@ -46,3 +48,15 @@ func CopyFile(src, dst string) (int64, error) { defer df.Close() return io.Copy(df, sf) } + +// HomeDirExpand expands the tilde (~) in the front of a path to a the supplied +// directory. +func HomeDirExpand(u *user.User, path string) string { + if path == "~" { + return u.HomeDir + } else if strings.HasPrefix(path, "~/") { + return filepath.Join(u.HomeDir, strings.TrimPrefix(path, "~/")) + } + + return path +} diff --git a/utils/path_test.go b/utils/path_test.go index b0c4fa8b3..62016f528 100644 --- a/utils/path_test.go +++ b/utils/path_test.go @@ -19,6 +19,7 @@ package utils import ( "io/ioutil" "os" + "os/user" "testing" . 
"github.com/smartystreets/goconvey/convey" @@ -47,3 +48,19 @@ func TestCopyFile(t *testing.T) { So(n, ShouldBeZeroValue) }) } + +func TestHomeDirExpand(t *testing.T) { + Convey("expand ~ dir", t, func() { + usr, err := user.Current() + So(err, ShouldBeNil) + + homeDir := HomeDirExpand(usr, "~") + So(homeDir, ShouldEqual, usr.HomeDir) + + fullFilepathWithHome := HomeDirExpand(usr, "~/.local") + So(fullFilepathWithHome, ShouldEqual, usr.HomeDir+"/.local") + + fullFilepathRaw := HomeDirExpand(usr, "/dev/null") + So(fullFilepathRaw, ShouldEqual, "/dev/null") + }) +} From fe4c4e955b507f9700526cf93163b2ac28783241 Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 23 Jan 2019 17:15:28 +0800 Subject: [PATCH 213/302] Remove user params in HomeDirExpand func. --- utils/path.go | 15 ++++++++++----- utils/path_test.go | 6 +++--- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/utils/path.go b/utils/path.go index 624d7dab8..e0ad6f19c 100644 --- a/utils/path.go +++ b/utils/path.go @@ -49,13 +49,18 @@ func CopyFile(src, dst string) (int64, error) { return io.Copy(df, sf) } -// HomeDirExpand expands the tilde (~) in the front of a path to a the supplied -// directory. -func HomeDirExpand(u *user.User, path string) string { +// HomeDirExpand tries to expand the tilde (~) in the front of a path +// to a fullpath directory. 
+func HomeDirExpand(path string) string { + usr, err := user.Current() + if err != nil { + return path + } + if path == "~" { - return u.HomeDir + return usr.HomeDir } else if strings.HasPrefix(path, "~/") { - return filepath.Join(u.HomeDir, strings.TrimPrefix(path, "~/")) + return filepath.Join(usr.HomeDir, strings.TrimPrefix(path, "~/")) } return path diff --git a/utils/path_test.go b/utils/path_test.go index 62016f528..04ff56d6f 100644 --- a/utils/path_test.go +++ b/utils/path_test.go @@ -54,13 +54,13 @@ func TestHomeDirExpand(t *testing.T) { usr, err := user.Current() So(err, ShouldBeNil) - homeDir := HomeDirExpand(usr, "~") + homeDir := HomeDirExpand("~") So(homeDir, ShouldEqual, usr.HomeDir) - fullFilepathWithHome := HomeDirExpand(usr, "~/.local") + fullFilepathWithHome := HomeDirExpand("~/.local") So(fullFilepathWithHome, ShouldEqual, usr.HomeDir+"/.local") - fullFilepathRaw := HomeDirExpand(usr, "/dev/null") + fullFilepathRaw := HomeDirExpand("/dev/null") So(fullFilepathRaw, ShouldEqual, "/dev/null") }) } From fd58b891626e36c2d6fa185c367d0505e3cc1cd8 Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 23 Jan 2019 17:33:32 +0800 Subject: [PATCH 214/302] Remove unreachable code. --- client/driver.go | 1 - 1 file changed, 1 deletion(-) diff --git a/client/driver.go b/client/driver.go index 5af1ace39..09b2a93d9 100644 --- a/client/driver.go +++ b/client/driver.go @@ -410,7 +410,6 @@ func WaitTxConfirmation( return } } - return } func getNonce(addr proto.AccountAddress) (nonce interfaces.AccountNonce, err error) { From cd71df90b55b0e4358b092b30e4d94c96fdca882 Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 23 Jan 2019 19:53:28 +0800 Subject: [PATCH 215/302] Change cql-utils use ~/.cql as config.yaml/private.key default location. 
--- cmd/cql-utils/confgen.go | 4 +++- cmd/cql-utils/main.go | 8 ++++++-- utils/path_test.go | 3 +++ 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/cmd/cql-utils/confgen.go b/cmd/cql-utils/confgen.go index cc7aa8ee6..940b19f18 100644 --- a/cmd/cql-utils/confgen.go +++ b/cmd/cql-utils/confgen.go @@ -27,6 +27,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/conf/testnet" "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" yaml "gopkg.in/yaml.v2" ) @@ -36,10 +37,11 @@ var ( ) func init() { - flag.StringVar(&workingRoot, "root", "conf", "confgen root is the working root directory containing all auto-generating keys and certifications") + flag.StringVar(&workingRoot, "root", "~/.cql", "confgen root is the working root directory containing all auto-generating keys and certifications") } func runConfgen() { + workingRoot = utils.HomeDirExpand(workingRoot) if workingRoot == "" { log.Error("root directory is required for confgen") os.Exit(1) diff --git a/cmd/cql-utils/main.go b/cmd/cql-utils/main.go index ce94a3dd8..3c027fd4b 100644 --- a/cmd/cql-utils/main.go +++ b/cmd/cql-utils/main.go @@ -23,6 +23,7 @@ import ( "runtime" "syscall" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" "golang.org/x/crypto/ssh/terminal" ) @@ -44,8 +45,8 @@ func init() { flag.StringVar(&tool, "tool", "", "tool type, miner, keygen, keytool, rpc, nonce, confgen, addrgen, adapterconfgen") flag.StringVar(&publicKeyHex, "public", "", "public key hex string to mine node id/nonce") - flag.StringVar(&privateKeyFile, "private", "private.key", "private key file to generate/show") - flag.StringVar(&configFile, "config", "config.yaml", "config file to use") + flag.StringVar(&privateKeyFile, "private", "~/.cql/private.key", "private key file to generate/show") + flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "config file to use") 
flag.BoolVar(&skipMasterKey, "skip-master-key", false, "use empty master key") flag.BoolVar(&showVersion, "version", false, "Show version information and exit") } @@ -59,6 +60,9 @@ func main() { } log.Infof("cql-utils build: %#v\n", version) + configFile = utils.HomeDirExpand(configFile) + privateKeyFile = utils.HomeDirExpand(privateKeyFile) + switch tool { case "miner": if publicKeyHex == "" && privateKeyFile == "" { diff --git a/utils/path_test.go b/utils/path_test.go index 04ff56d6f..fe1cad44a 100644 --- a/utils/path_test.go +++ b/utils/path_test.go @@ -62,5 +62,8 @@ func TestHomeDirExpand(t *testing.T) { fullFilepathRaw := HomeDirExpand("/dev/null") So(fullFilepathRaw, ShouldEqual, "/dev/null") + + emptyPath := HomeDirExpand("") + So(emptyPath, ShouldEqual, "") }) } From a948f2429a868c9524d01b6652bf00403fea8cbf Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 23 Jan 2019 20:51:51 +0800 Subject: [PATCH 216/302] Update cql-utils docs. Disable keygen params --- cmd/cql-utils/README-zh.md | 8 ++++---- cmd/cql-utils/README.md | 8 ++++---- cmd/cql-utils/confgen.go | 6 +++++- cmd/cql-utils/main.go | 17 +++++++++-------- 4 files changed, 22 insertions(+), 17 deletions(-) diff --git a/cmd/cql-utils/README-zh.md b/cmd/cql-utils/README-zh.md index cb36fd688..37a78bb72 100644 --- a/cmd/cql-utils/README-zh.md +++ b/cmd/cql-utils/README-zh.md @@ -11,19 +11,19 @@ $ go get github.com/CovenantSQL/CovenantSQL/cmd/cql-utils ### 生成公私钥对 ``` -$ cql-utils -tool keygen +$ cql-utils -tool confgen Enter master key(press Enter for default: ""): ⏎ Private key file: private.key Public key's hex: 03bc9e90e3301a2f5ae52bfa1f9e033cde81b6b6e7188b11831562bf5847bff4c0 ``` -生成的 private.key 文件即是使用主密码加密过的私钥文件,而输出到屏幕上的字符串就是使用十六进制进行编码的公钥。 +生成的 ~/.cql/private.key 文件即是使用主密码加密过的私钥文件,而输出到屏幕上的字符串就是使用十六进制进行编码的公钥。 ### 使用私钥文件或公钥生成钱包地址 ``` -$ cql-utils -tool addrgen -private private.key +$ cql-utils -tool addrgen Enter master key(default: ""): ⏎ wallet address: 
4jXvNvPHKNPU8Sncz5u5F5WSGcgXmzC1g8RuAXTCJzLsbF9Dsf9 @@ -31,4 +31,4 @@ $ cql-utils -tool addrgen -public 02f2707c1c6955a9019cd9d02ade37b931fbfa286a1163 wallet address: 4jXvNvPHKNPU8Sncz5u5F5WSGcgXmzC1g8RuAXTCJzLsbF9Dsf9 ``` -你可以通过指定私钥文件,或者把上述的公钥十六进制编码字符串作为命令行参数来直接生成钱包地址。 \ No newline at end of file +你也可以通过-private指定私钥文件,或者把上述的公钥十六进制编码字符串作为命令行参数来直接生成钱包地址。 diff --git a/cmd/cql-utils/README.md b/cmd/cql-utils/README.md index b66ea80b7..d9da3a7ad 100644 --- a/cmd/cql-utils/README.md +++ b/cmd/cql-utils/README.md @@ -11,19 +11,19 @@ $ go get github.com/CovenantSQL/CovenantSQL/cmd/cql-utils ### Generate Key Pair ``` -$ cql-utils -tool keygen +$ cql-utils -tool confgen Enter master key(press Enter for default: ""): ⏎ Private key file: private.key Public key's hex: 03bc9e90e3301a2f5ae52bfa1f9e033cde81b6b6e7188b11831562bf5847bff4c0 ``` -The private.key is your encrypted private key file, and the pubkey hex is your public key's hex. +The ~/.cql/private.key is your encrypted private key file, and the pubkey hex is your public key's hex. ### Generate Wallet Address from existing Key ``` -$ cql-utils -tool addrgen -private private.key +$ cql-utils -tool addrgen Enter master key(default: ""): ⏎ wallet address: 4jXvNvPHKNPU8Sncz5u5F5WSGcgXmzC1g8RuAXTCJzLsbF9Dsf9 @@ -31,4 +31,4 @@ $ cql-utils -tool addrgen -public 02f2707c1c6955a9019cd9d02ade37b931fbfa286a1163 wallet address: 4jXvNvPHKNPU8Sncz5u5F5WSGcgXmzC1g8RuAXTCJzLsbF9Dsf9 ``` -You can generate your *wallet* address for test net according to your private key or public key. +You can generate your *wallet* address for test net according to your private key(default ~/.cql/private) or public key. 
diff --git a/cmd/cql-utils/confgen.go b/cmd/cql-utils/confgen.go index 940b19f18..304a9b5d4 100644 --- a/cmd/cql-utils/confgen.go +++ b/cmd/cql-utils/confgen.go @@ -62,7 +62,11 @@ func runConfgen() { os.Exit(1) } if strings.Compare(t, "y") == 0 || strings.Compare(t, "yes") == 0 { - os.RemoveAll(workingRoot) + err = os.RemoveAll(workingRoot) + if err != nil { + log.WithError(err).Error("unexpected error") + os.Exit(1) + } } else { os.Exit(0) } diff --git a/cmd/cql-utils/main.go b/cmd/cql-utils/main.go index 3c027fd4b..a7129c453 100644 --- a/cmd/cql-utils/main.go +++ b/cmd/cql-utils/main.go @@ -43,7 +43,7 @@ const name = "cql-utils" func init() { log.SetLevel(log.InfoLevel) - flag.StringVar(&tool, "tool", "", "tool type, miner, keygen, keytool, rpc, nonce, confgen, addrgen, adapterconfgen") + flag.StringVar(&tool, "tool", "", "tool type, miner, keytool, rpc, nonce, confgen, addrgen, adapterconfgen") flag.StringVar(&publicKeyHex, "public", "", "public key hex string to mine node id/nonce") flag.StringVar(&privateKeyFile, "private", "~/.cql/private.key", "private key file to generate/show") flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "config file to use") @@ -71,13 +71,14 @@ func main() { os.Exit(1) } runMiner() - case "keygen": - if privateKeyFile == "" { - // error - log.Error("privateKey path is required for keygen") - os.Exit(1) - } - runKeygen() + // Disable keygen independent call + //case "keygen": + // if privateKeyFile == "" { + // // error + // log.Error("privateKey path is required for keygen") + // os.Exit(1) + // } + // runKeygen() case "keytool": if privateKeyFile == "" { // error From 7d6369d5f8996ac92d263c5b4a43c29f100b5503 Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 23 Jan 2019 21:06:40 +0800 Subject: [PATCH 217/302] Change cql to use ~/.cql as config.yaml default location. 
--- cmd/cql/README-zh.md | 6 +++--- cmd/cql/README.md | 6 +++--- cmd/cql/main.go | 6 +++++- 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/cmd/cql/README-zh.md b/cmd/cql/README-zh.md index b4ca69a3a..a591243b3 100644 --- a/cmd/cql/README-zh.md +++ b/cmd/cql/README-zh.md @@ -19,7 +19,7 @@ $ go get github.com/CovenantSQL/CovenantSQL/cmd/cql 使用 `cql` 命令来检查钱包余额: ```bash -$ cql -config conf/config.yaml -get-balance +$ cql -get-balance INFO[0000] ### Public Key ### 0388954cf083bb6bb2b9c7248849b57c76326296fcc0d69764fc61eedb5b8d820c @@ -36,7 +36,7 @@ INFO[0000] covenant coin balance is: 0 caller="main.go:247 mai ```bash # if a non-default password applied on master key, use `-password` to pass it -$ cql -config conf/config.yaml -create 1 +$ cql -create 1 INFO[0000] ### Public Key ### 039bc931161383c994ab9b81e95ddc1494b0efeb1cb735bb91e1043a1d6b98ebfd @@ -48,7 +48,7 @@ INFO[0000] the newly created database is: covenantsql://0e9103318821b027f35b96c4 这里 `-create 1` 表示创建一个单节点的 SQLChain。 ```bash -$ cql -config conf/config.yaml -dsn covenantsql://address +$ cql -dsn covenantsql://address ``` `address` 就是你的数据库 ID。 diff --git a/cmd/cql/README.md b/cmd/cql/README.md index 4d3ec5074..ab5b3c9f5 100644 --- a/cmd/cql/README.md +++ b/cmd/cql/README.md @@ -19,7 +19,7 @@ See: [cql-utils doc](https://github.com/CovenantSQL/CovenantSQL/tree/develop/cmd Use `cql` to check your wallet balance: ```bash -$ cql -config conf/config.yaml -get-balance +$ cql -get-balance INFO[0000] ### Public Key ### 0388954cf083bb6bb2b9c7248849b57c76326296fcc0d69764fc61eedb5b8d820c @@ -37,7 +37,7 @@ You can get a database id when create a new SQL Chain: ```bash # if a non-default password applied on master key, use `-password` to pass it -$ cql -config conf/config.yaml -create 1 +$ cql -create 1 INFO[0000] ### Public Key ### 039bc931161383c994ab9b81e95ddc1494b0efeb1cb735bb91e1043a1d6b98ebfd @@ -49,7 +49,7 @@ INFO[0000] the newly created database is: covenantsql://0e9103318821b027f35b96c4 Here, 
`-create 1` refers that there is only one node in SQL Chain. ```bash -$ cql -config conf/config.yaml -dsn covenantsql://address +$ cql -dsn covenantsql://address ``` `address` is database id. diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 8e4eabc76..cf820e163 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -39,6 +39,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/types" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" sqlite3 "github.com/CovenantSQL/go-sqlite3-encrypt" "github.com/xo/dburl" @@ -216,7 +217,7 @@ func init() { flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, "Disable signature sign and verify, for testing") flag.StringVar(&outFile, "out", "", "Record stdout to file") - flag.StringVar(&configFile, "config", "config.yaml", "Config file for covenantsql") + flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file for covenantsql") flag.StringVar(&password, "password", "", "Master key password for covenantsql") flag.BoolVar(&singleTransaction, "single-transaction", false, "Execute as a single transaction (if non-interactive)") flag.Var(&variables, "variable", "Set variable") @@ -238,6 +239,9 @@ func main() { name, version, runtime.GOOS, runtime.GOARCH, runtime.Version()) os.Exit(0) } + log.Infof("cql build: %#v\n", version) + + configFile = utils.HomeDirExpand(configFile) var err error From 0f1fdd857763b1b67e2f74c316e0a6c679992903 Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 23 Jan 2019 21:07:23 +0800 Subject: [PATCH 218/302] Update testnet client test. 
Format cql-utils command help --- cmd/cql-utils/main.go | 10 +++++----- test/testnet_client/run.sh | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/cmd/cql-utils/main.go b/cmd/cql-utils/main.go index a7129c453..9099daeff 100644 --- a/cmd/cql-utils/main.go +++ b/cmd/cql-utils/main.go @@ -43,11 +43,11 @@ const name = "cql-utils" func init() { log.SetLevel(log.InfoLevel) - flag.StringVar(&tool, "tool", "", "tool type, miner, keytool, rpc, nonce, confgen, addrgen, adapterconfgen") - flag.StringVar(&publicKeyHex, "public", "", "public key hex string to mine node id/nonce") - flag.StringVar(&privateKeyFile, "private", "~/.cql/private.key", "private key file to generate/show") - flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "config file to use") - flag.BoolVar(&skipMasterKey, "skip-master-key", false, "use empty master key") + flag.StringVar(&tool, "tool", "", "Tool type, miner, keytool, rpc, nonce, confgen, addrgen, adapterconfgen") + flag.StringVar(&publicKeyHex, "public", "", "Public key hex string to mine node id/nonce") + flag.StringVar(&privateKeyFile, "private", "~/.cql/private.key", "Private key file to generate/show") + flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file to use") + flag.BoolVar(&skipMasterKey, "skip-master-key", false, "Use empty master key") flag.BoolVar(&showVersion, "version", false, "Show version information and exit") } diff --git a/test/testnet_client/run.sh b/test/testnet_client/run.sh index 13b71f7b8..a17511d0c 100755 --- a/test/testnet_client/run.sh +++ b/test/testnet_client/run.sh @@ -13,7 +13,7 @@ echo ${PROJECT_DIR} cd ${TEST_WD} echo -ne "y\n" | ${PROJECT_DIR}/bin/cql-utils -tool confgen -skip-master-key -${PROJECT_DIR}/bin/cql-utils -tool addrgen -private ./conf/private.key -skip-master-key | tee wallet.txt +${PROJECT_DIR}/bin/cql-utils -tool addrgen -skip-master-key | tee wallet.txt #get wallet addr wallet=$(awk '{print $3}' wallet.txt) @@ -22,17 +22,17 @@ 
wallet=$(awk '{print $3}' wallet.txt) ${PROJECT_DIR}/bin/cql -config ${PROJECT_DIR}/conf/testnet/config.yaml -transfer \ '{"addr":"'${wallet}'", "amount":"100000000 Particle"}' -wait-tx-confirm -${PROJECT_DIR}/bin/cql -config conf/config.yaml -get-balance +${PROJECT_DIR}/bin/cql -get-balance -${PROJECT_DIR}/bin/cql -config conf/config.yaml -create 2 -wait-tx-confirm | tee dsn.txt +${PROJECT_DIR}/bin/cql -create 2 -wait-tx-confirm | tee dsn.txt #get dsn dsn=$(cat dsn.txt) -${PROJECT_DIR}/bin/cql -config conf/config.yaml -dsn ${dsn} \ +${PROJECT_DIR}/bin/cql -dsn ${dsn} \ -command 'create table test_for_new_account(column1 int);' -${PROJECT_DIR}/bin/cql -config conf/config.yaml -dsn ${dsn} \ +${PROJECT_DIR}/bin/cql -dsn ${dsn} \ -command 'show tables;' | tee result.log grep "1 row" result.log From 3cdc4e94f6794221a32294ba16315f4550c455b3 Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 23 Jan 2019 21:27:57 +0800 Subject: [PATCH 219/302] Change cql-adapter to use ~/.cql as config.yaml default location. --- cmd/cql-adapter/README.md | 10 ++++++---- cmd/cql-adapter/main.go | 5 ++++- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/cmd/cql-adapter/README.md b/cmd/cql-adapter/README.md index dbc4a3b2e..3787a4ed2 100644 --- a/cmd/cql-adapter/README.md +++ b/cmd/cql-adapter/README.md @@ -42,12 +42,14 @@ Created a new certificate valid for the following names 📜 - "server" The certificate is at "./server.pem" and the key at "./server-key.pem" ✅ + +And move them to ~/.cql/ dir. `````` You can use following interactive command to generate adapter config. ```shell -$ cql-utils -tool adapterconfgen -config config.yaml +$ cql-utils -tool adapterconfgen ListenAddr (default: 0.0.0.0:4661): ⏎ CertificatePath (default: server.pem): ⏎ PrivateKeyPath (default: server-key.pem): ⏎ @@ -58,7 +60,7 @@ WriteCerts (default:): ⏎ StorageDriver (default: covenantsql): ⏎ StorageRoot (default:): ⏎ -$ tail -n 20 config.yaml +$ tail -n 20 ~/.cql/config.yaml ... 
skipping irrelevant configuration Adapter: ListenAddr: 0.0.0.0:4661 @@ -79,7 +81,7 @@ Adapter: Start the adapter by following commands: ```shell -$ cql-adapter -config config.yaml +$ cql-adapter ``` ### API @@ -266,4 +268,4 @@ curl -v https://e.morenodes.com:11108/v1/query --insecure \ ###### Parameters -**database:** database id \ No newline at end of file +**database:** database id diff --git a/cmd/cql-adapter/main.go b/cmd/cql-adapter/main.go index 100a5b552..3b28f522e 100644 --- a/cmd/cql-adapter/main.go +++ b/cmd/cql-adapter/main.go @@ -26,6 +26,7 @@ import ( "time" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" "golang.org/x/sys/unix" ) @@ -40,7 +41,7 @@ var ( ) func init() { - flag.StringVar(&configFile, "config", "./config.yaml", "config file for adapter") + flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "config file for adapter") flag.StringVar(&password, "password", "", "master key password") flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, "Disable signature sign and verify, for testing") @@ -55,6 +56,8 @@ func main() { os.Exit(0) } + configFile = utils.HomeDirExpand(configFile) + flag.Visit(func(f *flag.Flag) { log.Infof("args %#v : %s", f.Name, f.Value) }) From e142bcc9ca32a6595cf623e96916b82916092188 Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 23 Jan 2019 21:34:45 +0800 Subject: [PATCH 220/302] Change cql-mysql-adapter to use ~/.cql as config.yaml default location. 
--- cmd/cql-adapter/main.go | 4 ++-- cmd/cql-mysql-adapter/README.md | 6 +++--- cmd/cql-mysql-adapter/main.go | 15 +++++++++------ 3 files changed, 14 insertions(+), 11 deletions(-) diff --git a/cmd/cql-adapter/main.go b/cmd/cql-adapter/main.go index 3b28f522e..7866fefb6 100644 --- a/cmd/cql-adapter/main.go +++ b/cmd/cql-adapter/main.go @@ -41,8 +41,8 @@ var ( ) func init() { - flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "config file for adapter") - flag.StringVar(&password, "password", "", "master key password") + flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file for adapter") + flag.StringVar(&password, "password", "", "Master key password") flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, "Disable signature sign and verify, for testing") flag.BoolVar(&showVersion, "version", false, "Show version information and exit") diff --git a/cmd/cql-mysql-adapter/README.md b/cmd/cql-mysql-adapter/README.md index 73eeaa4b3..2cf2aca53 100644 --- a/cmd/cql-mysql-adapter/README.md +++ b/cmd/cql-mysql-adapter/README.md @@ -22,7 +22,7 @@ Generate the main configuration file. Same as [Generating Default Config File in Start the mysql adapter by following commands: ```shell -$ cql-mysql-adapter -config config.yaml +$ cql-mysql-adapter ``` The default mysql user is ```root``` and the default mysql password is ```calvin```, which can be modified as optional arguments of mysql adapter. 
@@ -36,7 +36,7 @@ Usage of ./cql-mysql-adapter: -bypass-signature Disable signature sign and verify, for testing -config string - config file for mysql adapter (default "./config.yaml") + config file for mysql adapter (default "~/.cql/config.yaml") -listen string listen address for mysql adapter (default "127.0.0.1:4664") -mysql-password string @@ -82,4 +82,4 @@ mysql> show tables; mysql> quit Bye -``` \ No newline at end of file +``` diff --git a/cmd/cql-mysql-adapter/main.go b/cmd/cql-mysql-adapter/main.go index aad7ce4b0..0ec23e2b3 100644 --- a/cmd/cql-mysql-adapter/main.go +++ b/cmd/cql-mysql-adapter/main.go @@ -25,6 +25,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/client" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" "golang.org/x/sys/unix" ) @@ -44,16 +45,16 @@ var ( ) func init() { - flag.StringVar(&configFile, "config", "./config.yaml", "config file for mysql adapter") - flag.StringVar(&password, "password", "", "master key password") + flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file for mysql adapter") + flag.StringVar(&password, "password", "", "Master key password") flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, "Disable signature sign and verify, for testing") flag.BoolVar(&showVersion, "version", false, "Show version information and exit") - flag.StringVar(&listenAddr, "listen", "127.0.0.1:4664", "listen address for mysql adapter") - flag.StringVar(&mysqlUser, "mysql-user", "root", "mysql user for adapter server") - flag.StringVar(&mysqlPassword, "mysql-password", "calvin", "mysql password for adapter server") - flag.StringVar(&logLevel, "log-level", "", "service log level") + flag.StringVar(&listenAddr, "listen", "127.0.0.1:4664", "Listen address for mysql adapter") + flag.StringVar(&mysqlUser, "mysql-user", "root", "MySQL user for adapter server") + flag.StringVar(&mysqlPassword, "mysql-password", 
"calvin", "MySQL password for adapter server") + flag.StringVar(&logLevel, "log-level", "", "Service log level") } func main() { @@ -65,6 +66,8 @@ func main() { os.Exit(0) } + configFile = utils.HomeDirExpand(configFile) + flag.Visit(func(f *flag.Flag) { log.Infof("args %#v : %s", f.Name, f.Value) }) From a9cdb897b8f3558748ffc22312519324728e1827 Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 23 Jan 2019 21:40:12 +0800 Subject: [PATCH 221/302] Change cql-faucet to use ~/.cql as config.yaml default location. --- cmd/cql-faucet/main.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/cmd/cql-faucet/main.go b/cmd/cql-faucet/main.go index 14496f3f7..83362c39d 100644 --- a/cmd/cql-faucet/main.go +++ b/cmd/cql-faucet/main.go @@ -28,6 +28,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/client" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" "golang.org/x/sys/unix" ) @@ -42,8 +43,8 @@ var ( ) func init() { - flag.StringVar(&configFile, "config", "config.yaml", "configuration file for covenantsql") - flag.StringVar(&password, "password", "", "master key password for covenantsql") + flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Configuration file for covenantsql") + flag.StringVar(&password, "password", "", "Master key password for covenantsql") flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, "Disable signature sign and verify, for testing") flag.BoolVar(&showVersion, "version", false, "Show version information and exit") @@ -57,6 +58,8 @@ func main() { os.Exit(0) } + configFile = utils.HomeDirExpand(configFile) + flag.Visit(func(f *flag.Flag) { log.Infof("args %#v : %s", f.Name, f.Value) }) From 2e8a8f202238c1cb50a21b6b807d007629343961 Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 23 Jan 2019 21:52:46 +0800 Subject: [PATCH 222/302] Change cql-explorer to use ~/.cql as config.yaml default location. 
--- cmd/cql-explorer/README.md | 12 ++++++------ cmd/cql-explorer/main.go | 11 +++++++---- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/cmd/cql-explorer/README.md b/cmd/cql-explorer/README.md index 6bce4872e..c321918ba 100644 --- a/cmd/cql-explorer/README.md +++ b/cmd/cql-explorer/README.md @@ -21,7 +21,7 @@ Generate the main configuration file. Same as [Generating Default Config File in Start the explorer by following commands: ```shell -$ cql-explorer -config config.yaml +$ cql-explorer ``` The available options are: @@ -30,13 +30,13 @@ The available options are: $ cql-explorer --help Usage of cql-explorer: -config string - config file path (default "./config.yaml") + Config file path (default "~/.cql/config.yaml") -interval duration - new block check interval for explorer (default 2s) + New block check interval for explorer (default 2s) -listen string - listen address for http explorer api (default "127.0.0.1:4665") + Listen address for http explorer api (default "127.0.0.1:4665") -password string - master key password for covenantsql + Master key password for covenantsql ``` ### API @@ -179,4 +179,4 @@ __hash__: hash of specified tx } } } -``` \ No newline at end of file +``` diff --git a/cmd/cql-explorer/main.go b/cmd/cql-explorer/main.go index 318764389..b1fbca26f 100644 --- a/cmd/cql-explorer/main.go +++ b/cmd/cql-explorer/main.go @@ -28,6 +28,7 @@ import ( "time" "github.com/CovenantSQL/CovenantSQL/client" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" ) @@ -47,10 +48,10 @@ var ( ) func init() { - flag.StringVar(&configFile, "config", "./config.yaml", "config file path") - flag.StringVar(&listenAddr, "listen", "127.0.0.1:4665", "listen address for http explorer api") - flag.DurationVar(&checkInterval, "interval", time.Second*2, "new block check interval for explorer") - flag.StringVar(&password, "password", "", "master key password for covenantsql") + flag.StringVar(&configFile, "config", 
"~/.cql/config.yaml", "Config file path") + flag.StringVar(&listenAddr, "listen", "127.0.0.1:4665", "Listen address for http explorer api") + flag.DurationVar(&checkInterval, "interval", time.Second*2, "New block check interval for explorer") + flag.StringVar(&password, "password", "", "Master key password for covenantsql") flag.BoolVar(&showVersion, "version", false, "Show version information and exit") } @@ -65,6 +66,8 @@ func main() { os.Exit(0) } + configFile = utils.HomeDirExpand(configFile) + flag.Visit(func(f *flag.Flag) { log.Infof("args %#v : %s", f.Name, f.Value) }) From 9f0b86d2901e425d2202e2ccfac946ca1323fdae Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 23 Jan 2019 21:58:02 +0800 Subject: [PATCH 223/302] Change cql-fuse to use ~/.cql as config.yaml default location. --- cmd/cql-fuse/main.go | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/cmd/cql-fuse/main.go b/cmd/cql-fuse/main.go index 6d6738d18..a89986dd7 100644 --- a/cmd/cql-fuse/main.go +++ b/cmd/cql-fuse/main.go @@ -76,6 +76,7 @@ import ( "bazil.org/fuse/fs" _ "bazil.org/fuse/fs/fstestutil" "github.com/CovenantSQL/CovenantSQL/client" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" ) @@ -87,23 +88,25 @@ var usage = func() { func main() { var ( - config string + configFile string dsn string mountPoint string password string readOnly bool ) - flag.StringVar(&config, "config", "./conf/config.yaml", "config file path") - flag.StringVar(&mountPoint, "mount", "./", "dir to mount") - flag.StringVar(&dsn, "dsn", "", "database url") - flag.StringVar(&password, "password", "", "master key password for covenantsql") - flag.BoolVar(&readOnly, "readonly", false, "mount read only volume") + flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file path") + flag.StringVar(&mountPoint, "mount", "./", "Dir to mount") + flag.StringVar(&dsn, "dsn", "", "Database url") + flag.StringVar(&password, "password", "", 
"Master key password for covenantsql") + flag.BoolVar(&readOnly, "readonly", false, "Mount read only volume") flag.Usage = usage flag.Parse() log.SetLevel(log.InfoLevel) - err := client.Init(config, []byte(password)) + configFile = utils.HomeDirExpand(configFile) + + err := client.Init(configFile, []byte(password)) if err != nil { log.Fatal(err) } From 88fbd891a5146e9427abe19446fa7cd2bfa9d890 Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 23 Jan 2019 22:07:12 +0800 Subject: [PATCH 224/302] Change cql-observer to use ~/.cql as config.yaml default location. --- cmd/cql-observer/main.go | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/cmd/cql-observer/main.go b/cmd/cql-observer/main.go index 620ca8068..34162cbc3 100644 --- a/cmd/cql-observer/main.go +++ b/cmd/cql-observer/main.go @@ -31,6 +31,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/rpc" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" ) @@ -49,14 +50,14 @@ var ( ) func init() { - flag.StringVar(&configFile, "config", "./config.yaml", "config file path") - flag.StringVar(&dbID, "database", "", "database to listen for observation") + flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file path") + flag.StringVar(&dbID, "database", "", "Database to listen for observation") flag.BoolVar(&showVersion, "version", false, "Show version information and exit") flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, "Disable signature sign and verify, for testing") - flag.StringVar(&resetPosition, "reset", "", "reset subscribe position") - flag.StringVar(&listenAddr, "listen", "127.0.0.1:4663", "listen address for http explorer api") - flag.StringVar(&logLevel, "log-level", "", "service log level") + flag.StringVar(&resetPosition, "reset", "", "Reset subscribe position") + flag.StringVar(&listenAddr, "listen", 
"127.0.0.1:4663", "Listen address for http explorer api") + flag.StringVar(&logLevel, "log-level", "", "Service log level") } func main() { @@ -70,6 +71,8 @@ func main() { os.Exit(0) } + configFile = utils.HomeDirExpand(configFile) + flag.Visit(func(f *flag.Flag) { log.Infof("args %#v : %s", f.Name, f.Value) }) From dd26565a9049a7eb06636be09c43ea3d117344f0 Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 23 Jan 2019 22:58:21 +0800 Subject: [PATCH 225/302] Set errexit pipefail for compatibility-testnet. --- .gitlab-ci.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index a8ac0214e..9353d2a66 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -29,7 +29,9 @@ test-my-project: compatibility-testnet: stage: test script: + - set -o errexit + - set -o pipefail - make clean - make -j8 client - go test -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ - - bash test/testnet_client/run.sh + - bash -x test/testnet_client/run.sh From 1fd9871432de026ad38078dbc8cea7f5e413c285 Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 23 Jan 2019 23:00:02 +0800 Subject: [PATCH 226/302] Remove osusergo build tags. Add build tags for unittest. 
--- .gitlab-ci.yml | 3 ++- Makefile | 2 +- alltest.sh | 6 +++--- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 9353d2a66..d37c880ec 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -4,6 +4,7 @@ variables: REVIEWDOG_VERSION: 0.9.11 REVIEWDOG_GITLAB_API_TOKEN: $REVIEWDOG_TOKEN CODECOV_TOKEN: $CODECOV_TOKEN + UNITTESTTAGS: -tags "linux sqlite_omit_load_extension" before_script: # Setup dependency management tool @@ -33,5 +34,5 @@ compatibility-testnet: - set -o pipefail - make clean - make -j8 client - - go test -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ + - go test $UNITTESTTAGS -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ - bash -x test/testnet_client/run.sh diff --git a/Makefile b/Makefile index 34a2a0e4a..da0c10c0d 100644 --- a/Makefile +++ b/Makefile @@ -126,7 +126,7 @@ endif version := $(branch)-$(GIT_COMMIT)-$(builddate) -tags := $(platform) sqlite_omit_load_extension osusergo +tags := $(platform) sqlite_omit_load_extension testtags := $(tags) testbinary test_flags := -coverpkg github.com/CovenantSQL/CovenantSQL/... -cover -race -c diff --git a/alltest.sh b/alltest.sh index b2f74d45f..f837347d2 100755 --- a/alltest.sh +++ b/alltest.sh @@ -14,7 +14,7 @@ test::package() { local coverage_file="${package//\//.}.cover.out" echo "[TEST] package=${package}, coverage=${coverage_file}" - go test -race -failfast -parallel 16 -cpu 16 -coverpkg="github.com/CovenantSQL/CovenantSQL/..." -coverprofile "${coverage_file}" "${package}" + go test $UNITTESTTAGS -race -failfast -parallel 16 -cpu 16 -coverpkg="github.com/CovenantSQL/CovenantSQL/..." 
-coverprofile "${coverage_file}" "${package}" } main() { @@ -30,9 +30,9 @@ main() { bash <(curl -s https://codecov.io/bash) # some benchmarks - go test -bench=^BenchmarkPersistentCaller_Call$ -run ^$ ./rpc/ + go test $UNITTESTTAGS -bench=^BenchmarkPersistentCaller_Call$ -run ^$ ./rpc/ bash cleanupDB.sh || true - go test -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ + go test $UNITTESTTAGS -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ bash cleanupDB.sh || true } From db9753687988b611a704610662f379e7aaceccb0 Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 24 Jan 2019 10:23:54 +0800 Subject: [PATCH 227/302] Set -x for gitlabci process. --- .gitlab-ci.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index d37c880ec..e73b48302 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -25,11 +25,14 @@ before_script: test-my-project: stage: test - script: ./alltest.sh + script: + - set -x + - ./alltest.sh compatibility-testnet: stage: test script: + - set -x - set -o errexit - set -o pipefail - make clean From 682f729bb462ca6c7f809f17b18b2453a3fdc0cb Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 24 Jan 2019 10:30:40 +0800 Subject: [PATCH 228/302] Fix go test unit test tags quote err. 
--- .gitlab-ci.yml | 4 ++-- alltest.sh | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index e73b48302..b2da8381e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -4,7 +4,7 @@ variables: REVIEWDOG_VERSION: 0.9.11 REVIEWDOG_GITLAB_API_TOKEN: $REVIEWDOG_TOKEN CODECOV_TOKEN: $CODECOV_TOKEN - UNITTESTTAGS: -tags "linux sqlite_omit_load_extension" + UNITTESTTAGS: linux sqlite_omit_load_extension before_script: # Setup dependency management tool @@ -37,5 +37,5 @@ compatibility-testnet: - set -o pipefail - make clean - make -j8 client - - go test $UNITTESTTAGS -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ + - go test -tags "$UNITTESTTAGS" -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ - bash -x test/testnet_client/run.sh diff --git a/alltest.sh b/alltest.sh index f837347d2..e7d3a6170 100755 --- a/alltest.sh +++ b/alltest.sh @@ -14,7 +14,7 @@ test::package() { local coverage_file="${package//\//.}.cover.out" echo "[TEST] package=${package}, coverage=${coverage_file}" - go test $UNITTESTTAGS -race -failfast -parallel 16 -cpu 16 -coverpkg="github.com/CovenantSQL/CovenantSQL/..." -coverprofile "${coverage_file}" "${package}" + go test -tags "$UNITTESTTAGS" -race -failfast -parallel 16 -cpu 16 -coverpkg="github.com/CovenantSQL/CovenantSQL/..." 
-coverprofile "${coverage_file}" "${package}" } main() { @@ -30,9 +30,9 @@ main() { bash <(curl -s https://codecov.io/bash) # some benchmarks - go test $UNITTESTTAGS -bench=^BenchmarkPersistentCaller_Call$ -run ^$ ./rpc/ + go test -tags "$UNITTESTTAGS" -bench=^BenchmarkPersistentCaller_Call$ -run ^$ ./rpc/ bash cleanupDB.sh || true - go test $UNITTESTTAGS -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ + go test -tags "$UNITTESTTAGS" -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ bash cleanupDB.sh || true } From 48753cb7e325ac97636c1b6a6a15ea01383eba86 Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 24 Jan 2019 10:39:32 +0800 Subject: [PATCH 229/302] Set -x for alltest.sh --- .gitlab-ci.yml | 4 ++-- alltest.sh | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b2da8381e..b42fa3cbc 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -32,10 +32,10 @@ test-my-project: compatibility-testnet: stage: test script: - - set -x - set -o errexit - set -o pipefail - make clean - make -j8 client - go test -tags "$UNITTESTTAGS" -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ - - bash -x test/testnet_client/run.sh + - set -x + - ./test/testnet_client/run.sh diff --git a/alltest.sh b/alltest.sh index e7d3a6170..b7e39220a 100755 --- a/alltest.sh +++ b/alltest.sh @@ -3,6 +3,7 @@ set -o errexit set -o pipefail set -o nounset +set -x test::package() { local package="${1:-notset}" From c5e317cbc93d6120aa0b3a9812f5b6cd44e9a7ff Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 24 Jan 2019 11:09:25 +0800 Subject: [PATCH 230/302] Move set -x location in alltest.sh --- alltest.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/alltest.sh b/alltest.sh index b7e39220a..27a419cdf 100755 --- a/alltest.sh +++ b/alltest.sh @@ -3,7 +3,6 @@ set -o errexit set -o pipefail set -o nounset -set -x test::package() { local package="${1:-notset}" @@ -27,6 +26,7 @@ 
main() { test::package "${package}" done + set -x gocovmerge *.cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f *.cover.out bash <(curl -s https://codecov.io/bash) From 9d2c7005c85ec09fabb4e9e9a4b455d3df069f28 Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 24 Jan 2019 11:42:44 +0800 Subject: [PATCH 231/302] Change cql-minerd to use ~/.cql as config.yaml default location. --- cmd/cql-minerd/main.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/cql-minerd/main.go b/cmd/cql-minerd/main.go index 62e9f1739..c78b84af9 100644 --- a/cmd/cql-minerd/main.go +++ b/cmd/cql-minerd/main.go @@ -96,7 +96,7 @@ func init() { flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, "Disable signature sign and verify, for testing") - flag.StringVar(&configFile, "config", "./config.yaml", "Config file path") + flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file path") flag.StringVar(&profileServer, "profile-server", "", "Profile server address, default not started") flag.StringVar(&cpuProfile, "cpu-profile", "", "Path to file for CPU profiling information") @@ -132,6 +132,8 @@ func main() { os.Exit(0) } + configFile = utils.HomeDirExpand(configFile) + flag.Visit(func(f *flag.Flag) { log.Infof("args %#v : %s", f.Name, f.Value) }) From 54b5d50946e120ce8f8e3ad72492422bf462c1f8 Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 24 Jan 2019 11:48:07 +0800 Subject: [PATCH 232/302] Change cqld to use ~/.cql as config.yaml default location. 
--- cmd/cqld/main.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/cqld/main.go b/cmd/cqld/main.go index 68c0a19f5..ca4d92bd1 100644 --- a/cmd/cqld/main.go +++ b/cmd/cqld/main.go @@ -71,7 +71,7 @@ func init() { flag.BoolVar(&showVersion, "version", false, "Show version information and exit") flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, "Disable signature sign and verify, for testing") - flag.StringVar(&configFile, "config", "./config.yaml", "Config file path") + flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file path") flag.StringVar(&cpuProfile, "cpu-profile", "", "Path to file for CPU profiling information") flag.StringVar(&memProfile, "mem-profile", "", "Path to file for memory profiling information") @@ -106,6 +106,8 @@ func main() { os.Exit(0) } + configFile = utils.HomeDirExpand(configFile) + flag.Visit(func(f *flag.Flag) { log.Infof("args %#v : %s", f.Name, f.Value) }) From 6be50794674f6124ff0c12161a27686c14438504 Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 24 Jan 2019 13:58:12 +0800 Subject: [PATCH 233/302] Use default config file path(~/.cql/config.yaml) if driver not call Init. 
--- client/driver.go | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/client/driver.go b/client/driver.go index 09b2a93d9..ece39b3a0 100644 --- a/client/driver.go +++ b/client/driver.go @@ -37,6 +37,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/types" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/pkg/errors" ) @@ -63,12 +64,15 @@ var ( connIDAvail []uint64 globalSeqNo uint64 randSource = rand.New(rand.NewSource(time.Now().UnixNano())) + + defaultConfigFile = "~/.cql/config.yaml" ) func init() { d := new(covenantSQLDriver) sql.Register(DBScheme, d) sql.Register(DBSchemeAlias, d) + log.Debug("CovenantSQL driver registered.") } // covenantSQLDriver implements sql.Driver interface. @@ -83,8 +87,10 @@ func (d *covenantSQLDriver) Open(dsn string) (conn driver.Conn, err error) { } if atomic.LoadUint32(&driverInitialized) == 0 { - err = ErrNotInitialized - return + err = defaultInit() + if err != nil && err != ErrAlreadyInitialized { + return + } } return newConn(cfg) @@ -97,6 +103,18 @@ type ResourceMeta struct { AdvancePayment uint64 } +func defaultInit() (err error) { + configFile := utils.HomeDirExpand(defaultConfigFile) + if configFile == defaultConfigFile { + //System not support ~ dir, need Init manually. + log.Debugf("Could not find CovenantSQL default config location: %v", configFile) + return ErrNotInitialized + } + + log.Debugf("Using CovenantSQL default config location: %v", configFile) + return Init(configFile, []byte("")) +} + // Init defines init process for client. 
func Init(configFile string, masterKey []byte) (err error) { if !atomic.CompareAndSwapUint32(&driverInitialized, 0, 1) { From aea44af93204534bbfb01d04b05b2a57db61af92 Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 24 Jan 2019 14:16:17 +0800 Subject: [PATCH 234/302] Update client example --- .../_example/{ => gdpaverage}/gdpaverage.go | 5 ++- client/_example/simple.go | 32 ++++++++----------- 2 files changed, 17 insertions(+), 20 deletions(-) rename client/_example/{ => gdpaverage}/gdpaverage.go (97%) diff --git a/client/_example/gdpaverage.go b/client/_example/gdpaverage/gdpaverage.go similarity index 97% rename from client/_example/gdpaverage.go rename to client/_example/gdpaverage/gdpaverage.go index f3a1e098e..4d1577061 100644 --- a/client/_example/gdpaverage.go +++ b/client/_example/gdpaverage/gdpaverage.go @@ -21,6 +21,7 @@ import ( "flag" "github.com/CovenantSQL/CovenantSQL/client" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" ) @@ -28,11 +29,13 @@ func main() { log.SetLevel(log.DebugLevel) var config, password, dsn string - flag.StringVar(&config, "config", "./conf/config.yaml", "config file path") + flag.StringVar(&config, "config", "~/.cql/config.yaml", "config file path") flag.StringVar(&dsn, "dsn", "", "database url") flag.StringVar(&password, "password", "", "master key password for covenantsql") flag.Parse() + config = utils.HomeDirExpand(config) + err := client.Init(config, []byte(password)) if err != nil { log.Fatal(err) diff --git a/client/_example/simple.go b/client/_example/simple.go index 6b24f4690..75ca77305 100644 --- a/client/_example/simple.go +++ b/client/_example/simple.go @@ -21,33 +21,27 @@ import ( "flag" "fmt" - "github.com/CovenantSQL/CovenantSQL/client" + _ "github.com/CovenantSQL/CovenantSQL/client" "github.com/CovenantSQL/CovenantSQL/utils/log" ) func main() { log.SetLevel(log.InfoLevel) - var config, password, dsn string + var dsn string - flag.StringVar(&config, "config", 
"./conf/config.yaml", "config file path") - flag.StringVar(&dsn, "dsn", "", "database url") - flag.StringVar(&password, "password", "", "master key password for covenantsql") + flag.StringVar(&dsn, "dsn", "", "Database url") flag.Parse() - err := client.Init(config, []byte(password)) - if err != nil { - log.Fatal(err) - } - - if dsn == "" { - meta := client.ResourceMeta{} - meta.Node = 2 - dsn, err = client.Create(meta) - if err != nil { - log.Fatal(err) - } - defer client.Drop(dsn) - } + // If your CovenantSQL config.yaml is not in ~/.cql/config.yaml + // Uncomment and edit following code + /* + config := "/data/myconfig/config.yaml" + password := "mypassword" + err := client.Init(config, []byte(password)) + if err != nil { + log.Fatal(err) + } + */ db, err := sql.Open("covenantsql", dsn) if err != nil { From 9b8114c0b24f08e9867ad5c1e5db2fcfb9c955ee Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 28 Jan 2019 10:34:19 +0800 Subject: [PATCH 235/302] Fix missing 'linux' building tags. --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index da0c10c0d..e1bf36bce 100644 --- a/Makefile +++ b/Makefile @@ -120,8 +120,8 @@ builddate := $(shell date +%Y%m%d%H%M%S) unamestr := $(shell uname) -ifeq ($(unamestr),"Linux") -platform := "linux" +ifeq ($(unamestr),Linux) +platform := linux endif version := $(branch)-$(GIT_COMMIT)-$(builddate) From 9ebc1434cdd6c892ba6786e8e732ed37a5e7cb29 Mon Sep 17 00:00:00 2001 From: auxten Date: Mon, 28 Jan 2019 11:34:35 +0800 Subject: [PATCH 236/302] Regen HashStablePack for v2.0.0 --- blockproducer/interfaces/mixins_gen.go | 7 +- crypto/verifier/common_gen.go | 35 +++-- proto/nodeinfo_gen.go | 37 +++-- proto/proto_gen.go | 66 ++++----- proto/servers_gen.go | 18 +-- types/account_gen.go | 180 ++++++++++--------------- types/ack_type_gen.go | 38 +++--- types/baseaccount_gen.go | 3 +- types/billing_gen.go | 49 +++---- types/billing_request_gen.go | 73 +++++----- types/block_gen.go | 87 
++++++------ types/bp_block_gen.go | 59 ++++---- types/createdb_gen.go | 31 ++--- types/db_service_types_gen.go | 137 +++++++++---------- types/init_service_type_gen.go | 85 ++++++------ types/issuekeys_gen.go | 26 ++-- types/no_ack_report_type_gen.go | 102 ++++++++------ types/provideservice_gen.go | 47 +++---- types/request_type_gen.go | 89 +++++------- types/response_type_gen.go | 84 +++++------- types/transfer_gen.go | 26 ++-- types/update_service_type_gen.go | 23 ++-- types/updatebilling_gen.go | 57 ++++---- types/updatepermission_gen.go | 21 ++- xenomint/types/block_gen.go | 48 +++---- xenomint/types/common_gen.go | 35 +++-- 26 files changed, 649 insertions(+), 814 deletions(-) diff --git a/blockproducer/interfaces/mixins_gen.go b/blockproducer/interfaces/mixins_gen.go index aa7b10d14..257ca5a1f 100644 --- a/blockproducer/interfaces/mixins_gen.go +++ b/blockproducer/interfaces/mixins_gen.go @@ -11,19 +11,18 @@ func (z *TransactionTypeMixin) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) + o = hsp.AppendTime(o, z.Timestamp) if oTemp, err := z.TxType.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - o = hsp.AppendTime(o, z.Timestamp) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *TransactionTypeMixin) Msgsize() (s int) { - s = 1 + 7 + z.TxType.Msgsize() + 10 + hsp.TimeSize + s = 1 + 10 + hsp.TimeSize + 7 + z.TxType.Msgsize() return } diff --git a/crypto/verifier/common_gen.go b/crypto/verifier/common_gen.go index 943ac0eda..eaa23367a 100644 --- a/crypto/verifier/common_gen.go +++ b/crypto/verifier/common_gen.go @@ -11,49 +11,46 @@ func (z *DefaultHashSignVerifierImpl) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) - if z.Signee == nil { + o = 
append(o, 0x83) + if oTemp, err := z.DataHash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + if z.Signature == nil { o = hsp.AppendNil(o) } else { - if oTemp, err := z.Signee.MarshalHash(); err != nil { + if oTemp, err := z.Signature.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x83) - if z.Signature == nil { + if z.Signee == nil { o = hsp.AppendNil(o) } else { - if oTemp, err := z.Signature.MarshalHash(); err != nil { + if oTemp, err := z.Signee.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x83) - if oTemp, err := z.DataHash.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *DefaultHashSignVerifierImpl) Msgsize() (s int) { - s = 1 + 7 - if z.Signee == nil { + s = 1 + 9 + z.DataHash.Msgsize() + 10 + if z.Signature == nil { s += hsp.NilSize } else { - s += z.Signee.Msgsize() + s += z.Signature.Msgsize() } - s += 10 - if z.Signature == nil { + s += 7 + if z.Signee == nil { s += hsp.NilSize } else { - s += z.Signature.Msgsize() + s += z.Signee.Msgsize() } - s += 9 + z.DataHash.Msgsize() return } diff --git a/proto/nodeinfo_gen.go b/proto/nodeinfo_gen.go index 8d729e099..ecda2a967 100644 --- a/proto/nodeinfo_gen.go +++ b/proto/nodeinfo_gen.go @@ -11,27 +11,26 @@ func (z *AddrAndGas) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) + o = append(o, 0x83) if oTemp, err := z.AccountAddress.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } + o = hsp.AppendUint64(o, z.GasAmount) // map header, size 1 - o = append(o, 0x83, 0x81, 0x81) + o = append(o, 0x81) if oTemp, err := z.RawNodeID.Hash.MarshalHash(); err != nil { return nil, err } else 
{ o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - o = hsp.AppendUint64(o, z.GasAmount) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *AddrAndGas) Msgsize() (s int) { - s = 1 + 15 + z.AccountAddress.Msgsize() + 10 + 1 + 5 + z.RawNodeID.Hash.Msgsize() + 10 + hsp.Uint64Size + s = 1 + 15 + z.AccountAddress.Msgsize() + 10 + hsp.Uint64Size + 10 + 1 + 5 + z.RawNodeID.Hash.Msgsize() return } @@ -40,7 +39,14 @@ func (z *Node) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 5 - o = append(o, 0x85, 0x85) + o = append(o, 0x85) + o = hsp.AppendString(o, z.Addr) + o = hsp.AppendString(o, string(z.ID)) + if oTemp, err := z.Nonce.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } if z.PublicKey == nil { o = hsp.AppendNil(o) } else { @@ -50,30 +56,19 @@ func (z *Node) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x85) - o = hsp.AppendString(o, string(z.ID)) - o = append(o, 0x85) o = hsp.AppendInt(o, int(z.Role)) - o = append(o, 0x85) - if oTemp, err := z.Nonce.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x85) - o = hsp.AppendString(o, z.Addr) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Node) Msgsize() (s int) { - s = 1 + 10 + s = 1 + 5 + hsp.StringPrefixSize + len(z.Addr) + 3 + hsp.StringPrefixSize + len(string(z.ID)) + 6 + z.Nonce.Msgsize() + 10 if z.PublicKey == nil { s += hsp.NilSize } else { s += z.PublicKey.Msgsize() } - s += 3 + hsp.StringPrefixSize + len(string(z.ID)) + 5 + hsp.IntSize + 6 + z.Nonce.Msgsize() + 5 + hsp.StringPrefixSize + len(z.Addr) + s += 5 + hsp.IntSize return } @@ -96,7 +91,7 @@ func (z *NodeKey) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 1 - 
o = append(o, 0x81, 0x81) + o = append(o, 0x81) if oTemp, err := z.Hash.MarshalHash(); err != nil { return nil, err } else { @@ -116,7 +111,7 @@ func (z *RawNodeID) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 1 - o = append(o, 0x81, 0x81) + o = append(o, 0x81) if oTemp, err := z.Hash.MarshalHash(); err != nil { return nil, err } else { diff --git a/proto/proto_gen.go b/proto/proto_gen.go index d0aab0198..e0517ded7 100644 --- a/proto/proto_gen.go +++ b/proto/proto_gen.go @@ -25,7 +25,8 @@ func (z *Envelope) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 4 - o = append(o, 0x84, 0x84) + o = append(o, 0x84) + o = hsp.AppendInt64(o, int64(z.Expire)) if z.NodeID == nil { o = hsp.AppendNil(o) } else { @@ -35,24 +36,20 @@ func (z *Envelope) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x84) - o = hsp.AppendString(o, z.Version) - o = append(o, 0x84) o = hsp.AppendInt64(o, int64(z.TTL)) - o = append(o, 0x84) - o = hsp.AppendInt64(o, int64(z.Expire)) + o = hsp.AppendString(o, z.Version) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Envelope) Msgsize() (s int) { - s = 1 + 7 + s = 1 + 7 + hsp.Int64Size + 7 if z.NodeID == nil { s += hsp.NilSize } else { s += z.NodeID.Msgsize() } - s += 8 + hsp.StringPrefixSize + len(z.Version) + 4 + hsp.Int64Size + 7 + hsp.Int64Size + s += 4 + hsp.Int64Size + 8 + hsp.StringPrefixSize + len(z.Version) return } @@ -61,19 +58,18 @@ func (z *FindNeighborReq) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 4 - o = append(o, 0x84, 0x84) + o = append(o, 0x84) + o = hsp.AppendInt(o, z.Count) if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x84) if oTemp, err := z.ID.MarshalHash(); err != nil { 
return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x84) o = hsp.AppendArrayHeader(o, uint32(len(z.Roles))) for za0001 := range z.Roles { if oTemp, err := z.Roles[za0001].MarshalHash(); err != nil { @@ -82,18 +78,15 @@ func (z *FindNeighborReq) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x84) - o = hsp.AppendInt(o, z.Count) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *FindNeighborReq) Msgsize() (s int) { - s = 1 + 9 + z.Envelope.Msgsize() + 3 + z.ID.Msgsize() + 6 + hsp.ArrayHeaderSize + s = 1 + 6 + hsp.IntSize + 9 + z.Envelope.Msgsize() + 3 + z.ID.Msgsize() + 6 + hsp.ArrayHeaderSize for za0001 := range z.Roles { s += z.Roles[za0001].Msgsize() } - s += 6 + hsp.IntSize return } @@ -102,13 +95,13 @@ func (z *FindNeighborResp) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) + o = append(o, 0x83) if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) + o = hsp.AppendString(o, z.Msg) o = hsp.AppendArrayHeader(o, uint32(len(z.Nodes))) for za0001 := range z.Nodes { if oTemp, err := z.Nodes[za0001].MarshalHash(); err != nil { @@ -117,18 +110,15 @@ func (z *FindNeighborResp) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x83) - o = hsp.AppendString(o, z.Msg) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *FindNeighborResp) Msgsize() (s int) { - s = 1 + 9 + z.Envelope.Msgsize() + 6 + hsp.ArrayHeaderSize + s = 1 + 9 + z.Envelope.Msgsize() + 4 + hsp.StringPrefixSize + len(z.Msg) + 6 + hsp.ArrayHeaderSize for za0001 := range z.Nodes { s += z.Nodes[za0001].Msgsize() } - s += 4 + hsp.StringPrefixSize + len(z.Msg) return } @@ -137,13 +127,12 @@ func (z *FindNodeReq) 
MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) if oTemp, err := z.ID.MarshalHash(); err != nil { return nil, err } else { @@ -163,7 +152,13 @@ func (z *FindNodeResp) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) + o = append(o, 0x83) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = hsp.AppendString(o, z.Msg) if z.Node == nil { o = hsp.AppendNil(o) } else { @@ -173,26 +168,17 @@ func (z *FindNodeResp) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x83) - if oTemp, err := z.Envelope.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x83) - o = hsp.AppendString(o, z.Msg) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *FindNodeResp) Msgsize() (s int) { - s = 1 + 5 + s = 1 + 9 + z.Envelope.Msgsize() + 4 + hsp.StringPrefixSize + len(z.Msg) + 5 if z.Node == nil { s += hsp.NilSize } else { s += z.Node.Msgsize() } - s += 9 + z.Envelope.Msgsize() + 4 + hsp.StringPrefixSize + len(z.Msg) return } @@ -201,13 +187,12 @@ func (z *PingReq) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) if oTemp, err := z.Node.MarshalHash(); err != nil { return nil, err } else { @@ -227,13 +212,12 @@ func (z *PingResp) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, 
z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) o = hsp.AppendString(o, z.Msg) return } @@ -249,13 +233,12 @@ func (z *UploadMetricsReq) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) o = hsp.AppendArrayHeader(o, uint32(len(z.MFBytes))) for za0001 := range z.MFBytes { o = hsp.AppendBytes(o, z.MFBytes[za0001]) @@ -277,13 +260,12 @@ func (z *UploadMetricsResp) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) o = hsp.AppendString(o, z.Msg) return } diff --git a/proto/servers_gen.go b/proto/servers_gen.go index 8041564e7..bf852302d 100644 --- a/proto/servers_gen.go +++ b/proto/servers_gen.go @@ -11,14 +11,13 @@ func (z *Peers) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) - if oTemp, err := z.PeersHeader.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.PeersHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -28,7 +27,7 @@ func (z *Peers) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied 
by the serialized message func (z *Peers) Msgsize() (s int) { - s = 1 + 12 + z.PeersHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 12 + z.PeersHeader.Msgsize() return } @@ -37,13 +36,12 @@ func (z *PeersHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 4 - o = append(o, 0x84, 0x84) + o = append(o, 0x84) if oTemp, err := z.Leader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x84) o = hsp.AppendArrayHeader(o, uint32(len(z.Servers))) for za0001 := range z.Servers { if oTemp, err := z.Servers[za0001].MarshalHash(); err != nil { @@ -52,10 +50,8 @@ func (z *PeersHeader) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x84) - o = hsp.AppendUint64(o, z.Version) - o = append(o, 0x84) o = hsp.AppendUint64(o, z.Term) + o = hsp.AppendUint64(o, z.Version) return } @@ -65,6 +61,6 @@ func (z *PeersHeader) Msgsize() (s int) { for za0001 := range z.Servers { s += z.Servers[za0001].Msgsize() } - s += 8 + hsp.Uint64Size + 5 + hsp.Uint64Size + s += 5 + hsp.Uint64Size + 8 + hsp.Uint64Size return } diff --git a/types/account_gen.go b/types/account_gen.go index e69e63642..1e7a3acbd 100644 --- a/types/account_gen.go +++ b/types/account_gen.go @@ -11,31 +11,28 @@ func (z *Account) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 4 - o = append(o, 0x84, 0x84) - o = hsp.AppendArrayHeader(o, uint32(SupportTokenNumber)) - for za0001 := range z.TokenBalance { - o = hsp.AppendUint64(o, z.TokenBalance[za0001]) - } - o = append(o, 0x84) - o = hsp.AppendFloat64(o, z.Rating) o = append(o, 0x84) - if oTemp, err := z.NextNonce.MarshalHash(); err != nil { + if oTemp, err := z.Address.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x84) - if oTemp, err := 
z.Address.MarshalHash(); err != nil { + if oTemp, err := z.NextNonce.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } + o = hsp.AppendFloat64(o, z.Rating) + o = hsp.AppendArrayHeader(o, uint32(SupportTokenNumber)) + for za0001 := range z.TokenBalance { + o = hsp.AppendUint64(o, z.TokenBalance[za0001]) + } return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Account) Msgsize() (s int) { - s = 1 + 13 + hsp.ArrayHeaderSize + (int(SupportTokenNumber) * (hsp.Uint64Size)) + 7 + hsp.Float64Size + 10 + z.NextNonce.Msgsize() + 8 + z.Address.Msgsize() + s = 1 + 8 + z.Address.Msgsize() + 10 + z.NextNonce.Msgsize() + 7 + hsp.Float64Size + 13 + hsp.ArrayHeaderSize + (int(SupportTokenNumber) * (hsp.Uint64Size)) return } @@ -44,53 +41,44 @@ func (z *MinerInfo) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 9 - o = append(o, 0x89, 0x89) - o = hsp.AppendInt32(o, int32(z.Status)) o = append(o, 0x89) + if oTemp, err := z.Address.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = hsp.AppendUint64(o, z.Deposit) + o = hsp.AppendString(o, z.EncryptionKey) + o = hsp.AppendString(o, z.Name) + if oTemp, err := z.NodeID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = hsp.AppendUint64(o, z.PendingIncome) + o = hsp.AppendUint64(o, z.ReceivedIncome) + o = hsp.AppendInt32(o, int32(z.Status)) o = hsp.AppendArrayHeader(o, uint32(len(z.UserArrears))) for za0001 := range z.UserArrears { if z.UserArrears[za0001] == nil { o = hsp.AppendNil(o) } else { // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) if oTemp, err := z.UserArrears[za0001].User.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) o = hsp.AppendUint64(o, z.UserArrears[za0001].Arrears) } } - o = append(o, 0x89) 
- if oTemp, err := z.Address.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x89) - if oTemp, err := z.NodeID.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x89) - o = hsp.AppendString(o, z.Name) - o = append(o, 0x89) - o = hsp.AppendString(o, z.EncryptionKey) - o = append(o, 0x89) - o = hsp.AppendUint64(o, z.PendingIncome) - o = append(o, 0x89) - o = hsp.AppendUint64(o, z.ReceivedIncome) - o = append(o, 0x89) - o = hsp.AppendUint64(o, z.Deposit) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *MinerInfo) Msgsize() (s int) { - s = 1 + 7 + hsp.Int32Size + 12 + hsp.ArrayHeaderSize + s = 1 + 8 + z.Address.Msgsize() + 8 + hsp.Uint64Size + 14 + hsp.StringPrefixSize + len(z.EncryptionKey) + 5 + hsp.StringPrefixSize + len(z.Name) + 7 + z.NodeID.Msgsize() + 14 + hsp.Uint64Size + 15 + hsp.Uint64Size + 7 + hsp.Int32Size + 12 + hsp.ArrayHeaderSize for za0001 := range z.UserArrears { if z.UserArrears[za0001] == nil { s += hsp.NilSize @@ -98,7 +86,6 @@ func (z *MinerInfo) Msgsize() (s int) { s += 1 + 5 + z.UserArrears[za0001].User.Msgsize() + 8 + hsp.Uint64Size } } - s += 8 + z.Address.Msgsize() + 7 + z.NodeID.Msgsize() + 5 + hsp.StringPrefixSize + len(z.Name) + 14 + hsp.StringPrefixSize + len(z.EncryptionKey) + 14 + hsp.Uint64Size + 15 + hsp.Uint64Size + 8 + hsp.Uint64Size return } @@ -107,13 +94,22 @@ func (z *ProviderProfile) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 9 - o = append(o, 0x89, 0x89) - if oTemp, err := z.TokenType.MarshalHash(); err != nil { + o = append(o, 0x89) + o = hsp.AppendUint64(o, z.Deposit) + o = hsp.AppendUint64(o, z.GasPrice) + o = hsp.AppendFloat64(o, z.LoadAvgPerCPU) + o = hsp.AppendUint64(o, z.Memory) + if oTemp, err := z.NodeID.MarshalHash(); err != nil { return nil, err } else { o = 
hsp.AppendBytes(o, oTemp) } - o = append(o, 0x89) + if oTemp, err := z.Provider.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = hsp.AppendUint64(o, z.Space) o = hsp.AppendArrayHeader(o, uint32(len(z.TargetUser))) for za0001 := range z.TargetUser { if oTemp, err := z.TargetUser[za0001].MarshalHash(); err != nil { @@ -122,38 +118,21 @@ func (z *ProviderProfile) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x89) - o = hsp.AppendFloat64(o, z.LoadAvgPerCPU) - o = append(o, 0x89) - if oTemp, err := z.Provider.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x89) - if oTemp, err := z.NodeID.MarshalHash(); err != nil { + if oTemp, err := z.TokenType.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x89) - o = hsp.AppendUint64(o, z.Deposit) - o = append(o, 0x89) - o = hsp.AppendUint64(o, z.GasPrice) - o = append(o, 0x89) - o = hsp.AppendUint64(o, z.Space) - o = append(o, 0x89) - o = hsp.AppendUint64(o, z.Memory) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *ProviderProfile) Msgsize() (s int) { - s = 1 + 10 + z.TokenType.Msgsize() + 11 + hsp.ArrayHeaderSize + s = 1 + 8 + hsp.Uint64Size + 9 + hsp.Uint64Size + 14 + hsp.Float64Size + 7 + hsp.Uint64Size + 7 + z.NodeID.Msgsize() + 9 + z.Provider.Msgsize() + 6 + hsp.Uint64Size + 11 + hsp.ArrayHeaderSize for za0001 := range z.TargetUser { s += z.TargetUser[za0001].Msgsize() } - s += 14 + hsp.Float64Size + 9 + z.Provider.Msgsize() + 7 + z.NodeID.Msgsize() + 8 + hsp.Uint64Size + 9 + hsp.Uint64Size + 6 + hsp.Uint64Size + 7 + hsp.Uint64Size + s += 10 + z.TokenType.Msgsize() return } @@ -162,19 +141,25 @@ func (z *SQLChainProfile) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 11 - o = append(o, 0x8b, 0x8b) 
- if oTemp, err := z.Meta.MarshalHash(); err != nil { + o = append(o, 0x8b) + if oTemp, err := z.Address.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x8b) - if oTemp, err := z.TokenType.MarshalHash(); err != nil { + o = hsp.AppendBytes(o, z.EncodedGenesis) + o = hsp.AppendUint64(o, z.GasPrice) + if oTemp, err := z.ID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = hsp.AppendUint32(o, z.LastUpdatedHeight) + if oTemp, err := z.Meta.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x8b) o = hsp.AppendArrayHeader(o, uint32(len(z.Miners))) for za0001 := range z.Miners { if z.Miners[za0001] == nil { @@ -187,7 +172,17 @@ func (z *SQLChainProfile) MarshalHash() (o []byte, err error) { } } } - o = append(o, 0x8b) + if oTemp, err := z.Owner.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = hsp.AppendUint64(o, z.Period) + if oTemp, err := z.TokenType.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } o = hsp.AppendArrayHeader(o, uint32(len(z.Users))) for za0002 := range z.Users { if z.Users[za0002] == nil { @@ -200,38 +195,12 @@ func (z *SQLChainProfile) MarshalHash() (o []byte, err error) { } } } - o = append(o, 0x8b) - o = hsp.AppendBytes(o, z.EncodedGenesis) - o = append(o, 0x8b) - if oTemp, err := z.Owner.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x8b) - if oTemp, err := z.Address.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x8b) - if oTemp, err := z.ID.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x8b) - o = hsp.AppendUint32(o, z.LastUpdatedHeight) - o = append(o, 0x8b) - o = hsp.AppendUint64(o, z.Period) - o = append(o, 0x8b) - o = 
hsp.AppendUint64(o, z.GasPrice) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SQLChainProfile) Msgsize() (s int) { - s = 1 + 5 + z.Meta.Msgsize() + 10 + z.TokenType.Msgsize() + 7 + hsp.ArrayHeaderSize + s = 1 + 8 + z.Address.Msgsize() + 15 + hsp.BytesPrefixSize + len(z.EncodedGenesis) + 9 + hsp.Uint64Size + 3 + z.ID.Msgsize() + 18 + hsp.Uint32Size + 5 + z.Meta.Msgsize() + 7 + hsp.ArrayHeaderSize for za0001 := range z.Miners { if z.Miners[za0001] == nil { s += hsp.NilSize @@ -239,7 +208,7 @@ func (z *SQLChainProfile) Msgsize() (s int) { s += z.Miners[za0001].Msgsize() } } - s += 6 + hsp.ArrayHeaderSize + s += 6 + z.Owner.Msgsize() + 7 + hsp.Uint64Size + 10 + z.TokenType.Msgsize() + 6 + hsp.ArrayHeaderSize for za0002 := range z.Users { if z.Users[za0002] == nil { s += hsp.NilSize @@ -247,7 +216,6 @@ func (z *SQLChainProfile) Msgsize() (s int) { s += z.Users[za0002].Msgsize() } } - s += 15 + hsp.BytesPrefixSize + len(z.EncodedGenesis) + 6 + z.Owner.Msgsize() + 8 + z.Address.Msgsize() + 3 + z.ID.Msgsize() + 18 + hsp.Uint32Size + 7 + hsp.Uint64Size + 9 + hsp.Uint64Size return } @@ -270,28 +238,23 @@ func (z *SQLChainUser) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 6 - o = append(o, 0x86, 0x86) - o = hsp.AppendInt32(o, int32(z.Status)) - o = append(o, 0x86) - o = hsp.AppendInt32(o, int32(z.Permission)) o = append(o, 0x86) if oTemp, err := z.Address.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) o = hsp.AppendUint64(o, z.AdvancePayment) - o = append(o, 0x86) o = hsp.AppendUint64(o, z.Arrears) - o = append(o, 0x86) o = hsp.AppendUint64(o, z.Deposit) + o = hsp.AppendInt32(o, int32(z.Permission)) + o = hsp.AppendInt32(o, int32(z.Status)) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SQLChainUser) Msgsize() (s int) 
{ - s = 1 + 7 + hsp.Int32Size + 11 + hsp.Int32Size + 8 + z.Address.Msgsize() + 15 + hsp.Uint64Size + 8 + hsp.Uint64Size + 8 + hsp.Uint64Size + s = 1 + 8 + z.Address.Msgsize() + 15 + hsp.Uint64Size + 8 + hsp.Uint64Size + 8 + hsp.Uint64Size + 11 + hsp.Int32Size + 7 + hsp.Int32Size return } @@ -314,20 +277,19 @@ func (z *UserArrears) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) + o = hsp.AppendUint64(o, z.Arrears) if oTemp, err := z.User.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - o = hsp.AppendUint64(o, z.Arrears) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *UserArrears) Msgsize() (s int) { - s = 1 + 5 + z.User.Msgsize() + 8 + hsp.Uint64Size + s = 1 + 8 + hsp.Uint64Size + 5 + z.User.Msgsize() return } diff --git a/types/ack_type_gen.go b/types/ack_type_gen.go index 90fc7a814..3537af696 100644 --- a/types/ack_type_gen.go +++ b/types/ack_type_gen.go @@ -11,14 +11,27 @@ func (z *Ack) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) - if oTemp, err := z.Header.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Envelope.MarshalHash(); err != nil { + // map header, size 2 + // map header, size 3 + o = append(o, 0x82, 0x83) + if oTemp, err := z.Header.AckHeader.Response.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + if oTemp, err := z.Header.AckHeader.NodeID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = hsp.AppendTime(o, z.Header.AckHeader.Timestamp) + if oTemp, err := 
z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -28,7 +41,7 @@ func (z *Ack) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Ack) Msgsize() (s int) { - s = 1 + 7 + z.Header.Msgsize() + 9 + z.Envelope.Msgsize() + s = 1 + 9 + z.Envelope.Msgsize() + 7 + 1 + 10 + 1 + 9 + z.Header.AckHeader.Response.Msgsize() + 7 + z.Header.AckHeader.NodeID.Msgsize() + 10 + hsp.TimeSize + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() return } @@ -37,26 +50,24 @@ func (z *AckHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) - if oTemp, err := z.Response.MarshalHash(); err != nil { + o = append(o, 0x83) + if oTemp, err := z.NodeID.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := z.NodeID.MarshalHash(); err != nil { + if oTemp, err := z.Response.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) o = hsp.AppendTime(o, z.Timestamp) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *AckHeader) Msgsize() (s int) { - s = 1 + 9 + z.Response.Msgsize() + 7 + z.NodeID.Msgsize() + 10 + hsp.TimeSize + s = 1 + 7 + z.NodeID.Msgsize() + 9 + z.Response.Msgsize() + 10 + hsp.TimeSize return } @@ -81,21 +92,18 @@ func (z *SignedAckHeader) MarshalHash() (o []byte, err error) { o = hsp.Require(b, z.Msgsize()) // map header, size 2 // map header, size 3 - o = append(o, 0x82, 0x82, 0x83, 0x83) + o = append(o, 0x82, 0x83) if oTemp, err := z.AckHeader.Response.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) if oTemp, err := z.AckHeader.NodeID.MarshalHash(); err != nil { return nil, err } 
else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) o = hsp.AppendTime(o, z.AckHeader.Timestamp) - o = append(o, 0x82) if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { diff --git a/types/baseaccount_gen.go b/types/baseaccount_gen.go index 857c98fd1..e1a0955f4 100644 --- a/types/baseaccount_gen.go +++ b/types/baseaccount_gen.go @@ -11,13 +11,12 @@ func (z *BaseAccount) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) if oTemp, err := z.Account.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { return nil, err } else { diff --git a/types/billing_gen.go b/types/billing_gen.go index bc636474f..6d5d57f6b 100644 --- a/types/billing_gen.go +++ b/types/billing_gen.go @@ -11,20 +11,18 @@ func (z *Billing) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) + o = append(o, 0x83) if oTemp, err := z.BillingHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -34,7 +32,7 @@ func (z *Billing) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Billing) Msgsize() (s int) { - s = 1 + 14 + z.BillingHeader.Msgsize() + 21 + 
z.TransactionTypeMixin.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 14 + z.BillingHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() return } @@ -43,13 +41,26 @@ func (z *BillingHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 6 - o = append(o, 0x86, 0x86) + o = append(o, 0x86) if oTemp, err := z.BillingRequest.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) + o = hsp.AppendArrayHeader(o, uint32(len(z.Fees))) + for za0002 := range z.Fees { + o = hsp.AppendUint64(o, z.Fees[za0002]) + } + if oTemp, err := z.Nonce.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + if oTemp, err := z.Producer.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } o = hsp.AppendArrayHeader(o, uint32(len(z.Receivers))) for za0001 := range z.Receivers { if z.Receivers[za0001] == nil { @@ -62,34 +73,16 @@ func (z *BillingHeader) MarshalHash() (o []byte, err error) { } } } - o = append(o, 0x86) - o = hsp.AppendArrayHeader(o, uint32(len(z.Fees))) - for za0002 := range z.Fees { - o = hsp.AppendUint64(o, z.Fees[za0002]) - } - o = append(o, 0x86) o = hsp.AppendArrayHeader(o, uint32(len(z.Rewards))) for za0003 := range z.Rewards { o = hsp.AppendUint64(o, z.Rewards[za0003]) } - o = append(o, 0x86) - if oTemp, err := z.Nonce.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x86) - if oTemp, err := z.Producer.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BillingHeader) Msgsize() (s int) { - s = 1 + 15 + z.BillingRequest.Msgsize() + 10 + hsp.ArrayHeaderSize + s = 1 + 15 + z.BillingRequest.Msgsize() + 5 + 
hsp.ArrayHeaderSize + (len(z.Fees) * (hsp.Uint64Size)) + 6 + z.Nonce.Msgsize() + 9 + z.Producer.Msgsize() + 10 + hsp.ArrayHeaderSize for za0001 := range z.Receivers { if z.Receivers[za0001] == nil { s += hsp.NilSize @@ -97,6 +90,6 @@ func (z *BillingHeader) Msgsize() (s int) { s += z.Receivers[za0001].Msgsize() } } - s += 5 + hsp.ArrayHeaderSize + (len(z.Fees) * (hsp.Uint64Size)) + 8 + hsp.ArrayHeaderSize + (len(z.Rewards) * (hsp.Uint64Size)) + 6 + z.Nonce.Msgsize() + 9 + z.Producer.Msgsize() + s += 8 + hsp.ArrayHeaderSize + (len(z.Rewards) * (hsp.Uint64Size)) return } diff --git a/types/billing_request_gen.go b/types/billing_request_gen.go index 48da8b034..9a631c25f 100644 --- a/types/billing_request_gen.go +++ b/types/billing_request_gen.go @@ -11,66 +11,62 @@ func (z *BillingRequest) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 4 - o = append(o, 0x84, 0x84) + o = append(o, 0x84) if oTemp, err := z.Header.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x84) - o = hsp.AppendArrayHeader(o, uint32(len(z.Signees))) - for za0001 := range z.Signees { - if z.Signees[za0001] == nil { + if oTemp, err := z.RequestHash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = hsp.AppendArrayHeader(o, uint32(len(z.Signatures))) + for za0002 := range z.Signatures { + if z.Signatures[za0002] == nil { o = hsp.AppendNil(o) } else { - if oTemp, err := z.Signees[za0001].MarshalHash(); err != nil { + if oTemp, err := z.Signatures[za0002].MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } } } - o = append(o, 0x84) - o = hsp.AppendArrayHeader(o, uint32(len(z.Signatures))) - for za0002 := range z.Signatures { - if z.Signatures[za0002] == nil { + o = hsp.AppendArrayHeader(o, uint32(len(z.Signees))) + for za0001 := range z.Signees { + if z.Signees[za0001] == nil { o = hsp.AppendNil(o) } else { - if 
oTemp, err := z.Signatures[za0002].MarshalHash(); err != nil { + if oTemp, err := z.Signees[za0001].MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } } } - o = append(o, 0x84) - if oTemp, err := z.RequestHash.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BillingRequest) Msgsize() (s int) { - s = 1 + 7 + z.Header.Msgsize() + 8 + hsp.ArrayHeaderSize - for za0001 := range z.Signees { - if z.Signees[za0001] == nil { + s = 1 + 7 + z.Header.Msgsize() + 12 + z.RequestHash.Msgsize() + 11 + hsp.ArrayHeaderSize + for za0002 := range z.Signatures { + if z.Signatures[za0002] == nil { s += hsp.NilSize } else { - s += z.Signees[za0001].Msgsize() + s += z.Signatures[za0002].Msgsize() } } - s += 11 + hsp.ArrayHeaderSize - for za0002 := range z.Signatures { - if z.Signatures[za0002] == nil { + s += 8 + hsp.ArrayHeaderSize + for za0001 := range z.Signees { + if z.Signees[za0001] == nil { s += hsp.NilSize } else { - s += z.Signatures[za0002].Msgsize() + s += z.Signees[za0001].Msgsize() } } - s += 12 + z.RequestHash.Msgsize() return } @@ -79,7 +75,12 @@ func (z *BillingRequestHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 6 - o = append(o, 0x86, 0x86) + o = append(o, 0x86) + if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } o = hsp.AppendArrayHeader(o, uint32(len(z.GasAmounts))) for za0001 := range z.GasAmounts { if z.GasAmounts[za0001] == nil { @@ -92,34 +93,24 @@ func (z *BillingRequestHeader) MarshalHash() (o []byte, err error) { } } } - o = append(o, 0x86) - if oTemp, err := z.LowBlock.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x86) if oTemp, err := z.HighBlock.MarshalHash(); err != 
nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) - o = hsp.AppendInt32(o, z.LowHeight) - o = append(o, 0x86) o = hsp.AppendInt32(o, z.HighHeight) - o = append(o, 0x86) - if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { + if oTemp, err := z.LowBlock.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } + o = hsp.AppendInt32(o, z.LowHeight) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BillingRequestHeader) Msgsize() (s int) { - s = 1 + 11 + hsp.ArrayHeaderSize + s = 1 + 11 + z.DatabaseID.Msgsize() + 11 + hsp.ArrayHeaderSize for za0001 := range z.GasAmounts { if z.GasAmounts[za0001] == nil { s += hsp.NilSize @@ -127,6 +118,6 @@ func (z *BillingRequestHeader) Msgsize() (s int) { s += z.GasAmounts[za0001].Msgsize() } } - s += 9 + z.LowBlock.Msgsize() + 10 + z.HighBlock.Msgsize() + 10 + hsp.Int32Size + 11 + hsp.Int32Size + 11 + z.DatabaseID.Msgsize() + s += 10 + z.HighBlock.Msgsize() + 11 + hsp.Int32Size + 9 + z.LowBlock.Msgsize() + 10 + hsp.Int32Size return } diff --git a/types/block_gen.go b/types/block_gen.go index bf8ef8bd6..685522af3 100644 --- a/types/block_gen.go +++ b/types/block_gen.go @@ -11,33 +11,19 @@ func (z *Block) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 4 - // map header, size 2 - o = append(o, 0x84, 0x84, 0x82, 0x82) - if oTemp, err := z.SignedHeader.Header.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x82) - if oTemp, err := z.SignedHeader.HSV.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } o = append(o, 0x84) - o = hsp.AppendArrayHeader(o, uint32(len(z.QueryTxs))) - for za0002 := range z.QueryTxs { - if z.QueryTxs[za0002] == nil { + o = hsp.AppendArrayHeader(o, uint32(len(z.Acks))) + for za0003 := range z.Acks { + if z.Acks[za0003] == 
nil { o = hsp.AppendNil(o) } else { - if oTemp, err := z.QueryTxs[za0002].MarshalHash(); err != nil { + if oTemp, err := z.Acks[za0003].MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } } } - o = append(o, 0x84) o = hsp.AppendArrayHeader(o, uint32(len(z.FailedReqs))) for za0001 := range z.FailedReqs { if z.FailedReqs[za0001] == nil { @@ -50,30 +36,41 @@ func (z *Block) MarshalHash() (o []byte, err error) { } } } - o = append(o, 0x84) - o = hsp.AppendArrayHeader(o, uint32(len(z.Acks))) - for za0003 := range z.Acks { - if z.Acks[za0003] == nil { + o = hsp.AppendArrayHeader(o, uint32(len(z.QueryTxs))) + for za0002 := range z.QueryTxs { + if z.QueryTxs[za0002] == nil { o = hsp.AppendNil(o) } else { - if oTemp, err := z.Acks[za0003].MarshalHash(); err != nil { + if oTemp, err := z.QueryTxs[za0002].MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } } } + // map header, size 2 + o = append(o, 0x82) + if oTemp, err := z.SignedHeader.Header.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + if oTemp, err := z.SignedHeader.HSV.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Block) Msgsize() (s int) { - s = 1 + 13 + 1 + 7 + z.SignedHeader.Header.Msgsize() + 4 + z.SignedHeader.HSV.Msgsize() + 9 + hsp.ArrayHeaderSize - for za0002 := range z.QueryTxs { - if z.QueryTxs[za0002] == nil { + s = 1 + 5 + hsp.ArrayHeaderSize + for za0003 := range z.Acks { + if z.Acks[za0003] == nil { s += hsp.NilSize } else { - s += z.QueryTxs[za0002].Msgsize() + s += z.Acks[za0003].Msgsize() } } s += 11 + hsp.ArrayHeaderSize @@ -84,14 +81,15 @@ func (z *Block) Msgsize() (s int) { s += z.FailedReqs[za0001].Msgsize() } } - s += 5 + hsp.ArrayHeaderSize - for za0003 := range z.Acks { - if z.Acks[za0003] == nil { + s += 9 + 
hsp.ArrayHeaderSize + for za0002 := range z.QueryTxs { + if z.QueryTxs[za0002] == nil { s += hsp.NilSize } else { - s += z.Acks[za0003].Msgsize() + s += z.QueryTxs[za0002].Msgsize() } } + s += 13 + 1 + 7 + z.SignedHeader.Header.Msgsize() + 4 + z.SignedHeader.HSV.Msgsize() return } @@ -132,40 +130,35 @@ func (z *Header) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 6 - o = append(o, 0x86, 0x86) + o = append(o, 0x86) if oTemp, err := z.GenesisHash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) - if oTemp, err := z.ParentHash.MarshalHash(); err != nil { + if oTemp, err := z.MerkleRoot.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) - if oTemp, err := z.MerkleRoot.MarshalHash(); err != nil { + if oTemp, err := z.ParentHash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) - o = hsp.AppendInt32(o, z.Version) - o = append(o, 0x86) if oTemp, err := z.Producer.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) o = hsp.AppendTime(o, z.Timestamp) + o = hsp.AppendInt32(o, z.Version) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Header) Msgsize() (s int) { - s = 1 + 12 + z.GenesisHash.Msgsize() + 11 + z.ParentHash.Msgsize() + 11 + z.MerkleRoot.Msgsize() + 8 + hsp.Int32Size + 9 + z.Producer.Msgsize() + 10 + hsp.TimeSize + s = 1 + 12 + z.GenesisHash.Msgsize() + 11 + z.MerkleRoot.Msgsize() + 11 + z.ParentHash.Msgsize() + 9 + z.Producer.Msgsize() + 10 + hsp.TimeSize + 8 + hsp.Int32Size return } @@ -174,7 +167,7 @@ func (z *QueryAsTx) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) if z.Request == nil { o = 
hsp.AppendNil(o) } else { @@ -184,7 +177,6 @@ func (z *QueryAsTx) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x82) if z.Response == nil { o = hsp.AppendNil(o) } else { @@ -219,14 +211,13 @@ func (z *SignedHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) - if oTemp, err := z.Header.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.HSV.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.HSV.MarshalHash(); err != nil { + if oTemp, err := z.Header.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -236,6 +227,6 @@ func (z *SignedHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SignedHeader) Msgsize() (s int) { - s = 1 + 7 + z.Header.Msgsize() + 4 + z.HSV.Msgsize() + s = 1 + 4 + z.HSV.Msgsize() + 7 + z.Header.Msgsize() return } diff --git a/types/bp_block_gen.go b/types/bp_block_gen.go index 5afbe938a..b98854a4c 100644 --- a/types/bp_block_gen.go +++ b/types/bp_block_gen.go @@ -11,13 +11,12 @@ func (z *BPBlock) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) if oTemp, err := z.SignedHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) o = hsp.AppendArrayHeader(o, uint32(len(z.Transactions))) for za0001 := range z.Transactions { if oTemp, err := z.Transactions[za0001].MarshalHash(); err != nil { @@ -43,34 +42,30 @@ func (z *BPHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 5 - o = append(o, 0x85, 0x85) + o = append(o, 0x85) if oTemp, err := z.MerkleRoot.MarshalHash(); err != 
nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x85) if oTemp, err := z.ParentHash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x85) - o = hsp.AppendInt32(o, z.Version) - o = append(o, 0x85) if oTemp, err := z.Producer.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x85) o = hsp.AppendTime(o, z.Timestamp) + o = hsp.AppendInt32(o, z.Version) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BPHeader) Msgsize() (s int) { - s = 1 + 11 + z.MerkleRoot.Msgsize() + 11 + z.ParentHash.Msgsize() + 8 + hsp.Int32Size + 9 + z.Producer.Msgsize() + 10 + hsp.TimeSize + s = 1 + 11 + z.MerkleRoot.Msgsize() + 11 + z.ParentHash.Msgsize() + 9 + z.Producer.Msgsize() + 10 + hsp.TimeSize + 8 + hsp.Int32Size return } @@ -79,55 +74,51 @@ func (z *BPSignedHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 4 - o = append(o, 0x84, 0x84) - if z.Signee == nil { + o = append(o, 0x84) + if oTemp, err := z.BPHeader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + if oTemp, err := z.BlockHash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + if z.Signature == nil { o = hsp.AppendNil(o) } else { - if oTemp, err := z.Signee.MarshalHash(); err != nil { + if oTemp, err := z.Signature.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x84) - if z.Signature == nil { + if z.Signee == nil { o = hsp.AppendNil(o) } else { - if oTemp, err := z.Signature.MarshalHash(); err != nil { + if oTemp, err := z.Signee.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x84) - if oTemp, err := z.BPHeader.MarshalHash(); err != nil { - return nil, err - } else { - 
o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x84) - if oTemp, err := z.BlockHash.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BPSignedHeader) Msgsize() (s int) { - s = 1 + 7 - if z.Signee == nil { + s = 1 + 9 + z.BPHeader.Msgsize() + 10 + z.BlockHash.Msgsize() + 10 + if z.Signature == nil { s += hsp.NilSize } else { - s += z.Signee.Msgsize() + s += z.Signature.Msgsize() } - s += 10 - if z.Signature == nil { + s += 7 + if z.Signee == nil { s += hsp.NilSize } else { - s += z.Signature.Msgsize() + s += z.Signee.Msgsize() } - s += 9 + z.BPHeader.Msgsize() + 10 + z.BlockHash.Msgsize() return } diff --git a/types/createdb_gen.go b/types/createdb_gen.go index f72127991..1fe24735b 100644 --- a/types/createdb_gen.go +++ b/types/createdb_gen.go @@ -11,20 +11,18 @@ func (z *CreateDatabase) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) + o = append(o, 0x83) if oTemp, err := z.CreateDatabaseHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -34,7 +32,7 @@ func (z *CreateDatabase) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *CreateDatabase) Msgsize() (s int) { - s = 1 + 21 + z.CreateDatabaseHeader.Msgsize() + 21 + 
z.TransactionTypeMixin.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 21 + z.CreateDatabaseHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() return } @@ -43,39 +41,34 @@ func (z *CreateDatabaseHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 6 - o = append(o, 0x86, 0x86) - if oTemp, err := z.ResourceMeta.MarshalHash(); err != nil { + o = append(o, 0x86) + o = hsp.AppendUint64(o, z.AdvancePayment) + o = hsp.AppendUint64(o, z.GasPrice) + if oTemp, err := z.Nonce.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) - if oTemp, err := z.TokenType.MarshalHash(); err != nil { + if oTemp, err := z.Owner.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) - if oTemp, err := z.Nonce.MarshalHash(); err != nil { + if oTemp, err := z.ResourceMeta.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) - if oTemp, err := z.Owner.MarshalHash(); err != nil { + if oTemp, err := z.TokenType.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) - o = hsp.AppendUint64(o, z.GasPrice) - o = append(o, 0x86) - o = hsp.AppendUint64(o, z.AdvancePayment) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *CreateDatabaseHeader) Msgsize() (s int) { - s = 1 + 13 + z.ResourceMeta.Msgsize() + 10 + z.TokenType.Msgsize() + 6 + z.Nonce.Msgsize() + 6 + z.Owner.Msgsize() + 9 + hsp.Uint64Size + 15 + hsp.Uint64Size + s = 1 + 15 + hsp.Uint64Size + 9 + hsp.Uint64Size + 6 + z.Nonce.Msgsize() + 6 + z.Owner.Msgsize() + 13 + z.ResourceMeta.Msgsize() + 10 + z.TokenType.Msgsize() return } diff --git a/types/db_service_types_gen.go b/types/db_service_types_gen.go index 188fcada8..d072402d4 100644 --- 
a/types/db_service_types_gen.go +++ b/types/db_service_types_gen.go @@ -11,22 +11,21 @@ func (z *CreateDatabaseRequest) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - // map header, size 2 - // map header, size 1 - o = append(o, 0x82, 0x82, 0x82, 0x82, 0x81, 0x81) - if oTemp, err := z.Header.CreateDatabaseRequestHeader.ResourceMeta.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + // map header, size 2 + // map header, size 1 + o = append(o, 0x82, 0x81) + if oTemp, err := z.Header.CreateDatabaseRequestHeader.ResourceMeta.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Envelope.MarshalHash(); err != nil { + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -36,7 +35,7 @@ func (z *CreateDatabaseRequest) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *CreateDatabaseRequest) Msgsize() (s int) { - s = 1 + 7 + 1 + 28 + 1 + 13 + z.Header.CreateDatabaseRequestHeader.ResourceMeta.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() + s = 1 + 9 + z.Envelope.Msgsize() + 7 + 1 + 28 + 1 + 13 + z.Header.CreateDatabaseRequestHeader.ResourceMeta.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() return } @@ -45,7 +44,7 @@ func (z *CreateDatabaseRequestHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 1 - o = append(o, 0x81, 0x81) + o = append(o, 0x81) if oTemp, err := z.ResourceMeta.MarshalHash(); err != nil { return nil, err } 
else { @@ -65,22 +64,21 @@ func (z *CreateDatabaseResponse) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - // map header, size 2 - // map header, size 1 - o = append(o, 0x82, 0x82, 0x82, 0x82, 0x81, 0x81) - if oTemp, err := z.Header.CreateDatabaseResponseHeader.InstanceMeta.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + // map header, size 2 + // map header, size 1 + o = append(o, 0x82, 0x81) + if oTemp, err := z.Header.CreateDatabaseResponseHeader.InstanceMeta.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Envelope.MarshalHash(); err != nil { + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -90,7 +88,7 @@ func (z *CreateDatabaseResponse) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *CreateDatabaseResponse) Msgsize() (s int) { - s = 1 + 7 + 1 + 29 + 1 + 13 + z.Header.CreateDatabaseResponseHeader.InstanceMeta.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() + s = 1 + 9 + z.Envelope.Msgsize() + 7 + 1 + 29 + 1 + 13 + z.Header.CreateDatabaseResponseHeader.InstanceMeta.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() return } @@ -99,7 +97,7 @@ func (z *CreateDatabaseResponseHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 1 - o = append(o, 0x81, 0x81) + o = append(o, 0x81) if oTemp, err := z.InstanceMeta.MarshalHash(); err != nil { return nil, err } else { @@ -119,22 +117,21 @@ func (z 
*DropDatabaseRequest) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - // map header, size 2 - // map header, size 1 - o = append(o, 0x82, 0x82, 0x82, 0x82, 0x81, 0x81) - if oTemp, err := z.Header.DropDatabaseRequestHeader.DatabaseID.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + // map header, size 2 + // map header, size 1 + o = append(o, 0x82, 0x81) + if oTemp, err := z.Header.DropDatabaseRequestHeader.DatabaseID.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Envelope.MarshalHash(); err != nil { + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -144,7 +141,7 @@ func (z *DropDatabaseRequest) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *DropDatabaseRequest) Msgsize() (s int) { - s = 1 + 7 + 1 + 26 + 1 + 11 + z.Header.DropDatabaseRequestHeader.DatabaseID.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() + s = 1 + 9 + z.Envelope.Msgsize() + 7 + 1 + 26 + 1 + 11 + z.Header.DropDatabaseRequestHeader.DatabaseID.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() return } @@ -153,7 +150,7 @@ func (z *DropDatabaseRequestHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 1 - o = append(o, 0x81, 0x81) + o = append(o, 0x81) if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { return nil, err } else { @@ -188,22 +185,21 @@ func (z *GetDatabaseRequest) MarshalHash() (o []byte, err error) { var b []byte o = 
hsp.Require(b, z.Msgsize()) // map header, size 2 - // map header, size 2 - // map header, size 1 - o = append(o, 0x82, 0x82, 0x82, 0x82, 0x81, 0x81) - if oTemp, err := z.Header.GetDatabaseRequestHeader.DatabaseID.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + // map header, size 2 + // map header, size 1 + o = append(o, 0x82, 0x81) + if oTemp, err := z.Header.GetDatabaseRequestHeader.DatabaseID.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Envelope.MarshalHash(); err != nil { + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -213,7 +209,7 @@ func (z *GetDatabaseRequest) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *GetDatabaseRequest) Msgsize() (s int) { - s = 1 + 7 + 1 + 25 + 1 + 11 + z.Header.GetDatabaseRequestHeader.DatabaseID.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() + s = 1 + 9 + z.Envelope.Msgsize() + 7 + 1 + 25 + 1 + 11 + z.Header.GetDatabaseRequestHeader.DatabaseID.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() return } @@ -222,7 +218,7 @@ func (z *GetDatabaseRequestHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 1 - o = append(o, 0x81, 0x81) + o = append(o, 0x81) if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { return nil, err } else { @@ -242,22 +238,21 @@ func (z *GetDatabaseResponse) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - // map header, size 2 - // map 
header, size 1 - o = append(o, 0x82, 0x82, 0x82, 0x82, 0x81, 0x81) - if oTemp, err := z.Header.GetDatabaseResponseHeader.InstanceMeta.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + // map header, size 2 + // map header, size 1 + o = append(o, 0x82, 0x81) + if oTemp, err := z.Header.GetDatabaseResponseHeader.InstanceMeta.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Envelope.MarshalHash(); err != nil { + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -267,7 +262,7 @@ func (z *GetDatabaseResponse) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *GetDatabaseResponse) Msgsize() (s int) { - s = 1 + 7 + 1 + 26 + 1 + 13 + z.Header.GetDatabaseResponseHeader.InstanceMeta.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() + s = 1 + 9 + z.Envelope.Msgsize() + 7 + 1 + 26 + 1 + 13 + z.Header.GetDatabaseResponseHeader.InstanceMeta.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() return } @@ -276,7 +271,7 @@ func (z *GetDatabaseResponseHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 1 - o = append(o, 0x81, 0x81) + o = append(o, 0x81) if oTemp, err := z.InstanceMeta.MarshalHash(); err != nil { return nil, err } else { @@ -297,13 +292,12 @@ func (z *SignedCreateDatabaseRequestHeader) MarshalHash() (o []byte, err error) o = hsp.Require(b, z.Msgsize()) // map header, size 2 // map header, size 1 - o = append(o, 0x82, 0x82, 0x81, 0x81) + o = append(o, 0x82, 0x81) if oTemp, err 
:= z.CreateDatabaseRequestHeader.ResourceMeta.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { @@ -324,13 +318,12 @@ func (z *SignedCreateDatabaseResponseHeader) MarshalHash() (o []byte, err error) o = hsp.Require(b, z.Msgsize()) // map header, size 2 // map header, size 1 - o = append(o, 0x82, 0x82, 0x81, 0x81) + o = append(o, 0x82, 0x81) if oTemp, err := z.CreateDatabaseResponseHeader.InstanceMeta.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { @@ -350,15 +343,15 @@ func (z *SignedDropDatabaseRequestHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - // map header, size 1 - o = append(o, 0x82, 0x82, 0x81, 0x81) - if oTemp, err := z.DropDatabaseRequestHeader.DatabaseID.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + // map header, size 1 + o = append(o, 0x81) + if oTemp, err := z.DropDatabaseRequestHeader.DatabaseID.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -368,7 +361,7 @@ func (z *SignedDropDatabaseRequestHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SignedDropDatabaseRequestHeader) Msgsize() (s int) { - s = 1 + 26 + 1 + 11 + z.DropDatabaseRequestHeader.DatabaseID.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 26 + 1 + 11 + 
z.DropDatabaseRequestHeader.DatabaseID.Msgsize() return } @@ -377,15 +370,15 @@ func (z *SignedGetDatabaseRequestHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - // map header, size 1 - o = append(o, 0x82, 0x82, 0x81, 0x81) - if oTemp, err := z.GetDatabaseRequestHeader.DatabaseID.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + // map header, size 1 + o = append(o, 0x81) + if oTemp, err := z.GetDatabaseRequestHeader.DatabaseID.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -395,7 +388,7 @@ func (z *SignedGetDatabaseRequestHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SignedGetDatabaseRequestHeader) Msgsize() (s int) { - s = 1 + 25 + 1 + 11 + z.GetDatabaseRequestHeader.DatabaseID.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 25 + 1 + 11 + z.GetDatabaseRequestHeader.DatabaseID.Msgsize() return } @@ -404,15 +397,15 @@ func (z *SignedGetDatabaseResponseHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - // map header, size 1 - o = append(o, 0x82, 0x82, 0x81, 0x81) - if oTemp, err := z.GetDatabaseResponseHeader.InstanceMeta.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + // map header, size 1 + o = append(o, 0x81) + if oTemp, err := 
z.GetDatabaseResponseHeader.InstanceMeta.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -422,6 +415,6 @@ func (z *SignedGetDatabaseResponseHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SignedGetDatabaseResponseHeader) Msgsize() (s int) { - s = 1 + 26 + 1 + 13 + z.GetDatabaseResponseHeader.InstanceMeta.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 26 + 1 + 13 + z.GetDatabaseResponseHeader.InstanceMeta.Msgsize() return } diff --git a/types/init_service_type_gen.go b/types/init_service_type_gen.go index b289ec50d..4e8438bfa 100644 --- a/types/init_service_type_gen.go +++ b/types/init_service_type_gen.go @@ -11,7 +11,7 @@ func (z *InitService) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 1 - o = append(o, 0x81, 0x81) + o = append(o, 0x81) if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { @@ -31,8 +31,18 @@ func (z *InitServiceResponse) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 1 - o = append(o, 0x81, 0x81) - if oTemp, err := z.Header.MarshalHash(); err != nil { + // map header, size 2 + // map header, size 1 + o = append(o, 0x81, 0x82, 0x81) + o = hsp.AppendArrayHeader(o, uint32(len(z.Header.InitServiceResponseHeader.Instances))) + for za0001 := range z.Header.InitServiceResponseHeader.Instances { + if oTemp, err := z.Header.InitServiceResponseHeader.Instances[za0001].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -42,7 +52,11 @@ func (z *InitServiceResponse) MarshalHash() (o []byte, err error) { // Msgsize returns an 
upper bound estimate of the number of bytes occupied by the serialized message func (z *InitServiceResponse) Msgsize() (s int) { - s = 1 + 7 + z.Header.Msgsize() + s = 1 + 7 + 1 + 26 + 1 + 10 + hsp.ArrayHeaderSize + for za0001 := range z.Header.InitServiceResponseHeader.Instances { + s += z.Header.InitServiceResponseHeader.Instances[za0001].Msgsize() + } + s += 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() return } @@ -51,7 +65,7 @@ func (z *InitServiceResponseHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 1 - o = append(o, 0x81, 0x81) + o = append(o, 0x81) o = hsp.AppendArrayHeader(o, uint32(len(z.Instances))) for za0001 := range z.Instances { if oTemp, err := z.Instances[za0001].MarshalHash(); err != nil { @@ -77,7 +91,13 @@ func (z *ResourceMeta) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 8 - o = append(o, 0x88, 0x88) + o = append(o, 0x88) + o = hsp.AppendFloat64(o, z.ConsistencyLevel) + o = hsp.AppendString(o, z.EncryptionKey) + o = hsp.AppendFloat64(o, z.LoadAvgPerCPU) + o = hsp.AppendUint64(o, z.Memory) + o = hsp.AppendUint16(o, z.Node) + o = hsp.AppendUint64(o, z.Space) o = hsp.AppendArrayHeader(o, uint32(len(z.TargetMiners))) for za0001 := range z.TargetMiners { if oTemp, err := z.TargetMiners[za0001].MarshalHash(); err != nil { @@ -86,30 +106,17 @@ func (z *ResourceMeta) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x88) o = hsp.AppendBool(o, z.UseEventualConsistency) - o = append(o, 0x88) - o = hsp.AppendFloat64(o, z.ConsistencyLevel) - o = append(o, 0x88) - o = hsp.AppendFloat64(o, z.LoadAvgPerCPU) - o = append(o, 0x88) - o = hsp.AppendString(o, z.EncryptionKey) - o = append(o, 0x88) - o = hsp.AppendUint16(o, z.Node) - o = append(o, 0x88) - o = hsp.AppendUint64(o, z.Space) - o = append(o, 0x88) - o = hsp.AppendUint64(o, z.Memory) return } // Msgsize returns an upper bound estimate of 
the number of bytes occupied by the serialized message func (z *ResourceMeta) Msgsize() (s int) { - s = 1 + 13 + hsp.ArrayHeaderSize + s = 1 + 17 + hsp.Float64Size + 14 + hsp.StringPrefixSize + len(z.EncryptionKey) + 14 + hsp.Float64Size + 7 + hsp.Uint64Size + 5 + hsp.Uint16Size + 6 + hsp.Uint64Size + 13 + hsp.ArrayHeaderSize for za0001 := range z.TargetMiners { s += z.TargetMiners[za0001].Msgsize() } - s += 23 + hsp.BoolSize + 17 + hsp.Float64Size + 14 + hsp.Float64Size + 14 + hsp.StringPrefixSize + len(z.EncryptionKey) + 5 + hsp.Uint16Size + 6 + hsp.Uint64Size + 7 + hsp.Uint64Size + s += 23 + hsp.BoolSize return } @@ -118,7 +125,12 @@ func (z *ServiceInstance) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 4 - o = append(o, 0x84, 0x84) + o = append(o, 0x84) + if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } if z.GenesisBlock == nil { o = hsp.AppendNil(o) } else { @@ -128,7 +140,6 @@ func (z *ServiceInstance) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x84) if z.Peers == nil { o = hsp.AppendNil(o) } else { @@ -138,24 +149,17 @@ func (z *ServiceInstance) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x84) if oTemp, err := z.ResourceMeta.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x84) - if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *ServiceInstance) Msgsize() (s int) { - s = 1 + 13 + s = 1 + 11 + z.DatabaseID.Msgsize() + 13 if z.GenesisBlock == nil { s += hsp.NilSize } else { @@ -167,7 +171,7 @@ func (z *ServiceInstance) Msgsize() (s int) { } else { s += z.Peers.Msgsize() } - s += 13 + 
z.ResourceMeta.Msgsize() + 11 + z.DatabaseID.Msgsize() + s += 13 + z.ResourceMeta.Msgsize() return } @@ -176,8 +180,14 @@ func (z *SignedInitServiceResponseHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } // map header, size 1 - o = append(o, 0x82, 0x82, 0x81, 0x81) + o = append(o, 0x81) o = hsp.AppendArrayHeader(o, uint32(len(z.InitServiceResponseHeader.Instances))) for za0001 := range z.InitServiceResponseHeader.Instances { if oTemp, err := z.InitServiceResponseHeader.Instances[za0001].MarshalHash(); err != nil { @@ -186,21 +196,14 @@ func (z *SignedInitServiceResponseHeader) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x82) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SignedInitServiceResponseHeader) Msgsize() (s int) { - s = 1 + 26 + 1 + 10 + hsp.ArrayHeaderSize + s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 26 + 1 + 10 + hsp.ArrayHeaderSize for za0001 := range z.InitServiceResponseHeader.Instances { s += z.InitServiceResponseHeader.Instances[za0001].Msgsize() } - s += 28 + z.DefaultHashSignVerifierImpl.Msgsize() return } diff --git a/types/issuekeys_gen.go b/types/issuekeys_gen.go index 79e86df52..829f88745 100644 --- a/types/issuekeys_gen.go +++ b/types/issuekeys_gen.go @@ -11,20 +11,18 @@ func (z *IssueKeys) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) - if oTemp, err := z.IssueKeysHeader.MarshalHash(); err != nil { + o = append(o, 0x83) + if oTemp, err := 
z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { + if oTemp, err := z.IssueKeysHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -34,7 +32,7 @@ func (z *IssueKeys) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *IssueKeys) Msgsize() (s int) { - s = 1 + 16 + z.IssueKeysHeader.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 16 + z.IssueKeysHeader.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() return } @@ -43,26 +41,23 @@ func (z *IssueKeysHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) + o = append(o, 0x83) o = hsp.AppendArrayHeader(o, uint32(len(z.MinerKeys))) for za0001 := range z.MinerKeys { // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) if oTemp, err := z.MinerKeys[za0001].Miner.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) o = hsp.AppendString(o, z.MinerKeys[za0001].EncryptionKey) } - o = append(o, 0x83) if oTemp, err := z.Nonce.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) if oTemp, err := z.TargetSQLChain.MarshalHash(); err != nil { return nil, err } else { @@ -86,19 +81,18 @@ func (z *MinerKey) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - 
o = append(o, 0x82, 0x82) + o = append(o, 0x82) + o = hsp.AppendString(o, z.EncryptionKey) if oTemp, err := z.Miner.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - o = hsp.AppendString(o, z.EncryptionKey) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *MinerKey) Msgsize() (s int) { - s = 1 + 6 + z.Miner.Msgsize() + 14 + hsp.StringPrefixSize + len(z.EncryptionKey) + s = 1 + 14 + hsp.StringPrefixSize + len(z.EncryptionKey) + 6 + z.Miner.Msgsize() return } diff --git a/types/no_ack_report_type_gen.go b/types/no_ack_report_type_gen.go index e9e89abc6..2db24c1e7 100644 --- a/types/no_ack_report_type_gen.go +++ b/types/no_ack_report_type_gen.go @@ -11,21 +11,20 @@ func (z *AggrNoAckReport) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - // map header, size 2 - o = append(o, 0x82, 0x82, 0x82, 0x82) - if oTemp, err := z.Header.AggrNoAckReportHeader.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } + // map header, size 2 o = append(o, 0x82) - if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.Header.AggrNoAckReportHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Envelope.MarshalHash(); err != nil { + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -35,7 +34,7 @@ func (z *AggrNoAckReport) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *AggrNoAckReport) Msgsize() (s int) { - s = 1 + 7 + 1 + 22 + z.Header.AggrNoAckReportHeader.Msgsize() + 28 + 
z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() + s = 1 + 9 + z.Envelope.Msgsize() + 7 + 1 + 22 + z.Header.AggrNoAckReportHeader.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() return } @@ -44,7 +43,12 @@ func (z *AggrNoAckReportHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 4 - o = append(o, 0x84, 0x84) + o = append(o, 0x84) + if oTemp, err := z.NodeID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } if z.Peers == nil { o = hsp.AppendNil(o) } else { @@ -54,29 +58,35 @@ func (z *AggrNoAckReportHeader) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x84) o = hsp.AppendArrayHeader(o, uint32(len(z.Reports))) for za0001 := range z.Reports { - if oTemp, err := z.Reports[za0001].MarshalHash(); err != nil { + // map header, size 2 + // map header, size 3 + o = append(o, 0x82, 0x83) + if oTemp, err := z.Reports[za0001].NoAckReportHeader.NodeID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = hsp.AppendTime(o, z.Reports[za0001].NoAckReportHeader.Timestamp) + if oTemp, err := z.Reports[za0001].NoAckReportHeader.Response.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + if oTemp, err := z.Reports[za0001].DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x84) - if oTemp, err := z.NodeID.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x84) o = hsp.AppendTime(o, z.Timestamp) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *AggrNoAckReportHeader) Msgsize() (s int) { - s = 1 + 6 + s = 1 + 7 + z.NodeID.Msgsize() + 6 if z.Peers == nil { s += hsp.NilSize } else { @@ -84,9 +94,9 @@ func (z 
*AggrNoAckReportHeader) Msgsize() (s int) { } s += 8 + hsp.ArrayHeaderSize for za0001 := range z.Reports { - s += z.Reports[za0001].Msgsize() + s += 1 + 18 + 1 + 7 + z.Reports[za0001].NoAckReportHeader.NodeID.Msgsize() + 10 + hsp.TimeSize + 9 + z.Reports[za0001].NoAckReportHeader.Response.Msgsize() + 28 + z.Reports[za0001].DefaultHashSignVerifierImpl.Msgsize() } - s += 7 + z.NodeID.Msgsize() + 10 + hsp.TimeSize + s += 10 + hsp.TimeSize return } @@ -95,14 +105,27 @@ func (z *NoAckReport) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) - if oTemp, err := z.Header.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Envelope.MarshalHash(); err != nil { + // map header, size 2 + // map header, size 3 + o = append(o, 0x82, 0x83) + if oTemp, err := z.Header.NoAckReportHeader.NodeID.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = hsp.AppendTime(o, z.Header.NoAckReportHeader.Timestamp) + if oTemp, err := z.Header.NoAckReportHeader.Response.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -112,7 +135,7 @@ func (z *NoAckReport) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *NoAckReport) Msgsize() (s int) { - s = 1 + 7 + z.Header.Msgsize() + 9 + z.Envelope.Msgsize() + s = 1 + 9 + z.Envelope.Msgsize() + 7 + 1 + 18 + 1 + 7 + z.Header.NoAckReportHeader.NodeID.Msgsize() + 10 + hsp.TimeSize + 9 + z.Header.NoAckReportHeader.Response.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() return } @@ -121,26 
+144,24 @@ func (z *NoAckReportHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) - if oTemp, err := z.Response.MarshalHash(); err != nil { + o = append(o, 0x83) + if oTemp, err := z.NodeID.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := z.NodeID.MarshalHash(); err != nil { + if oTemp, err := z.Response.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) o = hsp.AppendTime(o, z.Timestamp) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *NoAckReportHeader) Msgsize() (s int) { - s = 1 + 9 + z.Response.Msgsize() + 7 + z.NodeID.Msgsize() + 10 + hsp.TimeSize + s = 1 + 7 + z.NodeID.Msgsize() + 9 + z.Response.Msgsize() + 10 + hsp.TimeSize return } @@ -149,13 +170,12 @@ func (z *SignedAggrNoAckReportHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) if oTemp, err := z.AggrNoAckReportHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { @@ -175,23 +195,21 @@ func (z *SignedNoAckReportHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - // map header, size 3 - o = append(o, 0x82, 0x82, 0x83, 0x83) - if oTemp, err := z.NoAckReportHeader.NodeID.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } + // map header, size 3 o = append(o, 0x83) - o = hsp.AppendTime(o, z.NoAckReportHeader.Timestamp) - o = append(o, 0x83) - if oTemp, err := 
z.NoAckReportHeader.Response.MarshalHash(); err != nil { + if oTemp, err := z.NoAckReportHeader.NodeID.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + o = hsp.AppendTime(o, z.NoAckReportHeader.Timestamp) + if oTemp, err := z.NoAckReportHeader.Response.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -201,6 +219,6 @@ func (z *SignedNoAckReportHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SignedNoAckReportHeader) Msgsize() (s int) { - s = 1 + 18 + 1 + 7 + z.NoAckReportHeader.NodeID.Msgsize() + 10 + hsp.TimeSize + 9 + z.NoAckReportHeader.Response.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 18 + 1 + 7 + z.NoAckReportHeader.NodeID.Msgsize() + 10 + hsp.TimeSize + 9 + z.NoAckReportHeader.Response.Msgsize() return } diff --git a/types/provideservice_gen.go b/types/provideservice_gen.go index c3925d3b1..78531baea 100644 --- a/types/provideservice_gen.go +++ b/types/provideservice_gen.go @@ -11,20 +11,18 @@ func (z *ProvideService) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) - if oTemp, err := z.ProvideServiceHeader.MarshalHash(); err != nil { + o = append(o, 0x83) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { + if oTemp, err := z.ProvideServiceHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := 
z.TransactionTypeMixin.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -34,7 +32,7 @@ func (z *ProvideService) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *ProvideService) Msgsize() (s int) { - s = 1 + 21 + z.ProvideServiceHeader.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 21 + z.ProvideServiceHeader.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() return } @@ -43,13 +41,21 @@ func (z *ProvideServiceHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 8 - o = append(o, 0x88, 0x88) - if oTemp, err := z.TokenType.MarshalHash(); err != nil { + o = append(o, 0x88) + o = hsp.AppendUint64(o, z.GasPrice) + o = hsp.AppendFloat64(o, z.LoadAvgPerCPU) + o = hsp.AppendUint64(o, z.Memory) + if oTemp, err := z.NodeID.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x88) + if oTemp, err := z.Nonce.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + o = hsp.AppendUint64(o, z.Space) o = hsp.AppendArrayHeader(o, uint32(len(z.TargetUser))) for za0001 := range z.TargetUser { if oTemp, err := z.TargetUser[za0001].MarshalHash(); err != nil { @@ -58,35 +64,20 @@ func (z *ProvideServiceHeader) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x88) - o = hsp.AppendFloat64(o, z.LoadAvgPerCPU) - o = append(o, 0x88) - if oTemp, err := z.Nonce.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x88) - if oTemp, err := z.NodeID.MarshalHash(); err != nil { + if oTemp, err := z.TokenType.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x88) - o = 
hsp.AppendUint64(o, z.GasPrice) - o = append(o, 0x88) - o = hsp.AppendUint64(o, z.Space) - o = append(o, 0x88) - o = hsp.AppendUint64(o, z.Memory) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *ProvideServiceHeader) Msgsize() (s int) { - s = 1 + 10 + z.TokenType.Msgsize() + 11 + hsp.ArrayHeaderSize + s = 1 + 9 + hsp.Uint64Size + 14 + hsp.Float64Size + 7 + hsp.Uint64Size + 7 + z.NodeID.Msgsize() + 6 + z.Nonce.Msgsize() + 6 + hsp.Uint64Size + 11 + hsp.ArrayHeaderSize for za0001 := range z.TargetUser { s += z.TargetUser[za0001].Msgsize() } - s += 14 + hsp.Float64Size + 6 + z.Nonce.Msgsize() + 7 + z.NodeID.Msgsize() + 9 + hsp.Uint64Size + 6 + hsp.Uint64Size + 7 + hsp.Uint64Size + s += 10 + z.TokenType.Msgsize() return } diff --git a/types/request_type_gen.go b/types/request_type_gen.go index 709e66bb9..d8cc7da03 100644 --- a/types/request_type_gen.go +++ b/types/request_type_gen.go @@ -11,19 +11,18 @@ func (z NamedArg) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) + o = hsp.AppendString(o, z.Name) o, err = hsp.AppendIntf(o, z.Value) if err != nil { return } - o = append(o, 0x82) - o = hsp.AppendString(o, z.Name) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z NamedArg) Msgsize() (s int) { - s = 1 + 6 + hsp.GuessSize(z.Value) + 5 + hsp.StringPrefixSize + len(z.Name) + s = 1 + 5 + hsp.StringPrefixSize + len(z.Name) + 6 + hsp.GuessSize(z.Value) return } @@ -32,19 +31,17 @@ func (z *Query) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) o = hsp.AppendArrayHeader(o, uint32(len(z.Args))) for za0001 := range z.Args { // map header, size 2 - o = append(o, 0x82, 0x82) - o = hsp.AppendString(o, z.Args[za0001].Name) o = 
append(o, 0x82) + o = hsp.AppendString(o, z.Args[za0001].Name) o, err = hsp.AppendIntf(o, z.Args[za0001].Value) if err != nil { return } } - o = append(o, 0x82) o = hsp.AppendString(o, z.Pattern) return } @@ -64,22 +61,20 @@ func (z *QueryKey) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) + o = append(o, 0x83) + o = hsp.AppendUint64(o, z.ConnectionID) if oTemp, err := z.NodeID.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - o = hsp.AppendUint64(o, z.ConnectionID) - o = append(o, 0x83) o = hsp.AppendUint64(o, z.SeqNo) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *QueryKey) Msgsize() (s int) { - s = 1 + 7 + z.NodeID.Msgsize() + 13 + hsp.Uint64Size + 6 + hsp.Uint64Size + s = 1 + 13 + hsp.Uint64Size + 7 + z.NodeID.Msgsize() + 6 + hsp.Uint64Size return } @@ -102,45 +97,43 @@ func (z *Request) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - // map header, size 1 - o = append(o, 0x83, 0x83, 0x81, 0x81) - o = hsp.AppendArrayHeader(o, uint32(len(z.Payload.Queries))) - for za0001 := range z.Payload.Queries { - if oTemp, err := z.Payload.Queries[za0001].MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - // map header, size 2 - o = append(o, 0x83, 0x82, 0x82) - if oTemp, err := z.Header.RequestHeader.MarshalHash(); err != nil { + o = append(o, 0x83) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } + // map header, size 2 o = append(o, 0x82) - if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.Header.RequestHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := 
z.Envelope.MarshalHash(); err != nil { + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } + // map header, size 1 + o = append(o, 0x81) + o = hsp.AppendArrayHeader(o, uint32(len(z.Payload.Queries))) + for za0001 := range z.Payload.Queries { + if oTemp, err := z.Payload.Queries[za0001].MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Request) Msgsize() (s int) { - s = 1 + 8 + 1 + 8 + hsp.ArrayHeaderSize + s = 1 + 9 + z.Envelope.Msgsize() + 7 + 1 + 14 + z.Header.RequestHeader.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 8 + 1 + 8 + hsp.ArrayHeaderSize for za0001 := range z.Payload.Queries { s += z.Payload.Queries[za0001].Msgsize() } - s += 7 + 1 + 14 + z.Header.RequestHeader.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 9 + z.Envelope.Msgsize() return } @@ -149,40 +142,33 @@ func (z *RequestHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 8 - o = append(o, 0x88, 0x88) - o = hsp.AppendInt32(o, int32(z.QueryType)) o = append(o, 0x88) - if oTemp, err := z.QueriesHash.MarshalHash(); err != nil { + o = hsp.AppendUint64(o, z.BatchCount) + o = hsp.AppendUint64(o, z.ConnectionID) + if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x88) - if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { + if oTemp, err := z.NodeID.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x88) - if oTemp, err := z.NodeID.MarshalHash(); err != nil { + if oTemp, err := z.QueriesHash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x88) - o = 
hsp.AppendTime(o, z.Timestamp) - o = append(o, 0x88) - o = hsp.AppendUint64(o, z.ConnectionID) - o = append(o, 0x88) + o = hsp.AppendInt32(o, int32(z.QueryType)) o = hsp.AppendUint64(o, z.SeqNo) - o = append(o, 0x88) - o = hsp.AppendUint64(o, z.BatchCount) + o = hsp.AppendTime(o, z.Timestamp) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *RequestHeader) Msgsize() (s int) { - s = 1 + 10 + hsp.Int32Size + 12 + z.QueriesHash.Msgsize() + 11 + z.DatabaseID.Msgsize() + 7 + z.NodeID.Msgsize() + 10 + hsp.TimeSize + 13 + hsp.Uint64Size + 6 + hsp.Uint64Size + 11 + hsp.Uint64Size + s = 1 + 11 + hsp.Uint64Size + 13 + hsp.Uint64Size + 11 + z.DatabaseID.Msgsize() + 7 + z.NodeID.Msgsize() + 12 + z.QueriesHash.Msgsize() + 10 + hsp.Int32Size + 6 + hsp.Uint64Size + 10 + hsp.TimeSize return } @@ -191,7 +177,7 @@ func (z *RequestPayload) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 1 - o = append(o, 0x81, 0x81) + o = append(o, 0x81) o = hsp.AppendArrayHeader(o, uint32(len(z.Queries))) for za0001 := range z.Queries { if oTemp, err := z.Queries[za0001].MarshalHash(); err != nil { @@ -217,14 +203,13 @@ func (z *SignedRequestHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) - if oTemp, err := z.RequestHeader.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.RequestHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -234,6 +219,6 @@ func (z *SignedRequestHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the 
serialized message func (z *SignedRequestHeader) Msgsize() (s int) { - s = 1 + 14 + z.RequestHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 14 + z.RequestHeader.Msgsize() return } diff --git a/types/response_type_gen.go b/types/response_type_gen.go index 78c9db9c3..0c97de2eb 100644 --- a/types/response_type_gen.go +++ b/types/response_type_gen.go @@ -11,21 +11,19 @@ func (z *Response) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 + // map header, size 2 o = append(o, 0x82, 0x82) - if oTemp, err := z.Payload.MarshalHash(); err != nil { + if oTemp, err := z.Header.ResponseHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - // map header, size 2 - o = append(o, 0x82, 0x82, 0x82) - if oTemp, err := z.Header.ResponseHeader.MarshalHash(); err != nil { + if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.Payload.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -35,7 +33,7 @@ func (z *Response) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Response) Msgsize() (s int) { - s = 1 + 8 + z.Payload.Msgsize() + 7 + 1 + 15 + z.Header.ResponseHeader.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 7 + 1 + 15 + z.Header.ResponseHeader.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 8 + z.Payload.Msgsize() return } @@ -44,40 +42,33 @@ func (z *ResponseHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 8 - o = append(o, 0x88, 0x88) - if oTemp, err := 
z.Request.MarshalHash(); err != nil { + o = append(o, 0x88) + o = hsp.AppendInt64(o, z.AffectedRows) + o = hsp.AppendInt64(o, z.LastInsertID) + o = hsp.AppendUint64(o, z.LogOffset) + if oTemp, err := z.NodeID.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x88) if oTemp, err := z.PayloadHash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x88) - o = hsp.AppendInt64(o, z.LastInsertID) - o = append(o, 0x88) - o = hsp.AppendInt64(o, z.AffectedRows) - o = append(o, 0x88) - if oTemp, err := z.NodeID.MarshalHash(); err != nil { + if oTemp, err := z.Request.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x88) - o = hsp.AppendTime(o, z.Timestamp) - o = append(o, 0x88) o = hsp.AppendUint64(o, z.RowCount) - o = append(o, 0x88) - o = hsp.AppendUint64(o, z.LogOffset) + o = hsp.AppendTime(o, z.Timestamp) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *ResponseHeader) Msgsize() (s int) { - s = 1 + 8 + z.Request.Msgsize() + 12 + z.PayloadHash.Msgsize() + 13 + hsp.Int64Size + 13 + hsp.Int64Size + 7 + z.NodeID.Msgsize() + 10 + hsp.TimeSize + 9 + hsp.Uint64Size + 10 + hsp.Uint64Size + s = 1 + 13 + hsp.Int64Size + 13 + hsp.Int64Size + 10 + hsp.Uint64Size + 7 + z.NodeID.Msgsize() + 12 + z.PayloadHash.Msgsize() + 8 + z.Request.Msgsize() + 9 + hsp.Uint64Size + 10 + hsp.TimeSize return } @@ -86,11 +77,19 @@ func (z *ResponsePayload) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) + o = append(o, 0x83) + o = hsp.AppendArrayHeader(o, uint32(len(z.Columns))) + for za0001 := range z.Columns { + o = hsp.AppendString(o, z.Columns[za0001]) + } + o = hsp.AppendArrayHeader(o, uint32(len(z.DeclTypes))) + for za0002 := range z.DeclTypes { + o = hsp.AppendString(o, z.DeclTypes[za0002]) + 
} o = hsp.AppendArrayHeader(o, uint32(len(z.Rows))) for za0003 := range z.Rows { // map header, size 1 - o = append(o, 0x81, 0x81) + o = append(o, 0x81) o = hsp.AppendArrayHeader(o, uint32(len(z.Rows[za0003].Values))) for za0004 := range z.Rows[za0003].Values { o, err = hsp.AppendIntf(o, z.Rows[za0003].Values[za0004]) @@ -99,29 +98,12 @@ func (z *ResponsePayload) MarshalHash() (o []byte, err error) { } } } - o = append(o, 0x83) - o = hsp.AppendArrayHeader(o, uint32(len(z.Columns))) - for za0001 := range z.Columns { - o = hsp.AppendString(o, z.Columns[za0001]) - } - o = append(o, 0x83) - o = hsp.AppendArrayHeader(o, uint32(len(z.DeclTypes))) - for za0002 := range z.DeclTypes { - o = hsp.AppendString(o, z.DeclTypes[za0002]) - } return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *ResponsePayload) Msgsize() (s int) { - s = 1 + 5 + hsp.ArrayHeaderSize - for za0003 := range z.Rows { - s += 1 + 7 + hsp.ArrayHeaderSize - for za0004 := range z.Rows[za0003].Values { - s += hsp.GuessSize(z.Rows[za0003].Values[za0004]) - } - } - s += 8 + hsp.ArrayHeaderSize + s = 1 + 8 + hsp.ArrayHeaderSize for za0001 := range z.Columns { s += hsp.StringPrefixSize + len(z.Columns[za0001]) } @@ -129,6 +111,13 @@ func (z *ResponsePayload) Msgsize() (s int) { for za0002 := range z.DeclTypes { s += hsp.StringPrefixSize + len(z.DeclTypes[za0002]) } + s += 5 + hsp.ArrayHeaderSize + for za0003 := range z.Rows { + s += 1 + 7 + hsp.ArrayHeaderSize + for za0004 := range z.Rows[za0003].Values { + s += hsp.GuessSize(z.Rows[za0003].Values[za0004]) + } + } return } @@ -137,7 +126,7 @@ func (z *ResponseRow) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 1 - o = append(o, 0x81, 0x81) + o = append(o, 0x81) o = hsp.AppendArrayHeader(o, uint32(len(z.Values))) for za0001 := range z.Values { o, err = hsp.AppendIntf(o, z.Values[za0001]) @@ -162,14 +151,13 @@ func (z *SignedResponseHeader) 
MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) - if oTemp, err := z.ResponseHeader.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.ResponseHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -179,6 +167,6 @@ func (z *SignedResponseHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SignedResponseHeader) Msgsize() (s int) { - s = 1 + 15 + z.ResponseHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 15 + z.ResponseHeader.Msgsize() return } diff --git a/types/transfer_gen.go b/types/transfer_gen.go index 8c918b251..0fcc22b0f 100644 --- a/types/transfer_gen.go +++ b/types/transfer_gen.go @@ -11,20 +11,18 @@ func (z *Transfer) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) - if oTemp, err := z.TransferHeader.MarshalHash(); err != nil { + o = append(o, 0x83) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.TransferHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -34,7 +32,7 @@ func (z *Transfer) MarshalHash() (o []byte, err error) { // Msgsize returns 
an upper bound estimate of the number of bytes occupied by the serialized message func (z *Transfer) Msgsize() (s int) { - s = 1 + 15 + z.TransferHeader.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + 15 + z.TransferHeader.Msgsize() return } @@ -43,37 +41,33 @@ func (z *TransferHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 5 - o = append(o, 0x85, 0x85) - if oTemp, err := z.TokenType.MarshalHash(); err != nil { + o = append(o, 0x85) + o = hsp.AppendUint64(o, z.Amount) + if oTemp, err := z.Nonce.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x85) - if oTemp, err := z.Nonce.MarshalHash(); err != nil { + if oTemp, err := z.Receiver.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x85) if oTemp, err := z.Sender.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x85) - if oTemp, err := z.Receiver.MarshalHash(); err != nil { + if oTemp, err := z.TokenType.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x85) - o = hsp.AppendUint64(o, z.Amount) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *TransferHeader) Msgsize() (s int) { - s = 1 + 10 + z.TokenType.Msgsize() + 6 + z.Nonce.Msgsize() + 7 + z.Sender.Msgsize() + 9 + z.Receiver.Msgsize() + 7 + hsp.Uint64Size + s = 1 + 7 + hsp.Uint64Size + 6 + z.Nonce.Msgsize() + 9 + z.Receiver.Msgsize() + 7 + z.Sender.Msgsize() + 10 + z.TokenType.Msgsize() return } diff --git a/types/update_service_type_gen.go b/types/update_service_type_gen.go index c134c6635..e3bbb2f26 100644 --- a/types/update_service_type_gen.go +++ b/types/update_service_type_gen.go 
@@ -11,17 +11,16 @@ func (z *SignedUpdateServiceHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - // map header, size 2 - o = append(o, 0x82, 0x82, 0x82, 0x82) - o = hsp.AppendInt32(o, int32(z.UpdateServiceHeader.Op)) o = append(o, 0x82) - if oTemp, err := z.UpdateServiceHeader.Instance.MarshalHash(); err != nil { + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } + // map header, size 2 o = append(o, 0x82) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + o = hsp.AppendInt32(o, int32(z.UpdateServiceHeader.Op)) + if oTemp, err := z.UpdateServiceHeader.Instance.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -31,7 +30,7 @@ func (z *SignedUpdateServiceHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SignedUpdateServiceHeader) Msgsize() (s int) { - s = 1 + 20 + 1 + 3 + hsp.Int32Size + 9 + z.UpdateServiceHeader.Instance.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 20 + 1 + 3 + hsp.Int32Size + 9 + z.UpdateServiceHeader.Instance.Msgsize() return } @@ -40,14 +39,13 @@ func (z *UpdateService) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) - if oTemp, err := z.Header.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.Envelope.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - if oTemp, err := z.Envelope.MarshalHash(); err != nil { + if oTemp, err := z.Header.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -57,7 +55,7 @@ func (z *UpdateService) MarshalHash() (o []byte, err error) { // Msgsize 
returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *UpdateService) Msgsize() (s int) { - s = 1 + 7 + z.Header.Msgsize() + 9 + z.Envelope.Msgsize() + s = 1 + 9 + z.Envelope.Msgsize() + 7 + z.Header.Msgsize() return } @@ -66,13 +64,12 @@ func (z *UpdateServiceHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) if oTemp, err := z.Instance.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) o = hsp.AppendInt32(o, int32(z.Op)) return } diff --git a/types/updatebilling_gen.go b/types/updatebilling_gen.go index fb471f34f..79a626ffb 100644 --- a/types/updatebilling_gen.go +++ b/types/updatebilling_gen.go @@ -11,20 +11,19 @@ func (z *MinerIncome) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) + o = hsp.AppendUint64(o, z.Income) if oTemp, err := z.Miner.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) - o = hsp.AppendUint64(o, z.Income) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *MinerIncome) Msgsize() (s int) { - s = 1 + 6 + z.Miner.Msgsize() + 7 + hsp.Uint64Size + s = 1 + 7 + hsp.Uint64Size + 6 + z.Miner.Msgsize() return } @@ -33,20 +32,18 @@ func (z *UpdateBilling) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) - if oTemp, err := z.UpdateBillingHeader.MarshalHash(); err != nil { + o = append(o, 0x83) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { return nil, err } else 
{ o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.UpdateBillingHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -56,7 +53,7 @@ func (z *UpdateBilling) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *UpdateBilling) Msgsize() (s int) { - s = 1 + 20 + z.UpdateBillingHeader.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + 20 + z.UpdateBillingHeader.Msgsize() return } @@ -65,7 +62,17 @@ func (z *UpdateBillingHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) + o = append(o, 0x83) + if oTemp, err := z.Nonce.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + if oTemp, err := z.Receiver.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } o = hsp.AppendArrayHeader(o, uint32(len(z.Users))) for za0001 := range z.Users { if z.Users[za0001] == nil { @@ -78,24 +85,12 @@ func (z *UpdateBillingHeader) MarshalHash() (o []byte, err error) { } } } - o = append(o, 0x83) - if oTemp, err := z.Nonce.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x83) - if oTemp, err := z.Receiver.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *UpdateBillingHeader) Msgsize() (s int) { - s = 1 + 6 + hsp.ArrayHeaderSize + s = 1 + 6 + z.Nonce.Msgsize() + 9 + z.Receiver.Msgsize() + 6 + hsp.ArrayHeaderSize for za0001 := range 
z.Users { if z.Users[za0001] == nil { s += hsp.NilSize @@ -103,7 +98,6 @@ func (z *UpdateBillingHeader) Msgsize() (s int) { s += z.Users[za0001].Msgsize() } } - s += 6 + z.Nonce.Msgsize() + 9 + z.Receiver.Msgsize() return } @@ -112,37 +106,34 @@ func (z *UserCost) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) + o = append(o, 0x83) + o = hsp.AppendUint64(o, z.Cost) o = hsp.AppendArrayHeader(o, uint32(len(z.Miners))) for za0001 := range z.Miners { if z.Miners[za0001] == nil { o = hsp.AppendNil(o) } else { // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) if oTemp, err := z.Miners[za0001].Miner.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) o = hsp.AppendUint64(o, z.Miners[za0001].Income) } } - o = append(o, 0x83) if oTemp, err := z.User.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - o = hsp.AppendUint64(o, z.Cost) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *UserCost) Msgsize() (s int) { - s = 1 + 7 + hsp.ArrayHeaderSize + s = 1 + 5 + hsp.Uint64Size + 7 + hsp.ArrayHeaderSize for za0001 := range z.Miners { if z.Miners[za0001] == nil { s += hsp.NilSize @@ -150,6 +141,6 @@ func (z *UserCost) Msgsize() (s int) { s += 1 + 6 + z.Miners[za0001].Miner.Msgsize() + 7 + hsp.Uint64Size } } - s += 5 + z.User.Msgsize() + 5 + hsp.Uint64Size + s += 5 + z.User.Msgsize() return } diff --git a/types/updatepermission_gen.go b/types/updatepermission_gen.go index 443bfaa78..9c54d6e2f 100644 --- a/types/updatepermission_gen.go +++ b/types/updatepermission_gen.go @@ -11,20 +11,18 @@ func (z *UpdatePermission) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) - if oTemp, err := 
z.UpdatePermissionHeader.MarshalHash(); err != nil { + o = append(o, 0x83) + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x83) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.UpdatePermissionHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -34,7 +32,7 @@ func (z *UpdatePermission) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *UpdatePermission) Msgsize() (s int) { - s = 1 + 23 + z.UpdatePermissionHeader.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() + 23 + z.UpdatePermissionHeader.Msgsize() return } @@ -43,25 +41,22 @@ func (z *UpdatePermissionHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 4 - o = append(o, 0x84, 0x84) - if oTemp, err := z.Permission.MarshalHash(); err != nil { + o = append(o, 0x84) + if oTemp, err := z.Nonce.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x84) - if oTemp, err := z.Nonce.MarshalHash(); err != nil { + if oTemp, err := z.Permission.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x84) if oTemp, err := z.TargetSQLChain.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x84) if oTemp, err := z.TargetUser.MarshalHash(); err != nil { return nil, err } else { @@ -72,6 +67,6 @@ func (z *UpdatePermissionHeader) MarshalHash() (o []byte, err 
error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *UpdatePermissionHeader) Msgsize() (s int) { - s = 1 + 11 + z.Permission.Msgsize() + 6 + z.Nonce.Msgsize() + 15 + z.TargetSQLChain.Msgsize() + 11 + z.TargetUser.Msgsize() + s = 1 + 6 + z.Nonce.Msgsize() + 11 + z.Permission.Msgsize() + 15 + z.TargetSQLChain.Msgsize() + 11 + z.TargetUser.Msgsize() return } diff --git a/xenomint/types/block_gen.go b/xenomint/types/block_gen.go index 4c19a8bf2..ba4ad7732 100644 --- a/xenomint/types/block_gen.go +++ b/xenomint/types/block_gen.go @@ -11,19 +11,6 @@ func (z *Block) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - // map header, size 2 - o = append(o, 0x83, 0x83, 0x82, 0x82) - if oTemp, err := z.SignedBlockHeader.BlockHeader.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = append(o, 0x82) - if oTemp, err := z.SignedBlockHeader.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } o = append(o, 0x83) o = hsp.AppendArrayHeader(o, uint32(len(z.ReadQueries))) for za0001 := range z.ReadQueries { @@ -37,7 +24,18 @@ func (z *Block) MarshalHash() (o []byte, err error) { } } } - o = append(o, 0x83) + // map header, size 2 + o = append(o, 0x82) + if oTemp, err := z.SignedBlockHeader.BlockHeader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + if oTemp, err := z.SignedBlockHeader.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } o = hsp.AppendArrayHeader(o, uint32(len(z.WriteQueries))) for za0002 := range z.WriteQueries { if z.WriteQueries[za0002] == nil { @@ -55,7 +53,7 @@ func (z *Block) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func 
(z *Block) Msgsize() (s int) { - s = 1 + 18 + 1 + 12 + z.SignedBlockHeader.BlockHeader.Msgsize() + 28 + z.SignedBlockHeader.DefaultHashSignVerifierImpl.Msgsize() + 12 + hsp.ArrayHeaderSize + s = 1 + 12 + hsp.ArrayHeaderSize for za0001 := range z.ReadQueries { if z.ReadQueries[za0001] == nil { s += hsp.NilSize @@ -63,7 +61,7 @@ func (z *Block) Msgsize() (s int) { s += z.ReadQueries[za0001].Msgsize() } } - s += 13 + hsp.ArrayHeaderSize + s += 18 + 1 + 12 + z.SignedBlockHeader.BlockHeader.Msgsize() + 28 + z.SignedBlockHeader.DefaultHashSignVerifierImpl.Msgsize() + 13 + hsp.ArrayHeaderSize for za0002 := range z.WriteQueries { if z.WriteQueries[za0002] == nil { s += hsp.NilSize @@ -79,40 +77,35 @@ func (z *BlockHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 6 - o = append(o, 0x86, 0x86) + o = append(o, 0x86) if oTemp, err := z.GenesisHash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) - if oTemp, err := z.ParentHash.MarshalHash(); err != nil { + if oTemp, err := z.MerkleRoot.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) - if oTemp, err := z.MerkleRoot.MarshalHash(); err != nil { + if oTemp, err := z.ParentHash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) - o = hsp.AppendInt32(o, z.Version) - o = append(o, 0x86) if oTemp, err := z.Producer.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x86) o = hsp.AppendTime(o, z.Timestamp) + o = hsp.AppendInt32(o, z.Version) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BlockHeader) Msgsize() (s int) { - s = 1 + 12 + z.GenesisHash.Msgsize() + 11 + z.ParentHash.Msgsize() + 11 + z.MerkleRoot.Msgsize() + 8 + hsp.Int32Size + 9 + z.Producer.Msgsize() + 10 + hsp.TimeSize + 
s = 1 + 12 + z.GenesisHash.Msgsize() + 11 + z.MerkleRoot.Msgsize() + 11 + z.ParentHash.Msgsize() + 9 + z.Producer.Msgsize() + 10 + hsp.TimeSize + 8 + hsp.Int32Size return } @@ -121,13 +114,12 @@ func (z *SignedBlockHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82, 0x82) + o = append(o, 0x82) if oTemp, err := z.BlockHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = append(o, 0x82) if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { diff --git a/xenomint/types/common_gen.go b/xenomint/types/common_gen.go index 8fdb3ee08..d0fb74245 100644 --- a/xenomint/types/common_gen.go +++ b/xenomint/types/common_gen.go @@ -11,49 +11,46 @@ func (z *DefaultHashSignVerifierImpl) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 3 - o = append(o, 0x83, 0x83) - if z.Signee == nil { + o = append(o, 0x83) + if oTemp, err := z.DataHash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + if z.Signature == nil { o = hsp.AppendNil(o) } else { - if oTemp, err := z.Signee.MarshalHash(); err != nil { + if oTemp, err := z.Signature.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x83) - if z.Signature == nil { + if z.Signee == nil { o = hsp.AppendNil(o) } else { - if oTemp, err := z.Signature.MarshalHash(); err != nil { + if oTemp, err := z.Signee.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } } - o = append(o, 0x83) - if oTemp, err := z.DataHash.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *DefaultHashSignVerifierImpl) Msgsize() (s int) { - s = 1 + 7 - if z.Signee == 
nil { + s = 1 + 9 + z.DataHash.Msgsize() + 10 + if z.Signature == nil { s += hsp.NilSize } else { - s += z.Signee.Msgsize() + s += z.Signature.Msgsize() } - s += 10 - if z.Signature == nil { + s += 7 + if z.Signee == nil { s += hsp.NilSize } else { - s += z.Signature.Msgsize() + s += z.Signee.Msgsize() } - s += 9 + z.DataHash.Msgsize() return } From c86dca0e2d0e15fbf764dcd61aaaa3f2cf41684a Mon Sep 17 00:00:00 2001 From: auxten Date: Mon, 28 Jan 2019 12:50:09 +0800 Subject: [PATCH 237/302] Regen hsp --- types/bp_block_gen.go | 49 +++++++++------------------------- types/init_service_type_gen.go | 7 ++--- 2 files changed, 17 insertions(+), 39 deletions(-) diff --git a/types/bp_block_gen.go b/types/bp_block_gen.go index b98854a4c..2e9ac9701 100644 --- a/types/bp_block_gen.go +++ b/types/bp_block_gen.go @@ -11,8 +11,14 @@ func (z *BPBlock) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - o = append(o, 0x82) - if oTemp, err := z.SignedHeader.MarshalHash(); err != nil { + // map header, size 2 + o = append(o, 0x82, 0x82) + if oTemp, err := z.SignedHeader.BPHeader.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + if oTemp, err := z.SignedHeader.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -30,7 +36,7 @@ func (z *BPBlock) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BPBlock) Msgsize() (s int) { - s = 1 + 13 + z.SignedHeader.Msgsize() + 13 + hsp.ArrayHeaderSize + s = 1 + 13 + 1 + 9 + z.SignedHeader.BPHeader.Msgsize() + 28 + z.SignedHeader.DefaultHashSignVerifierImpl.Msgsize() + 13 + hsp.ArrayHeaderSize for za0001 := range z.Transactions { s += z.Transactions[za0001].Msgsize() } @@ -73,52 +79,23 @@ func (z *BPHeader) Msgsize() (s int) { func (z *BPSignedHeader) MarshalHash() (o []byte, err error) { var b 
[]byte o = hsp.Require(b, z.Msgsize()) - // map header, size 4 - o = append(o, 0x84) + // map header, size 2 + o = append(o, 0x82) if oTemp, err := z.BPHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - if oTemp, err := z.BlockHash.MarshalHash(); err != nil { + if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - if z.Signature == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Signature.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - if z.Signee == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Signee.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BPSignedHeader) Msgsize() (s int) { - s = 1 + 9 + z.BPHeader.Msgsize() + 10 + z.BlockHash.Msgsize() + 10 - if z.Signature == nil { - s += hsp.NilSize - } else { - s += z.Signature.Msgsize() - } - s += 7 - if z.Signee == nil { - s += hsp.NilSize - } else { - s += z.Signee.Msgsize() - } + s = 1 + 9 + z.BPHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() return } diff --git a/types/init_service_type_gen.go b/types/init_service_type_gen.go index 4e8438bfa..dcb38de3c 100644 --- a/types/init_service_type_gen.go +++ b/types/init_service_type_gen.go @@ -90,10 +90,11 @@ func (z *InitServiceResponseHeader) Msgsize() (s int) { func (z *ResourceMeta) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) - // map header, size 8 - o = append(o, 0x88) + // map header, size 9 + o = append(o, 0x89) o = hsp.AppendFloat64(o, z.ConsistencyLevel) o = hsp.AppendString(o, z.EncryptionKey) + o = hsp.AppendInt(o, z.IsolationLevel) o = hsp.AppendFloat64(o, z.LoadAvgPerCPU) o = hsp.AppendUint64(o, z.Memory) o = hsp.AppendUint16(o, z.Node) @@ 
-112,7 +113,7 @@ func (z *ResourceMeta) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *ResourceMeta) Msgsize() (s int) { - s = 1 + 17 + hsp.Float64Size + 14 + hsp.StringPrefixSize + len(z.EncryptionKey) + 14 + hsp.Float64Size + 7 + hsp.Uint64Size + 5 + hsp.Uint16Size + 6 + hsp.Uint64Size + 13 + hsp.ArrayHeaderSize + s = 1 + 17 + hsp.Float64Size + 14 + hsp.StringPrefixSize + len(z.EncryptionKey) + 15 + hsp.IntSize + 14 + hsp.Float64Size + 7 + hsp.Uint64Size + 5 + hsp.Uint16Size + 6 + hsp.Uint64Size + 13 + hsp.ArrayHeaderSize for za0001 := range z.TargetMiners { s += z.TargetMiners[za0001].Msgsize() } From b3e28cf8e699dc13d613a4fdcf6d5fa59d3996a5 Mon Sep 17 00:00:00 2001 From: auxten Date: Mon, 28 Jan 2019 14:42:45 +0800 Subject: [PATCH 238/302] Fix typo --- cmd/cql-explorer/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/cql-explorer/README.md b/cmd/cql-explorer/README.md index c321918ba..3062e854d 100644 --- a/cmd/cql-explorer/README.md +++ b/cmd/cql-explorer/README.md @@ -36,7 +36,7 @@ Usage of cql-explorer: -listen string Listen address for http explorer api (default "127.0.0.1:4665") -password string - Master key password for covenantsql + Master key password for covenantsql ``` ### API From 37fc344ae06039a58d625c7e6dc15509bb50f480 Mon Sep 17 00:00:00 2001 From: auxten Date: Mon, 28 Jan 2019 15:59:46 +0800 Subject: [PATCH 239/302] Format code --- cmd/cql-minerd/integration_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index a0d56617c..b8984630c 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -759,7 +759,7 @@ func benchMiner(b *testing.B, minerCount uint16, bypassSign bool, useEventualCon // create meta := client.ResourceMeta{ ResourceMeta: types.ResourceMeta{ - Node: minerCount, + Node: 
minerCount, UseEventualConsistency: useEventualConsistency, }, } From 520d72503c41e6c2bf685959b8a5596f2c704582 Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 28 Jan 2019 16:30:44 +0800 Subject: [PATCH 240/302] Skip compatibility test if branch name contains '/beta_' --- .gitlab-ci.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b42fa3cbc..2ae123e4b 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -34,6 +34,9 @@ compatibility-testnet: script: - set -o errexit - set -o pipefail + - commit=$(git rev-parse --short HEAD) + - branch=$(git branch -rv |grep $commit | awk '{print $1}') + - if [[ $branch =~ "/beta_" ]]; then exit 0; fi - make clean - make -j8 client - go test -tags "$UNITTESTTAGS" -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ From 733418f9e6fafc3d2eb6b84d46e5b83843e163d6 Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 28 Jan 2019 17:33:19 +0800 Subject: [PATCH 241/302] Remove '-x' for test_my_project in gitlabci. 
--- .gitlab-ci.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 2ae123e4b..cec6a3234 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -26,7 +26,6 @@ before_script: test-my-project: stage: test script: - - set -x - ./alltest.sh compatibility-testnet: From 0117b4f9971ab9f4a98ae8b916524b2536758c66 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 28 Jan 2019 18:47:05 +0800 Subject: [PATCH 242/302] Set current bp to leader bp after bp kills on cqld_test --- cmd/cqld/cqld_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/cmd/cqld/cqld_test.go b/cmd/cqld/cqld_test.go index 7d3c267b6..bbadb0ae3 100644 --- a/cmd/cqld/cqld_test.go +++ b/cmd/cqld/cqld_test.go @@ -72,6 +72,9 @@ func TestCQLD(t *testing.T) { So(err, ShouldBeNil) time.Sleep(15 * time.Second) + // set current bp to leader bp + rpc.SetCurrentBP(route.GetBPs()[0]) + // The other peers should be waiting var ( req = &types.FetchLastIrreversibleBlockReq{} From bba27f1b33b943d460f7a2bc421ba26363a5fa50 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Mon, 28 Jan 2019 19:05:52 +0800 Subject: [PATCH 243/302] Add missing private key and rename apinode to fullnode --- docker-compose.yml | 104 +++++++++--------- .../{apinode_0 => fullnode_0}/config.yaml | 0 test/service/fullnode_0/private.key | 1 + 3 files changed, 53 insertions(+), 52 deletions(-) rename test/service/{apinode_0 => fullnode_0}/config.yaml (100%) create mode 100644 test/service/fullnode_0/private.key diff --git a/docker-compose.yml b/docker-compose.yml index fc2bfd071..35f20051a 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,4 +1,4 @@ -version: "3" +version: '3' services: covenantsql_bp_0: @@ -6,133 +6,133 @@ services: container_name: covenantsql_bp_0 restart: always ports: - - "11099:4661" - - "12099:4665" + - '11099:4661' + - '12099:4665' environment: COVENANT_ROLE: blockproducer COVENANT_CONF: ./node_0/config.yaml - METRIC_WEB_ADDR: "0.0.0.0:4665" + METRIC_WEB_ADDR: '0.0.0.0:4665' volumes: 
- ./test/service/node_0/:/app/node_0/ networks: default: ipv4_address: 172.254.1.2 logging: - driver: "json-file" + driver: 'json-file' options: - max-size: "1m" - max-file: "10" + max-size: '1m' + max-file: '10' covenantsql_bp_1: image: covenantsql/covenantsql:latest container_name: covenantsql_bp_1 restart: always ports: - - "11100:4661" - - "12100:4665" + - '11100:4661' + - '12100:4665' environment: COVENANT_ROLE: blockproducer COVENANT_CONF: ./node_1/config.yaml - METRIC_WEB_ADDR: "0.0.0.0:4665" + METRIC_WEB_ADDR: '0.0.0.0:4665' volumes: - ./test/service/node_1/:/app/node_1/ networks: default: ipv4_address: 172.254.1.3 logging: - driver: "json-file" + driver: 'json-file' options: - max-size: "1m" - max-file: "10" + max-size: '1m' + max-file: '10' covenantsql_bp_2: image: covenantsql/covenantsql:latest container_name: covenantsql_bp_2 restart: always ports: - - "11101:4661" - - "12101:4665" + - '11101:4661' + - '12101:4665' environment: COVENANT_ROLE: blockproducer COVENANT_CONF: ./node_2/config.yaml - METRIC_WEB_ADDR: "0.0.0.0:4665" + METRIC_WEB_ADDR: '0.0.0.0:4665' volumes: - ./test/service/node_2/:/app/node_2/ networks: default: ipv4_address: 172.254.1.4 logging: - driver: "json-file" + driver: 'json-file' options: - max-size: "1m" - max-file: "10" + max-size: '1m' + max-file: '10' covenantsql_miner_0: image: covenantsql/covenantsql:latest container_name: covenantsql_miner_0 restart: always ports: - - "11102:4661" - - "12102:4665" + - '11102:4661' + - '12102:4665' environment: COVENANT_ROLE: miner COVENANT_CONF: ./node_miner_0/config.yaml - METRIC_WEB_ADDR: "0.0.0.0:4665" + METRIC_WEB_ADDR: '0.0.0.0:4665' volumes: - ./test/service/node_miner_0/:/app/node_miner_0/ networks: default: ipv4_address: 172.254.1.5 logging: - driver: "json-file" + driver: 'json-file' options: - max-size: "1m" - max-file: "10" + max-size: '1m' + max-file: '10' covenantsql_miner_1: image: covenantsql/covenantsql:latest container_name: covenantsql_miner_1 restart: always ports: - - 
"11103:4661" - - "12103:4665" + - '11103:4661' + - '12103:4665' environment: COVENANT_ROLE: miner COVENANT_CONF: ./node_miner_1/config.yaml - METRIC_WEB_ADDR: "0.0.0.0:4665" + METRIC_WEB_ADDR: '0.0.0.0:4665' volumes: - ./test/service/node_miner_1/:/app/node_miner_1/ networks: default: ipv4_address: 172.254.1.6 logging: - driver: "json-file" + driver: 'json-file' options: - max-size: "1m" - max-file: "10" + max-size: '1m' + max-file: '10' covenantsql_miner_2: image: covenantsql/covenantsql:latest container_name: covenantsql_miner_2 restart: always ports: - - "11104:4661" - - "12104:4665" + - '11104:4661' + - '12104:4665' environment: COVENANT_ROLE: miner COVENANT_CONF: ./node_miner_2/config.yaml - METRIC_WEB_ADDR: "0.0.0.0:4665" + METRIC_WEB_ADDR: '0.0.0.0:4665' volumes: - ./test/service/node_miner_2/:/app/node_miner_2/ networks: default: ipv4_address: 172.254.1.7 logging: - driver: "json-file" + driver: 'json-file' options: - max-size: "1m" - max-file: "10" + max-size: '1m' + max-file: '10' covenantsql_adapter: image: covenantsql/covenantsql:latest container_name: covenantsql_adapter restart: always ports: - - "11105:4661" + - '11105:4661' environment: COVENANT_ROLE: adapter COVENANT_CONF: ./node_adapter/config.yaml @@ -148,8 +148,8 @@ services: container_name: covenantsql_mysql_adapter restart: always ports: - - "11107:4664" - command: ["-listen", "0.0.0.0:4664"] + - '11107:4664' + command: ['-listen', '0.0.0.0:4664'] environment: COVENANT_ROLE: mysql-adapter COVENANT_CONF: ./node_mysql_adapter/config.yaml @@ -159,16 +159,16 @@ services: default: ipv4_address: 172.254.1.10 logging: - driver: "json-file" + driver: 'json-file' options: - max-size: "1m" - max-file: "10" + max-size: '1m' + max-file: '10' covenantsql_observer: image: covenantsql/covenantsql-observer:latest container_name: covenantsql_observer restart: always ports: - - "11108:80" + - '11108:80' environment: COVENANT_ROLE: observer COVENANT_CONF: ./node_observer/config.yaml @@ -180,16 +180,16 @@ 
services: default: ipv4_address: 172.254.1.9 logging: - driver: "json-file" + driver: 'json-file' options: - max-size: "10m" - covenantsql_api_0: + max-size: '10m' + covenantsql_fn_0: image: covenantsql/covenantsql:latest - container_name: covenantsql_api_0 + container_name: covenantsql_fn_0 restart: always ports: - - "11110:8546" - command: ["-mode", "api", "-wsapi", ":8546"] + - '11110:8546' + command: ['-wsapi', ':8546'] # entrypoint: ["sh"] environment: COVENANT_ROLE: blockproducer @@ -200,9 +200,9 @@ services: default: ipv4_address: 172.254.1.11 logging: - driver: "json-file" + driver: 'json-file' options: - max-size: "10m" + max-size: '10m' networks: default: diff --git a/test/service/apinode_0/config.yaml b/test/service/fullnode_0/config.yaml similarity index 100% rename from test/service/apinode_0/config.yaml rename to test/service/fullnode_0/config.yaml diff --git a/test/service/fullnode_0/private.key b/test/service/fullnode_0/private.key new file mode 100644 index 000000000..900b35b9a --- /dev/null +++ b/test/service/fullnode_0/private.key @@ -0,0 +1 @@ +MaXCmBDcFoQiPL8svDi36Z7MHRHg681uVL7jYd2hDgYo5E8G3yk8n84tfajNkd3Ypbhuc2u12o8x8nrq53dM3g5r3sAq5A \ No newline at end of file From 4f075ff3f6362e854e6b259fc1ce04378023d7a3 Mon Sep 17 00:00:00 2001 From: Ggicci Date: Mon, 28 Jan 2019 19:11:38 +0800 Subject: [PATCH 244/302] Rename folder apinode_0 to fullnode_0 --- docker-compose.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index 35f20051a..7117c291c 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -193,9 +193,9 @@ services: # entrypoint: ["sh"] environment: COVENANT_ROLE: blockproducer - COVENANT_CONF: ./apinode_0/config.yaml + COVENANT_CONF: ./fullnode_0/config.yaml volumes: - - ./test/service/apinode_0/:/app/apinode_0/ + - ./test/service/fullnode_0/:/app/fullnode_0/ networks: default: ipv4_address: 172.254.1.11 From e9131312bd5204cec07ea7452139b81cb1c134eb Mon Sep 17 00:00:00 
2001 From: Ggicci Date: Mon, 28 Jan 2019 19:27:16 +0800 Subject: [PATCH 245/302] Set fullnode_0 config StartupSyncHoles to false --- test/service/fullnode_0/config.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/test/service/fullnode_0/config.yaml b/test/service/fullnode_0/config.yaml index 4e0192e9b..a54e62c05 100644 --- a/test/service/fullnode_0/config.yaml +++ b/test/service/fullnode_0/config.yaml @@ -1,5 +1,5 @@ IsTestMode: true -StartupSyncHoles: true +StartupSyncHoles: false WorkingRoot: ./ PubKeyStoreFile: public.keystore PrivateKeyFile: private.key @@ -19,21 +19,21 @@ DNSSeed: - 202.46.34.75 - 202.46.34.76 BlockProducer: - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + PublicKey: '02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24' NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: a: 313283 b: 0 c: 0 d: 0 - ChainFileName: "chain.db" + ChainFileName: 'chain.db' BPGenesisInfo: Version: 1 BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 Producer: 0000000000000000000000000000000000000000000000000000000000000001 MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 - Timestamp: "2019-01-10T12:49:07+08:00" + Timestamp: '2019-01-10T12:49:07+08:00' BaseAccounts: - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 StableCoinBalance: 10000000000000000000 @@ -55,7 +55,7 @@ KnownNodes: c: 0 d: 0 Addr: 172.254.1.2:4661 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + PublicKey: '02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24' Role: Leader - ID: 00000041772ecd779c68a3928d12675d9a65dce02f2ad6907f2cf53013f7e652 Role: Client From 3999df97c5fd38683af3ddb55d28bad77dad11d3 Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Mon, 28 Jan 2019 20:59:01 +0800 Subject: 
[PATCH 246/302] Fix bug: bad critical section for multiple values --- rpc/rpcutil.go | 6 ++++-- rpc/rpcutil_test.go | 26 ++++++++++++++++++++++++++ 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/rpc/rpcutil.go b/rpc/rpcutil.go index ede10f061..3588ca5a5 100644 --- a/rpc/rpcutil.go +++ b/rpc/rpcutil.go @@ -182,7 +182,9 @@ func recordRPCCost(startTime time.Time, method string, err error) { // Optimistically, val will not be nil except the first Call of method // expvar uses sync.Map // So, we try it first without lock - if val = expvar.Get(name); val == nil { + val = expvar.Get(name) + valC = expvar.Get(nameC) + if val == nil || valC == nil { callRPCExpvarLock.Lock() val = expvar.Get(name) if val == nil { @@ -191,9 +193,9 @@ func recordRPCCost(startTime time.Time, method string, err error) { } callRPCExpvarLock.Unlock() val = expvar.Get(name) + valC = expvar.Get(nameC) } val.(mw.Metric).Add(costTime.Seconds()) - valC = expvar.Get(nameC) valC.(mw.Metric).Add(1) return } diff --git a/rpc/rpcutil_test.go b/rpc/rpcutil_test.go index 90a432095..ebde85893 100644 --- a/rpc/rpcutil_test.go +++ b/rpc/rpcutil_test.go @@ -18,6 +18,7 @@ package rpc import ( "context" + "fmt" "os" "path/filepath" "runtime" @@ -443,3 +444,28 @@ func BenchmarkPersistentCaller_Call(b *testing.B) { server.Stop() } + +func TestRecordRPCCost(t *testing.T) { + Convey("Bug: bad critical section for multiple values", t, func(c C) { + var ( + start = time.Now() + rounds = 1000 + concurrent = 10 + wg = &sync.WaitGroup{} + body = func(i int) { + defer func() { + c.So(recover(), ShouldBeNil) + wg.Done() + }() + recordRPCCost(start, fmt.Sprintf("M%d", i), nil) + } + ) + defer wg.Wait() + for i := 0; i < rounds; i++ { + for j := 0; j < concurrent; j++ { + wg.Add(1) + go body(i) + } + } + }) +} From 74ea8246b5cf3a682b48322839436275ddd5a326 Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Tue, 29 Jan 2019 10:19:42 +0800 Subject: [PATCH 247/302] Run each round sequentially to decrease running 
goroutines --- rpc/rpcutil_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/rpc/rpcutil_test.go b/rpc/rpcutil_test.go index ebde85893..22b121c83 100644 --- a/rpc/rpcutil_test.go +++ b/rpc/rpcutil_test.go @@ -451,8 +451,7 @@ func TestRecordRPCCost(t *testing.T) { start = time.Now() rounds = 1000 concurrent = 10 - wg = &sync.WaitGroup{} - body = func(i int) { + body = func(wg *sync.WaitGroup, i int) { defer func() { c.So(recover(), ShouldBeNil) wg.Done() @@ -460,12 +459,13 @@ func TestRecordRPCCost(t *testing.T) { recordRPCCost(start, fmt.Sprintf("M%d", i), nil) } ) - defer wg.Wait() for i := 0; i < rounds; i++ { + var wg = &sync.WaitGroup{} for j := 0; j < concurrent; j++ { wg.Add(1) - go body(i) + go body(wg, i) } + wg.Wait() } }) } From 69d3995a091942afe61e5ab425ed9ab0bd277377 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Thu, 27 Dec 2018 18:29:53 +0800 Subject: [PATCH 248/302] Support query pattern regulation --- blockproducer/metastate.go | 19 ++--- blockproducer/metastate_test.go | 39 +++++---- client/driver.go | 2 +- client/helper_test.go | 2 +- cmd/cql-minerd/integration_test.go | 3 +- cmd/cql-observer/observation_test.go | 8 +- cmd/cql/main.go | 4 +- types/account.go | 95 ++++++++++++++++----- types/account_gen.go | 51 ++++++++++- types/account_gen_test.go | 37 ++++++++ types/updatepermission.go | 2 +- types/updatepermission_gen.go | 18 +++- types/xxx_test.go | 20 +---- worker/chainbusservice_test.go | 4 +- worker/dbms.go | 73 +++++++++++----- worker/dbms_test.go | 123 ++++++++++++++++++++++++--- worker/helper_test.go | 8 +- 17 files changed, 383 insertions(+), 125 deletions(-) diff --git a/blockproducer/metastate.go b/blockproducer/metastate.go index 0b2c290e6..0390e832e 100644 --- a/blockproducer/metastate.go +++ b/blockproducer/metastate.go @@ -458,7 +458,7 @@ func (s *metaState) createSQLChain(addr proto.AccountAddress, id proto.DatabaseI Users: []*types.SQLChainUser{ { Address: addr, - Permission: types.Admin, + 
Permission: types.UserPermissionFromRole(types.Admin), }, }, } @@ -466,7 +466,7 @@ func (s *metaState) createSQLChain(addr proto.AccountAddress, id proto.DatabaseI } func (s *metaState) addSQLChainUser( - k proto.DatabaseID, addr proto.AccountAddress, perm types.UserPermission) (_ error, + k proto.DatabaseID, addr proto.AccountAddress, perm *types.UserPermission) (_ error, ) { var ( src, dst *types.SQLChainProfile @@ -515,8 +515,7 @@ func (s *metaState) deleteSQLChainUser(k proto.DatabaseID, addr proto.AccountAdd } func (s *metaState) alterSQLChainUser( - k proto.DatabaseID, addr proto.AccountAddress, perm types.UserPermission) (_ error, -) { + k proto.DatabaseID, addr proto.AccountAddress, perm *types.UserPermission) (_ error) { var ( src, dst *types.SQLChainProfile ok bool @@ -703,7 +702,7 @@ func (s *metaState) matchProvidersWithUser(tx *types.CreateDatabase) (err error) users := make([]*types.SQLChainUser, 1) users[0] = &types.SQLChainUser{ Address: sender, - Permission: types.Admin, + Permission: types.UserPermissionFromRole(types.Admin), Status: types.Normal, Deposit: minAdvancePayment, AdvancePayment: tx.AdvancePayment, @@ -886,7 +885,7 @@ func (s *metaState) updatePermission(tx *types.UpdatePermission) (err error) { }).WithError(ErrDatabaseNotFound).Error("unexpected error in updatePermission") return ErrDatabaseNotFound } - if tx.Permission >= types.NumberOfUserPermission { + if !tx.Permission.IsValid() { log.WithFields(log.Fields{ "permission": tx.Permission, "dbID": tx.TargetSQLChain.DatabaseID(), @@ -899,8 +898,8 @@ func (s *metaState) updatePermission(tx *types.UpdatePermission) (err error) { numOfAdmin := 0 targetUserIndex := -1 for i, u := range so.Users { - isAdmin = isAdmin || (sender == u.Address && u.Permission == types.Admin) - if u.Permission == types.Admin { + isAdmin = isAdmin || (sender == u.Address && u.Permission.HasAdminPermission()) + if u.Permission.HasAdminPermission() { numOfAdmin++ } if tx.TargetUser == u.Address { @@ -917,7 +916,7 
@@ func (s *metaState) updatePermission(tx *types.UpdatePermission) (err error) { } // return error if number of Admin <= 1 and Admin want to revoke permission of itself - if numOfAdmin <= 1 && tx.TargetUser == sender && tx.Permission != types.Admin { + if numOfAdmin <= 1 && tx.TargetUser == sender && !tx.Permission.HasAdminPermission() { err = ErrNoAdminLeft log.WithFields(log.Fields{ "sender": sender, @@ -955,7 +954,7 @@ func (s *metaState) updateKeys(tx *types.IssueKeys) (err error) { // check sender's permission isAdmin := false for _, user := range so.Users { - if sender == user.Address && user.Permission == types.Admin { + if sender == user.Address && user.Permission.HasAdminPermission() { isAdmin = true break } diff --git a/blockproducer/metastate_test.go b/blockproducer/metastate_test.go index f6bca9ac2..aedd69e87 100644 --- a/blockproducer/metastate_test.go +++ b/blockproducer/metastate_test.go @@ -106,11 +106,11 @@ func TestMetaState(t *testing.T) { Convey("The metaState should failed to operate SQLChain for unknown user", func() { err = ms.createSQLChain(addr1, dbID1) So(err, ShouldEqual, ErrAccountNotFound) - err = ms.addSQLChainUser(dbID1, addr1, types.Admin) + err = ms.addSQLChainUser(dbID1, addr1, types.UserPermissionFromRole(types.Admin)) So(err, ShouldEqual, ErrDatabaseNotFound) err = ms.deleteSQLChainUser(dbID1, addr1) So(err, ShouldEqual, ErrDatabaseNotFound) - err = ms.alterSQLChainUser(dbID1, addr1, types.Write) + err = ms.alterSQLChainUser(dbID1, addr1, types.UserPermissionFromRole(types.Write)) So(err, ShouldEqual, ErrDatabaseNotFound) }) Convey("When new account and database objects are stored", func() { @@ -170,9 +170,9 @@ func TestMetaState(t *testing.T) { So(err, ShouldEqual, ErrDatabaseExists) }) Convey("When new SQLChain users are added", func() { - err = ms.addSQLChainUser(dbID3, addr2, types.Write) + err = ms.addSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Write)) So(err, ShouldBeNil) - err = ms.addSQLChainUser(dbID3, 
addr2, types.Write) + err = ms.addSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Write)) So(err, ShouldEqual, ErrDatabaseUserExists) Convey("The metaState object should be ok to delete user", func() { err = ms.deleteSQLChainUser(dbID3, addr2) @@ -181,9 +181,9 @@ func TestMetaState(t *testing.T) { So(err, ShouldBeNil) }) Convey("The metaState object should be ok to alter user", func() { - err = ms.alterSQLChainUser(dbID3, addr2, types.Read) + err = ms.alterSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Read)) So(err, ShouldBeNil) - err = ms.alterSQLChainUser(dbID3, addr2, types.Write) + err = ms.alterSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Write)) So(err, ShouldBeNil) }) Convey("When metaState change is committed", func() { @@ -204,9 +204,9 @@ func TestMetaState(t *testing.T) { So(err, ShouldBeNil) }) Convey("The metaState object should be ok to alter user", func() { - err = ms.alterSQLChainUser(dbID3, addr2, types.Read) + err = ms.alterSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Read)) So(err, ShouldBeNil) - err = ms.alterSQLChainUser(dbID3, addr2, types.Write) + err = ms.alterSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Write)) So(err, ShouldBeNil) }) }) @@ -214,9 +214,9 @@ func TestMetaState(t *testing.T) { Convey("When metaState change is committed", func() { ms.commit() Convey("The metaState object should be ok to add users for database", func() { - err = ms.addSQLChainUser(dbID3, addr2, types.Write) + err = ms.addSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Write)) So(err, ShouldBeNil) - err = ms.addSQLChainUser(dbID3, addr2, types.Write) + err = ms.addSQLChainUser(dbID3, addr2, types.UserPermissionFromRole(types.Write)) So(err, ShouldEqual, ErrDatabaseUserExists) }) Convey("The metaState object should report database exists", func() { @@ -992,7 +992,7 @@ func TestMetaState(t *testing.T) { UpdatePermissionHeader: types.UpdatePermissionHeader{ TargetSQLChain: 
addr1, TargetUser: addr3, - Permission: types.Read, + Permission: types.UserPermissionFromRole(types.Read), Nonce: cd1.Nonce + 1, }, } @@ -1000,7 +1000,7 @@ func TestMetaState(t *testing.T) { So(err, ShouldBeNil) err = ms.apply(&up) So(errors.Cause(err), ShouldEqual, ErrDatabaseNotFound) - up.Permission = 4 + up.Permission = types.UserPermissionFromRole(types.NumberOfUserPermission) up.TargetSQLChain = dbAccount err = up.Sign(privKey1) So(err, ShouldBeNil) @@ -1009,7 +1009,7 @@ func TestMetaState(t *testing.T) { // test permission update // addr1(admin) update addr3 as admin up.TargetUser = addr3 - up.Permission = types.Admin + up.Permission = types.UserPermissionFromRole(types.Admin) err = up.Sign(privKey1) So(err, ShouldBeNil) err = ms.apply(&up) @@ -1018,7 +1018,7 @@ func TestMetaState(t *testing.T) { // addr3(admin) update addr4 as read up.TargetUser = addr4 up.Nonce = cd2.Nonce - up.Permission = types.Read + up.Permission = types.UserPermissionFromRole(types.Read) err = up.Sign(privKey3) So(err, ShouldBeNil) err = ms.apply(&up) @@ -1034,7 +1034,7 @@ func TestMetaState(t *testing.T) { ms.commit() // addr3(admin) update addr3(admin) as read fail up.TargetUser = addr3 - up.Permission = types.Read + up.Permission = types.UserPermissionFromRole(types.Read) up.Nonce = up.Nonce + 1 err = up.Sign(privKey3) So(err, ShouldBeNil) @@ -1050,15 +1050,18 @@ func TestMetaState(t *testing.T) { co, loaded = ms.loadSQLChainObject(dbID) for _, user := range co.Users { if user.Address == addr1 { - So(user.Permission, ShouldEqual, types.Read) + So(user.Permission, ShouldNotBeNil) + So(user.Permission.Role, ShouldEqual, types.Read) continue } if user.Address == addr3 { - So(user.Permission, ShouldEqual, types.Admin) + So(user.Permission, ShouldNotBeNil) + So(user.Permission.Role, ShouldEqual, types.Admin) continue } if user.Address == addr4 { - So(user.Permission, ShouldEqual, types.Read) + So(user.Permission, ShouldNotBeNil) + So(user.Permission.Role, ShouldEqual, types.Read) 
continue } } diff --git a/client/driver.go b/client/driver.go index ece39b3a0..0f0ca1eb5 100644 --- a/client/driver.go +++ b/client/driver.go @@ -279,7 +279,7 @@ func GetTokenBalance(tt types.TokenType) (balance uint64, err error) { // UpdatePermission sends UpdatePermission transaction to chain. func UpdatePermission(targetUser proto.AccountAddress, - targetChain proto.AccountAddress, perm types.UserPermission) (txHash hash.Hash, err error) { + targetChain proto.AccountAddress, perm *types.UserPermission) (txHash hash.Hash, err error) { if atomic.LoadUint32(&driverInitialized) == 0 { err = ErrNotInitialized return diff --git a/client/helper_test.go b/client/helper_test.go index 7ff55c20b..e72a71acb 100644 --- a/client/helper_test.go +++ b/client/helper_test.go @@ -179,7 +179,7 @@ func startTestService() (stopTestService func(), tempDir string, err error) { return } permStat := &types.PermStat{ - Permission: types.Admin, + Permission: types.UserPermissionFromRole(types.Admin), Status: types.Normal, } err = dbms.UpdatePermission(dbID, proto.AccountAddress(addr), permStat) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index b8984630c..8d7351f82 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -439,7 +439,8 @@ func TestFullProcess(t *testing.T) { } permStat, ok := usersMap[clientAddr] So(ok, ShouldBeTrue) - So(permStat.Permission, ShouldEqual, types.Admin) + So(permStat.Permission, ShouldNotBeNil) + So(permStat.Permission.Role, ShouldEqual, types.Admin) So(permStat.Status, ShouldEqual, types.Normal) _, err = db.Exec("CREATE TABLE test (test int)") diff --git a/cmd/cql-observer/observation_test.go b/cmd/cql-observer/observation_test.go index 38791dfbd..81e1f35e3 100644 --- a/cmd/cql-observer/observation_test.go +++ b/cmd/cql-observer/observation_test.go @@ -322,7 +322,7 @@ func TestFullProcess(t *testing.T) { up := types.NewUpdatePermission(&types.UpdatePermissionHeader{ TargetSQLChain: 
dbAddr, TargetUser: obAddr, - Permission: types.Read, + Permission: types.UserPermissionFromRole(types.Read), Nonce: nonce, }) err = up.Sign(cliPriv) @@ -344,7 +344,7 @@ func TestFullProcess(t *testing.T) { "stat": user.Status, }).Debug("checkFunc 1") if user.Address == obAddr { - return user.Permission.CheckRead() + return user.Permission.HasReadPermission() } } return false @@ -629,7 +629,7 @@ func TestFullProcess(t *testing.T) { up = types.NewUpdatePermission(&types.UpdatePermissionHeader{ TargetSQLChain: dbAddr2, TargetUser: obAddr, - Permission: types.Read, + Permission: types.UserPermissionFromRole(types.Read), Nonce: nonce, }) err = up.Sign(cliPriv) @@ -646,7 +646,7 @@ func TestFullProcess(t *testing.T) { err = waitProfileChecking(ctx4, 3*time.Second, proto.DatabaseID(dbID2), func(profile *types.SQLChainProfile) bool { for _, user := range profile.Users { if user.Address == obAddr { - return user.Permission.CheckRead() + return user.Permission.HasReadPermission() } } return false diff --git a/cmd/cql/main.go b/cmd/cql/main.go index cf820e163..547b2ba58 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -372,13 +372,13 @@ func main() { var p types.UserPermission p.FromString(perm.Perm) - if p > types.NumberOfUserPermission { + if p.Role > types.NumberOfUserPermission { log.WithError(err).Errorf("update permission failed: invalid permission description") os.Exit(-1) return } - txHash, err := client.UpdatePermission(perm.TargetUser, perm.TargetChain, p) + txHash, err := client.UpdatePermission(perm.TargetUser, perm.TargetChain, &p) if err != nil { log.WithError(err).Error("update permission failed") diff --git a/types/account.go b/types/account.go index d6a80efb7..299041ba5 100644 --- a/types/account.go +++ b/types/account.go @@ -36,12 +36,21 @@ const ( NumberOfRoles ) +// UserPermissionRole defines role of user permission including admin/write/read. +type UserPermissionRole int32 + // UserPermission defines permissions of a SQLChain user. 
-type UserPermission int32 +type UserPermission struct { + // User role to access database. + Role UserPermissionRole + // SQL pattern regulations for user queries + // only a fully matched (case-sensitive) sql query is permitted to execute. + Patterns []string +} const ( // Void defines the initial permission. - Void UserPermission = iota + Void UserPermissionRole = iota // Admin defines the admin user permission. Admin // Write defines the writer user permission. @@ -52,39 +61,83 @@ const ( NumberOfUserPermission ) -// CheckRead returns true if user owns read permission. -func (up *UserPermission) CheckRead() bool { - return *up >= Admin && *up < NumberOfUserPermission +// UserPermissionFromRole construct a new user permission instance from primitive user permission role enum. +func UserPermissionFromRole(role UserPermissionRole) *UserPermission { + return &UserPermission{ + Role: role, + } +} + +// HasReadPermission returns true if user owns read permission. +func (up *UserPermission) HasReadPermission() bool { + if up == nil { + return false + } + return up.Role >= Admin && up.Role < NumberOfUserPermission +} + +// HasWritePermission returns true if user owns write permission. +func (up *UserPermission) HasWritePermission() bool { + if up == nil { + return false + } + return up.Role >= Admin && up.Role <= Write } -// CheckWrite returns true if user owns write permission. -func (up *UserPermission) CheckWrite() bool { - return *up >= Admin && *up <= Write +// HasAdminPermission returns true if user owns admin permission. +func (up *UserPermission) HasAdminPermission() bool { + if up == nil { + return false + } + return up.Role == Admin } -// CheckAdmin returns true if user owns admin permission. -func (up *UserPermission) CheckAdmin() bool { - return *up == Admin +// IsValid returns whether the permission object is valid or not. 
+func (up *UserPermission) IsValid() bool { + return up != nil && up.Role < NumberOfUserPermission && up.Role >= Admin } -// Valid returns true if the value is a meaning permission value. -func (up *UserPermission) Valid() bool { - return *up >= Admin && *up < NumberOfUserPermission +// HasDisallowedQueryPatterns returns whether the queries are permitted. +func (up *UserPermission) HasDisallowedQueryPatterns(queries []Query) (query string, status bool) { + if up == nil { + status = true + return + } + if len(up.Patterns) == 0 { + status = false + return + } + + // more queries than patterns + queryMap := make(map[string]bool, len(up.Patterns)) + for _, p := range up.Patterns { + queryMap[p] = true + } + for _, q := range queries { + if !queryMap[q.Pattern] { + // not permitted + query = q.Pattern + status = true + break + } + } + + return } // FromString converts string to UserPermission. func (up *UserPermission) FromString(perm string) { switch perm { case "Admin": - *up = Admin + up.Role = Admin case "Write": - *up = Write + up.Role = Write case "Read": - *up = Read + up.Role = Read case "Void": - *up = Void + up.Role = Void default: - *up = NumberOfUserPermission + up.Role = NumberOfUserPermission } } @@ -113,14 +166,14 @@ func (s *Status) EnableQuery() bool { // PermStat defines the permissions status structure. type PermStat struct { - Permission UserPermission + Permission *UserPermission Status Status } // SQLChainUser defines a SQLChain user. 
type SQLChainUser struct { Address proto.AccountAddress - Permission UserPermission + Permission *UserPermission AdvancePayment uint64 Arrears uint64 Deposit uint64 diff --git a/types/account_gen.go b/types/account_gen.go index 1e7a3acbd..320d3ebd0 100644 --- a/types/account_gen.go +++ b/types/account_gen.go @@ -247,14 +247,33 @@ func (z *SQLChainUser) MarshalHash() (o []byte, err error) { o = hsp.AppendUint64(o, z.AdvancePayment) o = hsp.AppendUint64(o, z.Arrears) o = hsp.AppendUint64(o, z.Deposit) - o = hsp.AppendInt32(o, int32(z.Permission)) + if z.Permission == nil { + o = hsp.AppendNil(o) + } else { + // map header, size 2 + o = append(o, 0x82) + o = hsp.AppendInt32(o, int32(z.Permission.Role)) + o = hsp.AppendArrayHeader(o, uint32(len(z.Permission.Patterns))) + for za0001 := range z.Permission.Patterns { + o = hsp.AppendString(o, z.Permission.Patterns[za0001]) + } + } o = hsp.AppendInt32(o, int32(z.Status)) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SQLChainUser) Msgsize() (s int) { - s = 1 + 8 + z.Address.Msgsize() + 15 + hsp.Uint64Size + 8 + hsp.Uint64Size + 8 + hsp.Uint64Size + 11 + hsp.Int32Size + 7 + hsp.Int32Size + s = 1 + 8 + z.Address.Msgsize() + 15 + hsp.Uint64Size + 8 + hsp.Uint64Size + 8 + hsp.Uint64Size + 11 + if z.Permission == nil { + s += hsp.NilSize + } else { + s += 1 + 5 + hsp.Int32Size + 9 + hsp.ArrayHeaderSize + for za0001 := range z.Permission.Patterns { + s += hsp.StringPrefixSize + len(z.Permission.Patterns[za0001]) + } + } + s += 7 + hsp.Int32Size return } @@ -294,7 +313,31 @@ func (z *UserArrears) Msgsize() (s int) { } // MarshalHash marshals for hash -func (z UserPermission) MarshalHash() (o []byte, err error) { +func (z *UserPermission) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82) + o = hsp.AppendArrayHeader(o, uint32(len(z.Patterns))) + for za0001 := range z.Patterns { + 
o = hsp.AppendString(o, z.Patterns[za0001]) + } + o = hsp.AppendInt32(o, int32(z.Role)) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *UserPermission) Msgsize() (s int) { + s = 1 + 9 + hsp.ArrayHeaderSize + for za0001 := range z.Patterns { + s += hsp.StringPrefixSize + len(z.Patterns[za0001]) + } + s += 5 + hsp.Int32Size + return +} + +// MarshalHash marshals for hash +func (z UserPermissionRole) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) o = hsp.AppendInt32(o, int32(z)) @@ -302,7 +345,7 @@ func (z UserPermission) MarshalHash() (o []byte, err error) { } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z UserPermission) Msgsize() (s int) { +func (z UserPermissionRole) Msgsize() (s int) { s = hsp.Int32Size return } diff --git a/types/account_gen_test.go b/types/account_gen_test.go index 30e9ad803..388a19ddb 100644 --- a/types/account_gen_test.go +++ b/types/account_gen_test.go @@ -230,3 +230,40 @@ func BenchmarkAppendMsgUserArrears(b *testing.B) { bts, _ = v.MarshalHash() } } + +func TestMarshalHashUserPermission(t *testing.T) { + v := UserPermission{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashUserPermission(b *testing.B) { + v := UserPermission{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgUserPermission(b *testing.B) { + v := UserPermission{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} diff --git a/types/updatepermission.go 
b/types/updatepermission.go index 1b7ed46a6..729829c3d 100644 --- a/types/updatepermission.go +++ b/types/updatepermission.go @@ -30,7 +30,7 @@ import ( type UpdatePermissionHeader struct { TargetSQLChain proto.AccountAddress TargetUser proto.AccountAddress - Permission UserPermission + Permission *UserPermission Nonce interfaces.AccountNonce } diff --git a/types/updatepermission_gen.go b/types/updatepermission_gen.go index 9c54d6e2f..11ba931d5 100644 --- a/types/updatepermission_gen.go +++ b/types/updatepermission_gen.go @@ -47,10 +47,14 @@ func (z *UpdatePermissionHeader) MarshalHash() (o []byte, err error) { } else { o = hsp.AppendBytes(o, oTemp) } - if oTemp, err := z.Permission.MarshalHash(); err != nil { - return nil, err + if z.Permission == nil { + o = hsp.AppendNil(o) } else { - o = hsp.AppendBytes(o, oTemp) + if oTemp, err := z.Permission.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } } if oTemp, err := z.TargetSQLChain.MarshalHash(); err != nil { return nil, err @@ -67,6 +71,12 @@ func (z *UpdatePermissionHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *UpdatePermissionHeader) Msgsize() (s int) { - s = 1 + 6 + z.Nonce.Msgsize() + 11 + z.Permission.Msgsize() + 15 + z.TargetSQLChain.Msgsize() + 11 + z.TargetUser.Msgsize() + s = 1 + 6 + z.Nonce.Msgsize() + 11 + if z.Permission == nil { + s += hsp.NilSize + } else { + s += z.Permission.Msgsize() + } + s += 15 + z.TargetSQLChain.Msgsize() + 11 + z.TargetUser.Msgsize() return } diff --git a/types/xxx_test.go b/types/xxx_test.go index ab9d8f77f..cf2454ab6 100644 --- a/types/xxx_test.go +++ b/types/xxx_test.go @@ -68,7 +68,6 @@ func generateRandomBlock(parent hash.Hash, isGenesis bool) (b *BPBlock, err erro if err != nil { return - } h := hash.Hash{} @@ -96,8 +95,8 @@ func generateRandomBlock(parent hash.Hash, isGenesis bool) (b *BPBlock, err erro } err = 
b.PackAndSignBlock(priv) - return + return } func generateRandomBillingRequestHeader() *BillingRequestHeader { @@ -109,7 +108,6 @@ func generateRandomBillingRequestHeader() *BillingRequestHeader { HighHeight: rand.Int31(), GasAmounts: generateRandomGasAmount(peerNum), } - } func generateRandomBillingRequest() (req *BillingRequest, err error) { @@ -119,7 +117,6 @@ func generateRandomBillingRequest() (req *BillingRequest, err error) { } if _, err = req.PackRequestHeader(); err != nil { return nil, err - } for i := 0; i < peerNum; i++ { @@ -128,36 +125,29 @@ func generateRandomBillingRequest() (req *BillingRequest, err error) { if priv, _, err = asymmetric.GenSecp256k1KeyPair(); err != nil { return - } if _, _, err = req.SignRequestHeader(priv, false); err != nil { return - } - } return - } func generateRandomBillingHeader() (tc *BillingHeader, err error) { var req *BillingRequest if req, err = generateRandomBillingRequest(); err != nil { return - } var priv *asymmetric.PrivateKey if priv, _, err = asymmetric.GenSecp256k1KeyPair(); err != nil { return - } if _, _, err = req.SignRequestHeader(priv, false); err != nil { return - } receivers := make([]*proto.AccountAddress, peerNum) @@ -169,33 +159,27 @@ func generateRandomBillingHeader() (tc *BillingHeader, err error) { receivers[i] = &accountAddress fees[i] = rand.Uint64() rewards[i] = rand.Uint64() - } producer := proto.AccountAddress(generateRandomHash()) tc = NewBillingHeader(pi.AccountNonce(rand.Uint32()), req, producer, receivers, fees, rewards) return tc, nil - } func generateRandomBilling() (*Billing, error) { header, err := generateRandomBillingHeader() if err != nil { return nil, err - } priv, _, err := asymmetric.GenSecp256k1KeyPair() if err != nil { return nil, err - } txBilling := NewBilling(header) if err := txBilling.Sign(priv); err != nil { return nil, err - } return txBilling, nil - } func generateRandomGasAmount(n int) []*proto.AddrAndGas { @@ -207,11 +191,9 @@ func generateRandomGasAmount(n int) 
[]*proto.AddrAndGas { RawNodeID: proto.RawNodeID{Hash: generateRandomHash()}, GasAmount: rand.Uint64(), } - } return gasAmount - } func randBytes(n int) (b []byte) { diff --git a/worker/chainbusservice_test.go b/worker/chainbusservice_test.go index 2429061e6..8fd721e2d 100644 --- a/worker/chainbusservice_test.go +++ b/worker/chainbusservice_test.go @@ -94,7 +94,7 @@ func TestNewBusService(t *testing.T) { permStat, ok := bs.RequestPermStat(profile.ID, testAddr) So(ok, ShouldBeTrue) So(permStat.Status, ShouldEqual, profile.Users[0].Status) - So(permStat.Permission, ShouldEqual, profile.Users[0].Permission) + So(permStat.Permission, ShouldResemble, profile.Users[0].Permission) permStat, ok = bs.RequestPermStat(profile.ID, testNotExistAddr) } p, ok := bs.RequestSQLProfile(testNotExistID) @@ -116,7 +116,7 @@ func TestNewBusService(t *testing.T) { permStat, ok := bs.RequestPermStat(profile.ID, testAddr) So(ok, ShouldBeTrue) So(permStat.Status, ShouldEqual, profile.Users[0].Status) - So(permStat.Permission, ShouldEqual, profile.Users[0].Permission) + So(permStat.Permission, ShouldResemble, profile.Users[0].Permission) permStat, ok = bs.RequestPermStat(profile.ID, testNotExistAddr) } p, ok := bs.RequestSQLProfile(testNotExistID) diff --git a/worker/dbms.go b/worker/dbms.go index df094218c..028025b84 100644 --- a/worker/dbms.go +++ b/worker/dbms.go @@ -445,7 +445,7 @@ func (dbms *DBMS) Query(req *types.Request) (res *types.Response, err error) { if err != nil { return } - err = dbms.checkPermission(addr, req.Header.DatabaseID, req.Header.QueryType) + err = dbms.checkPermission(addr, req.Header.DatabaseID, req.Header.QueryType, req.Payload.Queries) if err != nil { return } @@ -509,32 +509,59 @@ func (dbms *DBMS) removeMeta(dbID proto.DatabaseID) (err error) { } func (dbms *DBMS) checkPermission(addr proto.AccountAddress, - dbID proto.DatabaseID, queryType types.QueryType) (err error) { + dbID proto.DatabaseID, queryType types.QueryType, queries []types.Query) (err error) { 
log.Debugf("in checkPermission, database id: %s, user addr: %s", dbID, addr.String()) - if permStat, ok := dbms.busService.RequestPermStat(dbID, addr); ok { - if !permStat.Status.EnableQuery() { - err = errors.Wrapf(ErrPermissionDeny, "cannot query, status: %d", permStat.Status) + var ( + permStat *types.PermStat + ok bool + ) + + // get database perm stat + permStat, ok = dbms.busService.RequestPermStat(dbID, addr) + + // perm stat not exists + if !ok { + err = errors.Wrap(ErrPermissionDeny, "database not exists") + return + } + + // check if query is enabled + if !permStat.Status.EnableQuery() { + err = errors.Wrapf(ErrPermissionDeny, "cannot query, status: %d", permStat.Status) + return + } + + // check query type permission + switch queryType { + case types.ReadQuery: + if !permStat.Permission.HasReadPermission() { + err = errors.Wrapf(ErrPermissionDeny, "cannot read, permission: %d", permStat.Permission) return } - if queryType == types.ReadQuery { - if !permStat.Permission.CheckRead() { - err = errors.Wrapf(ErrPermissionDeny, "cannot read, permission: %d", permStat.Permission) - return - } - } else if queryType == types.WriteQuery { - if !permStat.Permission.CheckWrite() { - err = errors.Wrapf(ErrPermissionDeny, "cannot write, permission: %d", permStat.Permission) - return - } - } else { - err = errors.Wrapf(ErrInvalidPermission, - "invalid permission, permission: %d", permStat.Permission) + case types.WriteQuery: + if !permStat.Permission.HasWritePermission() { + err = errors.Wrapf(ErrPermissionDeny, "cannot write, permission: %d", permStat.Permission) return - } - } else { - err = errors.Wrap(ErrPermissionDeny, "database not exists") + default: + err = errors.Wrapf(ErrInvalidPermission, + "invalid permission, permission: %d", permStat.Permission) + return + } + + // check for query pattern + var ( + disallowedQuery string + hasDisallowedQuery bool + ) + + if disallowedQuery, hasDisallowedQuery = permStat.Permission.HasDisallowedQueryPatterns(queries); 
hasDisallowedQuery { + err = errors.Wrapf(ErrPermissionDeny, "disallowed query %s", disallowedQuery) + log.WithError(err).WithFields(log.Fields{ + "permission": permStat.Permission, + "query": disallowedQuery, + }).Debug("can not query") return } @@ -548,7 +575,7 @@ func (dbms *DBMS) addTxSubscription(dbID proto.DatabaseID, nodeID proto.NodeID, log.WithFields(log.Fields{ "databaseID": dbID, "nodeID": nodeID, - }).WithError(err).Warning("get pubkey failed in addTxSubscription") + }).WithError(err).Warning("get public key failed in addTxSubscription") return } addr, err := crypto.PubKeyHash(pubkey) @@ -567,7 +594,7 @@ func (dbms *DBMS) addTxSubscription(dbID proto.DatabaseID, nodeID proto.NodeID, "startHeight": startHeight, }).Debugf("addTxSubscription") - err = dbms.checkPermission(addr, dbID, types.ReadQuery) + err = dbms.checkPermission(addr, dbID, types.ReadQuery, nil) if err != nil { log.WithFields(log.Fields{"databaseID": dbID, "addr": addr}).WithError(err).Warning("permission deny") return diff --git a/worker/dbms_test.go b/worker/dbms_test.go index 4895b024f..db8d80cbe 100644 --- a/worker/dbms_test.go +++ b/worker/dbms_test.go @@ -134,11 +134,12 @@ func TestDBMS(t *testing.T) { // grant write and read permission err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, - &types.PermStat{Permission: types.Write, Status: types.Normal}) + &types.PermStat{Permission: types.UserPermissionFromRole(types.Write), Status: types.Normal}) So(err, ShouldBeNil) userState, ok := dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) So(ok, ShouldBeTrue) - So(userState.Permission, ShouldEqual, types.Write) + So(userState.Permission, ShouldNotBeNil) + So(userState.Permission.Role, ShouldEqual, types.Write) So(userState.Status, ShouldEqual, types.Normal) Convey("success write and read", func() { @@ -193,10 +194,11 @@ func TestDBMS(t *testing.T) { // revoke write permission err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, - &types.PermStat{Permission: 
types.Read, Status: types.Normal}) + &types.PermStat{Permission: types.UserPermissionFromRole(types.Read), Status: types.Normal}) userState, ok := dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) So(ok, ShouldBeTrue) - So(userState.Permission, ShouldEqual, types.Read) + So(userState.Permission, ShouldNotBeNil) + So(userState.Permission.Role, ShouldEqual, types.Read) So(userState.Status, ShouldEqual, types.Normal) Convey("success reading and fail to write", func() { @@ -229,10 +231,12 @@ func TestDBMS(t *testing.T) { // grant invalid permission err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, - &types.PermStat{Permission: types.Void, Status: types.Normal}) + &types.PermStat{Permission: types.UserPermissionFromRole(types.Void), Status: types.Normal}) + So(err, ShouldBeNil) userState, ok = dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) So(ok, ShouldBeTrue) - So(userState.Permission, ShouldEqual, types.Void) + So(userState.Permission, ShouldNotBeNil) + So(userState.Permission.Role, ShouldEqual, types.Void) So(userState.Status, ShouldEqual, types.Normal) Convey("invalid permission query should fail", func() { @@ -264,10 +268,12 @@ func TestDBMS(t *testing.T) { // grant admin permission but in arrears err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, - &types.PermStat{Permission: types.Admin, Status: types.Arrears}) + &types.PermStat{Permission: types.UserPermissionFromRole(types.Admin), Status: types.Arrears}) + So(err, ShouldBeNil) userState, ok = dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) So(ok, ShouldBeTrue) - So(userState.Permission, ShouldEqual, types.Admin) + So(userState.Permission, ShouldNotBeNil) + So(userState.Permission.Role, ShouldEqual, types.Admin) So(userState.Status, ShouldEqual, types.Arrears) Convey("arrears query should fail", func() { @@ -296,10 +302,12 @@ func TestDBMS(t *testing.T) { // switch user to normal err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, - 
&types.PermStat{Permission: types.Admin, Status: types.Normal}) + &types.PermStat{Permission: types.UserPermissionFromRole(types.Admin), Status: types.Normal}) + So(err, ShouldBeNil) userState, ok = dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) So(ok, ShouldBeTrue) - So(userState.Permission, ShouldEqual, types.Admin) + So(userState.Permission, ShouldNotBeNil) + So(userState.Permission.Role, ShouldEqual, types.Admin) So(userState.Status, ShouldEqual, types.Normal) Convey("can send read and write queries", func() { @@ -346,6 +354,101 @@ func TestDBMS(t *testing.T) { So(err, ShouldBeNil) }) + // enforce query pattern regulations + err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, + &types.PermStat{Permission: &types.UserPermission{ + Role: types.Admin, + Patterns: []string{ + "create table test (test int)", + "SELECT 1", + "INSERT INTO TEST VALUES(1)", + }, + }, Status: types.Normal}) + So(err, ShouldBeNil) + userState, ok = dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) + So(ok, ShouldBeTrue) + So(userState.Permission, ShouldNotBeNil) + So(userState.Permission.Role, ShouldEqual, types.Admin) + So(userState.Permission.Patterns, ShouldHaveLength, 3) + + Convey("query patterns restrictions", func() { + var writeQuery *types.Request + var queryRes *types.Response + + // sending allowed write query + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 11, dbID, []string{ + "create table test (test int)", + "INSERT INTO TEST VALUES(1)", + }) + So(err, ShouldBeNil) + + err = testRequest(route.DBSQuery, writeQuery, &queryRes) + So(err, ShouldBeNil) + err = queryRes.Verify() + So(err, ShouldBeNil) + So(queryRes.Header.RowCount, ShouldEqual, 0) + + // sending allowed read query + var readQuery *types.Request + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, 1, 12, dbID, []string{ + "SELECT 1", + }) + So(err, ShouldBeNil) + + err = testRequest(route.DBSQuery, readQuery, &queryRes) + So(err, ShouldBeNil) + err = 
queryRes.Verify() + So(err, ShouldBeNil) + So(queryRes.Header.RowCount, ShouldEqual, uint64(1)) + So(queryRes.Payload.Rows, ShouldHaveLength, 1) + So(queryRes.Payload.Rows[0].Values, ShouldHaveLength, 1) + So(queryRes.Payload.Rows[0].Values[0], ShouldEqual, 1) + + // sending disallowed write query + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 13, dbID, []string{ + "insert into test values(1)", + }) + So(err, ShouldBeNil) + err = testRequest(route.DBSQuery, writeQuery, &queryRes) + So(err, ShouldNotBeNil) + + // sending disallowed write query mixed with valid write query + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 14, dbID, []string{ + "INSERT INTO TEST VALUES(1)", + "insert into test values(1)", + }) + So(err, ShouldBeNil) + err = testRequest(route.DBSQuery, writeQuery, &queryRes) + So(err, ShouldNotBeNil) + + // sending disallowed read query + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, 1, 15, dbID, []string{ + "select * from test", + }) + So(err, ShouldBeNil) + err = testRequest(route.DBSQuery, readQuery, &queryRes) + So(err, ShouldNotBeNil) + + // sending disallowed read query + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, 1, 16, dbID, []string{ + "SELECT 1", + "select * from test", + }) + So(err, ShouldBeNil) + err = testRequest(route.DBSQuery, readQuery, &queryRes) + So(err, ShouldNotBeNil) + }) + + // set back permission object + err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, + &types.PermStat{Permission: types.UserPermissionFromRole(types.Admin), Status: types.Normal}) + So(err, ShouldBeNil) + userState, ok = dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) + So(ok, ShouldBeTrue) + So(userState.Permission, ShouldNotBeNil) + So(userState.Permission.Role, ShouldEqual, types.Admin) + So(userState.Status, ShouldEqual, types.Normal) + Convey("query non-existent database", func() { // sending write query var writeQuery *types.Request diff --git a/worker/helper_test.go 
b/worker/helper_test.go index e7b48b14f..d561f2ed6 100644 --- a/worker/helper_test.go +++ b/worker/helper_test.go @@ -100,22 +100,22 @@ var ( testNotExistAddr = proto.AccountAddress(hash.THashH([]byte{'a', 'a'})) testUser1 = &types.SQLChainUser{ Address: testAddr, - Permission: types.Write, + Permission: types.UserPermissionFromRole(types.Write), Status: types.Normal, } testUser2 = &types.SQLChainUser{ Address: testAddr, - Permission: types.Read, + Permission: types.UserPermissionFromRole(types.Read), Status: types.Arrears, } testUser3 = &types.SQLChainUser{ Address: testAddr, - Permission: types.Write, + Permission: types.UserPermissionFromRole(types.Write), Status: types.Reminder, } testUser4 = &types.SQLChainUser{ Address: testAddr, - Permission: types.Read, + Permission: types.UserPermissionFromRole(types.Read), Status: types.Arbitration, } ) From 816decd4640d36d2c226310a22dccb16b186adc9 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Thu, 27 Dec 2018 18:56:03 +0800 Subject: [PATCH 249/302] Use cache for query pattern permission matching --- types/account.go | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/types/account.go b/types/account.go index 299041ba5..429eed134 100644 --- a/types/account.go +++ b/types/account.go @@ -17,6 +17,8 @@ package types import ( + "sync" + pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/proto" ) @@ -46,6 +48,10 @@ type UserPermission struct { // SQL pattern regulations for user queries // only a fully matched (case-sensitive) sql query is permitted to execute. 
Patterns []string + + // patterns map cache for matching + cachedPatternMapOnce sync.Once + cachedPatternMap map[string]bool } const ( @@ -108,13 +114,15 @@ func (up *UserPermission) HasDisallowedQueryPatterns(queries []Query) (query str return } - // more queries than patterns - queryMap := make(map[string]bool, len(up.Patterns)) - for _, p := range up.Patterns { - queryMap[p] = true - } + up.cachedPatternMapOnce.Do(func() { + up.cachedPatternMap = make(map[string]bool, len(up.Patterns)) + for _, p := range up.Patterns { + up.cachedPatternMap[p] = true + } + }) + for _, q := range queries { - if !queryMap[q.Pattern] { + if !up.cachedPatternMap[q.Pattern] { // not permitted query = q.Pattern status = true From b68c1ecda54379ee5b1258805ac9587eb675e2eb Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 7 Jan 2019 23:12:16 +0800 Subject: [PATCH 250/302] Make cql updatePermission feature compatible with sql pattern config --- cmd/cql/main.go | 20 +++++---- types/account.go | 68 +++++++++++++++++++++++-------- types/account_test.go | 95 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 158 insertions(+), 25 deletions(-) create mode 100644 types/account_test.go diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 547b2ba58..ea78c1e54 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -80,7 +80,7 @@ var ( type userPermission struct { TargetChain proto.AccountAddress `json:"chain"` TargetUser proto.AccountAddress `json:"user"` - Perm string `json:"perm"` + Perm json.RawMessage `json:"perm"` } type tranToken struct { @@ -371,15 +371,21 @@ func main() { } var p types.UserPermission - p.FromString(perm.Perm) - if p.Role > types.NumberOfUserPermission { - log.WithError(err).Errorf("update permission failed: invalid permission description") - os.Exit(-1) - return + + if err := json.Unmarshal(perm.Perm, &p); err != nil { + // try again using role string representation + if err := json.Unmarshal(perm.Perm, &p.Role); err != nil { + log.WithError(err).Errorf("update 
permission failed: invalid permission description") + os.Exit(-1) + return + } else if !p.IsValid() { + log.Errorf("update permission failed: invalid permission description") + os.Exit(-1) + return + } } txHash, err := client.UpdatePermission(perm.TargetUser, perm.TargetChain, &p) - if err != nil { log.WithError(err).Error("update permission failed") os.Exit(-1) diff --git a/types/account.go b/types/account.go index 429eed134..34549ff06 100644 --- a/types/account.go +++ b/types/account.go @@ -17,6 +17,7 @@ package types import ( + "encoding/json" "sync" pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" @@ -44,10 +45,10 @@ type UserPermissionRole int32 // UserPermission defines permissions of a SQLChain user. type UserPermission struct { // User role to access database. - Role UserPermissionRole + Role UserPermissionRole `json:"role"` // SQL pattern regulations for user queries // only a fully matched (case-sensitive) sql query is permitted to execute. - Patterns []string + Patterns []string `json:"patterns"` // patterns map cache for matching cachedPatternMapOnce sync.Once @@ -67,6 +68,53 @@ const ( NumberOfUserPermission ) +// UnmarshalJSON implements the json.Unmarshler interface. +func (r *UserPermissionRole) UnmarshalJSON(data []byte) (err error) { + var s string + if err = json.Unmarshal(data, &s); err != nil { + return + } + r.FromString(s) + return +} + +// MarshalJSON implements the json.Marshaler interface. +func (r UserPermissionRole) MarshalJSON() ([]byte, error) { + return json.Marshal(r.String()) +} + +// String implements the fmt.Stringer interface. +func (r UserPermissionRole) String() string { + switch r { + case Admin: + return "Admin" + case Write: + return "Write" + case Read: + return "Read" + case Void: + return "Void" + default: + return "Unknown" + } +} + +// FromString converts string to UserPermissionRole. 
+func (r *UserPermissionRole) FromString(perm string) { + switch perm { + case "Admin": + *r = Admin + case "Write": + *r = Write + case "Read": + *r = Read + case "Void": + *r = Void + default: + *r = NumberOfUserPermission + } +} + // UserPermissionFromRole construct a new user permission instance from primitive user permission role enum. func UserPermissionFromRole(role UserPermissionRole) *UserPermission { return &UserPermission{ @@ -133,22 +181,6 @@ func (up *UserPermission) HasDisallowedQueryPatterns(queries []Query) (query str return } -// FromString converts string to UserPermission. -func (up *UserPermission) FromString(perm string) { - switch perm { - case "Admin": - up.Role = Admin - case "Write": - up.Role = Write - case "Read": - up.Role = Read - case "Void": - up.Role = Void - default: - up.Role = NumberOfUserPermission - } -} - // Status defines status of a SQLChain user/miner. type Status int32 diff --git a/types/account_test.go b/types/account_test.go new file mode 100644 index 000000000..7a7eb665e --- /dev/null +++ b/types/account_test.go @@ -0,0 +1,95 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "encoding/json" + "testing" + + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestUserPermissionFromRole(t *testing.T) { + Convey("test marshal/unmarshal json", t, func() { + jsonBytes, err := json.Marshal(Read) + So(err, ShouldBeNil) + So(jsonBytes, ShouldResemble, []byte(`"Read"`)) + var r UserPermissionRole + So(r, ShouldEqual, Void) + err = json.Unmarshal([]byte(`"Write"`), &r) + So(err, ShouldBeNil) + So(r, ShouldEqual, Write) + }) + Convey("test string/from string", t, func() { + var r UserPermissionRole + So(r, ShouldEqual, Void) + r.FromString(Read.String()) + So(r, ShouldEqual, Read) + }) +} + +func TestUserPermission(t *testing.T) { + Convey("nil protect", t, func() { + p := (*UserPermission)(nil) + So(p.HasReadPermission(), ShouldBeFalse) + So(p.HasWritePermission(), ShouldBeFalse) + So(p.HasAdminPermission(), ShouldBeFalse) + So(p.IsValid(), ShouldBeFalse) + _, state := p.HasDisallowedQueryPatterns([]Query{}) + So(state, ShouldBeTrue) + }) + Convey("has read permission", t, func() { + So(UserPermissionFromRole(Void).HasReadPermission(), ShouldBeFalse) + So(UserPermissionFromRole(Read).HasReadPermission(), ShouldBeTrue) + So(UserPermissionFromRole(Write).HasReadPermission(), ShouldBeTrue) + So(UserPermissionFromRole(Admin).HasReadPermission(), ShouldBeTrue) + So(UserPermissionFromRole(NumberOfUserPermission).HasReadPermission(), ShouldBeFalse) + }) + Convey("has write permission", t, func() { + So(UserPermissionFromRole(Void).HasWritePermission(), ShouldBeFalse) + So(UserPermissionFromRole(Read).HasWritePermission(), ShouldBeFalse) + So(UserPermissionFromRole(Write).HasWritePermission(), ShouldBeTrue) + So(UserPermissionFromRole(Admin).HasWritePermission(), ShouldBeTrue) + So(UserPermissionFromRole(NumberOfUserPermission).HasWritePermission(), ShouldBeFalse) + }) + Convey("has admin permission", t, func() { + So(UserPermissionFromRole(Void).HasAdminPermission(), ShouldBeFalse) + So(UserPermissionFromRole(Read).HasAdminPermission(), ShouldBeFalse) + 
So(UserPermissionFromRole(Write).HasAdminPermission(), ShouldBeFalse) + So(UserPermissionFromRole(Admin).HasAdminPermission(), ShouldBeTrue) + So(UserPermissionFromRole(NumberOfUserPermission).HasAdminPermission(), ShouldBeFalse) + }) + Convey("is valid", t, func() { + So(UserPermissionFromRole(Void).IsValid(), ShouldBeFalse) + So(UserPermissionFromRole(Read).IsValid(), ShouldBeTrue) + So(UserPermissionFromRole(Write).IsValid(), ShouldBeTrue) + So(UserPermissionFromRole(Admin).IsValid(), ShouldBeTrue) + So(UserPermissionFromRole(NumberOfUserPermission).IsValid(), ShouldBeFalse) + }) + Convey("query patterns", t, func() { + // empty patterns limitation + _, state := UserPermissionFromRole(Read).HasDisallowedQueryPatterns([]Query{ + { + Pattern: "select 1", + }, + { + Pattern: "insert into test values(1)", + }, + }) + So(state, ShouldBeFalse) + }) +} From f3d2259b1218c7f5ddc94a6822c40f25a60d2bdb Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 7 Jan 2019 23:38:39 +0800 Subject: [PATCH 251/302] Move updatePermission json tag declaration into cql command package --- cmd/cql/main.go | 31 +++++++++++++++++++++++-------- types/account.go | 4 ++-- types/account_gen.go | 32 ++++++++++++++++++++++++++++++++ types/account_gen_test.go | 37 +++++++++++++++++++++++++++++++++++++ 4 files changed, 94 insertions(+), 10 deletions(-) diff --git a/cmd/cql/main.go b/cmd/cql/main.go index ea78c1e54..825e9f669 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -83,6 +83,14 @@ type userPermission struct { Perm json.RawMessage `json:"perm"` } +type userPermPayload struct { + // User role to access database. + Role types.UserPermissionRole `json:"role"` + // SQL pattern regulations for user queries + // only a fully matched (case-sensitive) sql query is permitted to execute. 
+ Patterns []string `json:"patterns"` +} + type tranToken struct { TargetUser proto.AccountAddress `json:"addr"` Amount string `json:"amount"` @@ -370,22 +378,29 @@ func main() { return } - var p types.UserPermission + var permPayload userPermPayload - if err := json.Unmarshal(perm.Perm, &p); err != nil { + if err := json.Unmarshal(perm.Perm, &permPayload); err != nil { // try again using role string representation - if err := json.Unmarshal(perm.Perm, &p.Role); err != nil { + if err := json.Unmarshal(perm.Perm, &permPayload.Role); err != nil { log.WithError(err).Errorf("update permission failed: invalid permission description") os.Exit(-1) return - } else if !p.IsValid() { - log.Errorf("update permission failed: invalid permission description") - os.Exit(-1) - return } } - txHash, err := client.UpdatePermission(perm.TargetUser, perm.TargetChain, &p) + p := &types.UserPermission{ + Role: permPayload.Role, + Patterns: permPayload.Patterns, + } + + if !p.IsValid() { + log.Errorf("update permission failed: invalid permission description") + os.Exit(-1) + return + } + + txHash, err := client.UpdatePermission(perm.TargetUser, perm.TargetChain, p) if err != nil { log.WithError(err).Error("update permission failed") os.Exit(-1) diff --git a/types/account.go b/types/account.go index 34549ff06..dce6b1449 100644 --- a/types/account.go +++ b/types/account.go @@ -45,10 +45,10 @@ type UserPermissionRole int32 // UserPermission defines permissions of a SQLChain user. type UserPermission struct { // User role to access database. - Role UserPermissionRole `json:"role"` + Role UserPermissionRole // SQL pattern regulations for user queries // only a fully matched (case-sensitive) sql query is permitted to execute. 
- Patterns []string `json:"patterns"` + Patterns []string // patterns map cache for matching cachedPatternMapOnce sync.Once diff --git a/types/account_gen.go b/types/account_gen.go index 320d3ebd0..2807344b4 100644 --- a/types/account_gen.go +++ b/types/account_gen.go @@ -89,6 +89,38 @@ func (z *MinerInfo) Msgsize() (s int) { return } +// MarshalHash marshals for hash +func (z *PermStat) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82, 0x82) + if z.Permission == nil { + o = hsp.AppendNil(o) + } else { + if oTemp, err := z.Permission.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } + } + o = append(o, 0x82) + o = hsp.AppendInt32(o, int32(z.Status)) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z *PermStat) Msgsize() (s int) { + s = 1 + 11 + if z.Permission == nil { + s += hsp.NilSize + } else { + s += z.Permission.Msgsize() + } + s += 7 + hsp.Int32Size + return +} + // MarshalHash marshals for hash func (z *ProviderProfile) MarshalHash() (o []byte, err error) { var b []byte diff --git a/types/account_gen_test.go b/types/account_gen_test.go index 388a19ddb..9b6a8a5d3 100644 --- a/types/account_gen_test.go +++ b/types/account_gen_test.go @@ -83,6 +83,43 @@ func BenchmarkAppendMsgMinerInfo(b *testing.B) { } } +func TestMarshalHashPermStat(t *testing.T) { + v := PermStat{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashPermStat(b *testing.B) { + v := PermStat{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgPermStat(b *testing.B) { + v := PermStat{} + bts := make([]byte, 
0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + func TestMarshalHashProviderProfile(t *testing.T) { v := ProviderProfile{} binary.Read(rand.Reader, binary.BigEndian, &v) From b1d0c9cc263dd3d3bb726efc9a66a4bcfe46e2df Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 8 Jan 2019 15:15:50 +0800 Subject: [PATCH 252/302] Refactor Read/Write permission into Read/Write/ReadWrite and using flag bit --- blockproducer/errors.go | 4 +- blockproducer/metastate.go | 26 ++-- blockproducer/metastate_test.go | 4 +- types/account.go | 92 +++++++++----- types/account_test.go | 25 ++-- worker/dbms_test.go | 212 ++++++++++++++++++++++---------- 6 files changed, 236 insertions(+), 127 deletions(-) diff --git a/blockproducer/errors.go b/blockproducer/errors.go index 42421b072..2a6cc74cb 100644 --- a/blockproducer/errors.go +++ b/blockproducer/errors.go @@ -60,8 +60,8 @@ var ( ErrNoEnoughMiner = errors.New("can not get enough miners") // ErrAccountPermissionDeny indicates that the sender does not own admin permission to the sqlchain. ErrAccountPermissionDeny = errors.New("account permission deny") - // ErrNoAdminLeft indicates there is no admin user in sqlchain. - ErrNoAdminLeft = errors.New("no admin user left") + // ErrNoSuperUserLeft indicates there is no super user in sqlchain. + ErrNoSuperUserLeft = errors.New("no super user left") // ErrInvalidPermission indicates that the permission is invalid. ErrInvalidPermission = errors.New("invalid permission") // ErrMinerUserNotMatch indicates that the miner and user do not match. 
diff --git a/blockproducer/metastate.go b/blockproducer/metastate.go index 0390e832e..68db687c3 100644 --- a/blockproducer/metastate.go +++ b/blockproducer/metastate.go @@ -893,21 +893,21 @@ func (s *metaState) updatePermission(tx *types.UpdatePermission) (err error) { return ErrInvalidPermission } - // check whether sender is admin and find targetUser - isAdmin := false - numOfAdmin := 0 + // check whether sender has super privilege and find targetUser + isSuperUser := false + numOfSuperUsers := 0 targetUserIndex := -1 for i, u := range so.Users { - isAdmin = isAdmin || (sender == u.Address && u.Permission.HasAdminPermission()) - if u.Permission.HasAdminPermission() { - numOfAdmin++ + isSuperUser = isSuperUser || (sender == u.Address && u.Permission.HasSuperPermission()) + if u.Permission.HasSuperPermission() { + numOfSuperUsers++ } if tx.TargetUser == u.Address { targetUserIndex = i } } - if !isAdmin { + if !isSuperUser { log.WithFields(log.Fields{ "sender": sender, "dbID": tx.TargetSQLChain, @@ -916,8 +916,8 @@ func (s *metaState) updatePermission(tx *types.UpdatePermission) (err error) { } // return error if number of Admin <= 1 and Admin want to revoke permission of itself - if numOfAdmin <= 1 && tx.TargetUser == sender && !tx.Permission.HasAdminPermission() { - err = ErrNoAdminLeft + if numOfSuperUsers <= 1 && tx.TargetUser == sender && !tx.Permission.HasSuperPermission() { + err = ErrNoSuperUserLeft log.WithFields(log.Fields{ "sender": sender, "dbID": tx.TargetSQLChain, @@ -952,14 +952,14 @@ func (s *metaState) updateKeys(tx *types.IssueKeys) (err error) { } // check sender's permission - isAdmin := false + isSuperUser := false for _, user := range so.Users { - if sender == user.Address && user.Permission.HasAdminPermission() { - isAdmin = true + if sender == user.Address && user.Permission.HasSuperPermission() { + isSuperUser = true break } } - if !isAdmin { + if !isSuperUser { log.WithFields(log.Fields{ "sender": sender, "dbID": tx.TargetSQLChain, diff 
--git a/blockproducer/metastate_test.go b/blockproducer/metastate_test.go index aedd69e87..21fe2ae02 100644 --- a/blockproducer/metastate_test.go +++ b/blockproducer/metastate_test.go @@ -1000,7 +1000,7 @@ func TestMetaState(t *testing.T) { So(err, ShouldBeNil) err = ms.apply(&up) So(errors.Cause(err), ShouldEqual, ErrDatabaseNotFound) - up.Permission = types.UserPermissionFromRole(types.NumberOfUserPermission) + up.Permission = types.UserPermissionFromRole(types.Void) up.TargetSQLChain = dbAccount err = up.Sign(privKey1) So(err, ShouldBeNil) @@ -1039,7 +1039,7 @@ func TestMetaState(t *testing.T) { err = up.Sign(privKey3) So(err, ShouldBeNil) err = ms.apply(&up) - So(errors.Cause(err), ShouldEqual, ErrNoAdminLeft) + So(errors.Cause(err), ShouldEqual, ErrNoSuperUserLeft) // addr1(read) update addr3(admin) fail up.Nonce = cd1.Nonce + 2 err = up.Sign(privKey1) diff --git a/types/account.go b/types/account.go index dce6b1449..b0eac76b1 100644 --- a/types/account.go +++ b/types/account.go @@ -18,6 +18,7 @@ package types import ( "encoding/json" + "strings" "sync" pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" @@ -56,16 +57,24 @@ type UserPermission struct { } const ( - // Void defines the initial permission. - Void UserPermissionRole = iota - // Admin defines the admin user permission. - Admin + // Read defines the read user permission. + Read UserPermissionRole = 1 << iota // Write defines the writer user permission. Write - // Read defines the reader user permission. - Read - // NumberOfUserPermission defines the user permission number. - NumberOfUserPermission + // Super defines the super user permission. + Super + + // ReadOnly defines the reader user permission. + ReadOnly = Read + // WriteOnly defines the writer user permission. + WriteOnly = Write + // ReadWrite defines the reader && writer user permission. + ReadWrite = Read | Write + // Admin defines the privilege to full control the database. 
+ Admin = Read | Write | Super + + // Void defines the initial permission. + Void UserPermissionRole = 0 ) // UnmarshalJSON implements the json.Unmarshler interface. @@ -85,33 +94,48 @@ func (r UserPermissionRole) MarshalJSON() ([]byte, error) { // String implements the fmt.Stringer interface. func (r UserPermissionRole) String() string { - switch r { - case Admin: - return "Admin" - case Write: - return "Write" - case Read: - return "Read" - case Void: + if r == Void { return "Void" - default: - return "Unknown" + } else if r == Admin { + return "Admin" + } + + var res []string + if r&Read != 0 { + res = append(res, "Read") + } + if r&Write != 0 { + res = append(res, "Write") + } + if r&Super != 0 { + res = append(res, "Super") } + + return strings.Join(res, ",") } // FromString converts string to UserPermissionRole. func (r *UserPermissionRole) FromString(perm string) { - switch perm { - case "Admin": - *r = Admin - case "Write": - *r = Write - case "Read": - *r = Read - case "Void": + if perm == "Void" { *r = Void - default: - *r = NumberOfUserPermission + return + } else if perm == "Admin" { + *r = Admin + return + } + + *r = Void + + for _, p := range strings.Split(perm, ",") { + p = strings.TrimSpace(p) + switch p { + case "Read": + *r |= Read + case "Write": + *r |= Write + case "Super": + *r |= Super + } } } @@ -127,7 +151,7 @@ func (up *UserPermission) HasReadPermission() bool { if up == nil { return false } - return up.Role >= Admin && up.Role < NumberOfUserPermission + return up.Role&Read != 0 } // HasWritePermission returns true if user owns write permission. @@ -135,20 +159,20 @@ func (up *UserPermission) HasWritePermission() bool { if up == nil { return false } - return up.Role >= Admin && up.Role <= Write + return up.Role&Write != 0 } -// HasAdminPermission returns true if user owns admin permission. -func (up *UserPermission) HasAdminPermission() bool { +// HasSuperPermission returns true if user owns super permission. 
+func (up *UserPermission) HasSuperPermission() bool { if up == nil { return false } - return up.Role == Admin + return up.Role&Super != 0 } // IsValid returns whether the permission object is valid or not. func (up *UserPermission) IsValid() bool { - return up != nil && up.Role < NumberOfUserPermission && up.Role >= Admin + return up != nil && up.Role != 0 } // HasDisallowedQueryPatterns returns whether the queries are permitted. diff --git a/types/account_test.go b/types/account_test.go index 7a7eb665e..828930869 100644 --- a/types/account_test.go +++ b/types/account_test.go @@ -33,12 +33,17 @@ func TestUserPermissionFromRole(t *testing.T) { err = json.Unmarshal([]byte(`"Write"`), &r) So(err, ShouldBeNil) So(r, ShouldEqual, Write) + err = json.Unmarshal([]byte(`"Read,Write"`), &r) + So(err, ShouldBeNil) + So(r, ShouldEqual, ReadWrite) }) Convey("test string/from string", t, func() { var r UserPermissionRole So(r, ShouldEqual, Void) r.FromString(Read.String()) So(r, ShouldEqual, Read) + r.FromString(ReadWrite.String()) + So(r, ShouldEqual, ReadWrite) }) } @@ -47,7 +52,7 @@ func TestUserPermission(t *testing.T) { p := (*UserPermission)(nil) So(p.HasReadPermission(), ShouldBeFalse) So(p.HasWritePermission(), ShouldBeFalse) - So(p.HasAdminPermission(), ShouldBeFalse) + So(p.HasSuperPermission(), ShouldBeFalse) So(p.IsValid(), ShouldBeFalse) _, state := p.HasDisallowedQueryPatterns([]Query{}) So(state, ShouldBeTrue) @@ -55,30 +60,30 @@ func TestUserPermission(t *testing.T) { Convey("has read permission", t, func() { So(UserPermissionFromRole(Void).HasReadPermission(), ShouldBeFalse) So(UserPermissionFromRole(Read).HasReadPermission(), ShouldBeTrue) - So(UserPermissionFromRole(Write).HasReadPermission(), ShouldBeTrue) + So(UserPermissionFromRole(Write).HasReadPermission(), ShouldBeFalse) + So(UserPermissionFromRole(ReadWrite).HasReadPermission(), ShouldBeTrue) So(UserPermissionFromRole(Admin).HasReadPermission(), ShouldBeTrue) - 
So(UserPermissionFromRole(NumberOfUserPermission).HasReadPermission(), ShouldBeFalse) }) Convey("has write permission", t, func() { So(UserPermissionFromRole(Void).HasWritePermission(), ShouldBeFalse) So(UserPermissionFromRole(Read).HasWritePermission(), ShouldBeFalse) So(UserPermissionFromRole(Write).HasWritePermission(), ShouldBeTrue) + So(UserPermissionFromRole(ReadWrite).HasWritePermission(), ShouldBeTrue) So(UserPermissionFromRole(Admin).HasWritePermission(), ShouldBeTrue) - So(UserPermissionFromRole(NumberOfUserPermission).HasWritePermission(), ShouldBeFalse) }) Convey("has admin permission", t, func() { - So(UserPermissionFromRole(Void).HasAdminPermission(), ShouldBeFalse) - So(UserPermissionFromRole(Read).HasAdminPermission(), ShouldBeFalse) - So(UserPermissionFromRole(Write).HasAdminPermission(), ShouldBeFalse) - So(UserPermissionFromRole(Admin).HasAdminPermission(), ShouldBeTrue) - So(UserPermissionFromRole(NumberOfUserPermission).HasAdminPermission(), ShouldBeFalse) + So(UserPermissionFromRole(Void).HasSuperPermission(), ShouldBeFalse) + So(UserPermissionFromRole(Read).HasSuperPermission(), ShouldBeFalse) + So(UserPermissionFromRole(Write).HasSuperPermission(), ShouldBeFalse) + So(UserPermissionFromRole(ReadWrite).HasSuperPermission(), ShouldBeFalse) + So(UserPermissionFromRole(Admin).HasSuperPermission(), ShouldBeTrue) }) Convey("is valid", t, func() { So(UserPermissionFromRole(Void).IsValid(), ShouldBeFalse) So(UserPermissionFromRole(Read).IsValid(), ShouldBeTrue) So(UserPermissionFromRole(Write).IsValid(), ShouldBeTrue) + So(UserPermissionFromRole(ReadWrite).IsValid(), ShouldBeTrue) So(UserPermissionFromRole(Admin).IsValid(), ShouldBeTrue) - So(UserPermissionFromRole(NumberOfUserPermission).IsValid(), ShouldBeFalse) }) Convey("query patterns", t, func() { // empty patterns limitation diff --git a/worker/dbms_test.go b/worker/dbms_test.go index db8d80cbe..92309ca6b 100644 --- a/worker/dbms_test.go +++ b/worker/dbms_test.go @@ -19,6 +19,7 @@ package 
worker import ( "io/ioutil" "os" + "sync/atomic" "testing" "time" @@ -103,6 +104,8 @@ func TestDBMS(t *testing.T) { err = req.Sign(privateKey) So(err, ShouldBeNil) + var seqNo uint64 + Convey("with bp privilege", func() { // send update again err = testRequest(route.DBSDeploy, req, &res) @@ -112,10 +115,12 @@ func TestDBMS(t *testing.T) { // sending write query var writeQuery *types.Request var queryRes *types.Response - writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 1, dbID, []string{ - "create table test (test int)", - "insert into test values(1)", - }) + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "create table test (test int)", + "insert into test values(1)", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, writeQuery, &queryRes) @@ -123,9 +128,11 @@ func TestDBMS(t *testing.T) { // sending read query var readQuery *types.Request - readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, 1, 2, dbID, []string{ - "select * from test", - }) + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "select * from test", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, readQuery, &queryRes) @@ -134,22 +141,24 @@ func TestDBMS(t *testing.T) { // grant write and read permission err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, - &types.PermStat{Permission: types.UserPermissionFromRole(types.Write), Status: types.Normal}) + &types.PermStat{Permission: types.UserPermissionFromRole(types.ReadWrite), Status: types.Normal}) So(err, ShouldBeNil) userState, ok := dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) So(ok, ShouldBeTrue) So(userState.Permission, ShouldNotBeNil) - So(userState.Permission.Role, ShouldEqual, types.Write) + So(userState.Permission.Role, ShouldEqual, types.ReadWrite) So(userState.Status, ShouldEqual, types.Normal) Convey("success write and read", func() { // 
sending write query var writeQuery *types.Request var queryRes *types.Response - writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 1, dbID, []string{ - "create table test (test int)", - "insert into test values(1)", - }) + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "create table test (test int)", + "insert into test values(1)", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, writeQuery, &queryRes) @@ -160,9 +169,11 @@ func TestDBMS(t *testing.T) { // sending read query var readQuery *types.Request - readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, 1, 2, dbID, []string{ - "select * from test", - }) + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "select * from test", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, readQuery, &queryRes) @@ -205,20 +216,24 @@ func TestDBMS(t *testing.T) { // sending write query var writeQuery *types.Request var queryRes *types.Response - writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 3, dbID, []string{ - "create table test (test int)", - "insert into test values(1)", - }) + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "insert into test values(1)", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, writeQuery, &queryRes) + So(err, ShouldNotBeNil) So(err.Error(), ShouldContainSubstring, ErrPermissionDeny.Error()) // sending read query var readQuery *types.Request - readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, 1, 4, dbID, []string{ - "select * from test", - }) + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "select * from test", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, readQuery, &queryRes) @@ -227,6 +242,46 @@ func TestDBMS(t *testing.T) { err = 
dbms.addTxSubscription(dbID, nodeID, 1) So(err, ShouldBeNil) }) + + // grant write only permission + err = dbms.UpdatePermission(dbAddr.DatabaseID(), userAddr, + &types.PermStat{Permission: types.UserPermissionFromRole(types.Write), Status: types.Normal}) + userState, ok = dbms.busService.RequestPermStat(dbAddr.DatabaseID(), userAddr) + So(ok, ShouldBeTrue) + So(userState.Permission, ShouldNotBeNil) + So(userState.Permission.Role, ShouldEqual, types.Write) + So(userState.Status, ShouldEqual, types.Normal) + + Convey("success writing and failed to read", func() { + // sending read query + var readQuery *types.Request + var queryRes *types.Response + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "select * from test", + }) + So(err, ShouldBeNil) + + err = testRequest(route.DBSQuery, readQuery, &queryRes) + So(err, ShouldNotBeNil) + So(err.Error(), ShouldContainSubstring, ErrPermissionDeny.Error()) + + // sending write query + var writeQuery *types.Request + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "insert into test values(1)", + }) + So(err, ShouldBeNil) + + err = testRequest(route.DBSQuery, writeQuery, &queryRes) + So(err, ShouldBeNil) + err = queryRes.Verify() + So(err, ShouldBeNil) + So(queryRes.Header.RowCount, ShouldEqual, 0) + }) }) // grant invalid permission @@ -243,10 +298,12 @@ func TestDBMS(t *testing.T) { // sending write query var writeQuery *types.Request var queryRes *types.Response - writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 5, dbID, []string{ - "create table test (test int)", - "insert into test values(1)", - }) + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "create table test (test int)", + "insert into test values(1)", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, writeQuery, &queryRes) @@ -254,9 +311,11 @@ 
func TestDBMS(t *testing.T) { // sending read query var readQuery *types.Request - readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, 1, 6, dbID, []string{ - "select * from test", - }) + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "select * from test", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, readQuery, &queryRes) @@ -280,10 +339,12 @@ func TestDBMS(t *testing.T) { // sending write query var writeQuery *types.Request var queryRes *types.Response - writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 7, dbID, []string{ - "create table test (test int)", - "insert into test values(1)", - }) + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "create table test (test int)", + "insert into test values(1)", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, writeQuery, &queryRes) @@ -291,9 +352,11 @@ func TestDBMS(t *testing.T) { // sending read query var readQuery *types.Request - readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, 1, 8, dbID, []string{ - "select * from test", - }) + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "select * from test", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, readQuery, &queryRes) @@ -314,10 +377,12 @@ func TestDBMS(t *testing.T) { // sending write query var writeQuery *types.Request var queryRes *types.Response - writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 9, dbID, []string{ - "create table test (test int)", - "insert into test values(1)", - }) + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "create table test (test int)", + "insert into test values(1)", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, writeQuery, &queryRes) @@ -328,9 +393,11 @@ func TestDBMS(t 
*testing.T) { // sending read query var readQuery *types.Request - readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, 1, 10, dbID, []string{ - "select * from test", - }) + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "select * from test", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, readQuery, &queryRes) @@ -376,10 +443,12 @@ func TestDBMS(t *testing.T) { var queryRes *types.Response // sending allowed write query - writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 11, dbID, []string{ - "create table test (test int)", - "INSERT INTO TEST VALUES(1)", - }) + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "create table test (test int)", + "INSERT INTO TEST VALUES(1)", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, writeQuery, &queryRes) @@ -390,9 +459,11 @@ func TestDBMS(t *testing.T) { // sending allowed read query var readQuery *types.Request - readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, 1, 12, dbID, []string{ - "SELECT 1", - }) + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "SELECT 1", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, readQuery, &queryRes) @@ -405,35 +476,43 @@ func TestDBMS(t *testing.T) { So(queryRes.Payload.Rows[0].Values[0], ShouldEqual, 1) // sending disallowed write query - writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 13, dbID, []string{ - "insert into test values(1)", - }) + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "insert into test values(1)", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, writeQuery, &queryRes) So(err, ShouldNotBeNil) // sending disallowed write query mixed with valid write query - writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 
14, dbID, []string{ - "INSERT INTO TEST VALUES(1)", - "insert into test values(1)", - }) + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "INSERT INTO TEST VALUES(1)", + "insert into test values(1)", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, writeQuery, &queryRes) So(err, ShouldNotBeNil) // sending disallowed read query - readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, 1, 15, dbID, []string{ - "select * from test", - }) + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "select * from test", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, readQuery, &queryRes) So(err, ShouldNotBeNil) // sending disallowed read query - readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, 1, 16, dbID, []string{ - "SELECT 1", - "select * from test", - }) + readQuery, err = buildQueryWithDatabaseID(types.ReadQuery, + 1, atomic.AddUint64(&seqNo, 1), + dbID, []string{ + "SELECT 1", + "select * from test", + }) So(err, ShouldBeNil) err = testRequest(route.DBSQuery, readQuery, &queryRes) So(err, ShouldNotBeNil) @@ -453,7 +532,8 @@ func TestDBMS(t *testing.T) { // sending write query var writeQuery *types.Request var queryRes *types.Response - writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, 1, 1, + writeQuery, err = buildQueryWithDatabaseID(types.WriteQuery, + 1, atomic.AddUint64(&seqNo, 1), proto.DatabaseID("db_not_exists"), []string{ "create table test (test int)", "insert into test values(1)", From 147a9abe6a0a3b82f9155249cde73411742b0f6d Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 28 Jan 2019 16:26:08 +0800 Subject: [PATCH 253/302] Fix permission compatibility issues --- worker/dbms.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/worker/dbms.go b/worker/dbms.go index 028025b84..b6d3e59e9 100644 --- a/worker/dbms.go +++ b/worker/dbms.go @@ -469,7 +469,7 @@ func (dbms 
*DBMS) Ack(ack *types.Ack) (err error) { if err != nil { return } - err = dbms.checkPermission(addr, ack.Header.Response.Request.DatabaseID, types.ReadQuery) + err = dbms.checkPermission(addr, ack.Header.Response.Request.DatabaseID, types.ReadQuery, nil) if err != nil { return } From ea23a50437011a203d097625a5e137495155c83b Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Tue, 29 Jan 2019 10:36:17 +0800 Subject: [PATCH 254/302] Reuse wait group --- rpc/rpcutil_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/rpc/rpcutil_test.go b/rpc/rpcutil_test.go index 22b121c83..aa58c9487 100644 --- a/rpc/rpcutil_test.go +++ b/rpc/rpcutil_test.go @@ -451,7 +451,8 @@ func TestRecordRPCCost(t *testing.T) { start = time.Now() rounds = 1000 concurrent = 10 - body = func(wg *sync.WaitGroup, i int) { + wg = &sync.WaitGroup{} + body = func(i int) { defer func() { c.So(recover(), ShouldBeNil) wg.Done() @@ -460,10 +461,9 @@ func TestRecordRPCCost(t *testing.T) { } ) for i := 0; i < rounds; i++ { - var wg = &sync.WaitGroup{} for j := 0; j < concurrent; j++ { wg.Add(1) - go body(wg, i) + go body(i) } wg.Wait() } From 93308c3d9fca337fc38f532df2f0b678567dd1b9 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 29 Jan 2019 15:52:15 +0800 Subject: [PATCH 255/302] Simplify super user check logic in metastate --- blockproducer/metastate.go | 36 ++++++++++++++++-------------------- 1 file changed, 16 insertions(+), 20 deletions(-) diff --git a/blockproducer/metastate.go b/blockproducer/metastate.go index 68db687c3..79e19abe9 100644 --- a/blockproducer/metastate.go +++ b/blockproducer/metastate.go @@ -894,11 +894,16 @@ func (s *metaState) updatePermission(tx *types.UpdatePermission) (err error) { } // check whether sender has super privilege and find targetUser - isSuperUser := false numOfSuperUsers := 0 targetUserIndex := -1 for i, u := range so.Users { - isSuperUser = isSuperUser || (sender == u.Address && u.Permission.HasSuperPermission()) + if sender == u.Address 
&& !u.Permission.HasSuperPermission() { + log.WithFields(log.Fields{ + "sender": sender, + "dbID": tx.TargetSQLChain, + }).WithError(ErrAccountPermissionDeny).Error("unexpected error in updatePermission") + return ErrAccountPermissionDeny + } if u.Permission.HasSuperPermission() { numOfSuperUsers++ } @@ -907,14 +912,6 @@ func (s *metaState) updatePermission(tx *types.UpdatePermission) (err error) { } } - if !isSuperUser { - log.WithFields(log.Fields{ - "sender": sender, - "dbID": tx.TargetSQLChain, - }).WithError(ErrAccountPermissionDeny).Error("unexpected error in updatePermission") - return ErrAccountPermissionDeny - } - // return error if number of Admin <= 1 and Admin want to revoke permission of itself if numOfSuperUsers <= 1 && tx.TargetUser == sender && !tx.Permission.HasSuperPermission() { err = ErrNoSuperUserLeft @@ -952,20 +949,19 @@ func (s *metaState) updateKeys(tx *types.IssueKeys) (err error) { } // check sender's permission - isSuperUser := false for _, user := range so.Users { - if sender == user.Address && user.Permission.HasSuperPermission() { - isSuperUser = true + if sender == user.Address { + if !user.Permission.HasSuperPermission() { + log.WithFields(log.Fields{ + "sender": sender, + "dbID": tx.TargetSQLChain, + }).WithError(ErrAccountPermissionDeny).Error("unexpected error in updateKeys") + return ErrAccountPermissionDeny + } + break } } - if !isSuperUser { - log.WithFields(log.Fields{ - "sender": sender, - "dbID": tx.TargetSQLChain, - }).WithError(ErrAccountPermissionDeny).Error("unexpected error in updateKeys") - return ErrAccountPermissionDeny - } // update miner's key keyMap := make(map[proto.AccountAddress]string) From c018bc588a3e78d558557d882517ed476703215f Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 29 Jan 2019 16:04:51 +0800 Subject: [PATCH 256/302] Update account MarshalHash --- types/account_gen.go | 32 -------------------------------- types/account_gen_test.go | 37 ------------------------------------- 2 files changed, 69 
deletions(-) diff --git a/types/account_gen.go b/types/account_gen.go index 2807344b4..320d3ebd0 100644 --- a/types/account_gen.go +++ b/types/account_gen.go @@ -89,38 +89,6 @@ func (z *MinerInfo) Msgsize() (s int) { return } -// MarshalHash marshals for hash -func (z *PermStat) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 2 - o = append(o, 0x82, 0x82) - if z.Permission == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Permission.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = append(o, 0x82) - o = hsp.AppendInt32(o, int32(z.Status)) - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *PermStat) Msgsize() (s int) { - s = 1 + 11 - if z.Permission == nil { - s += hsp.NilSize - } else { - s += z.Permission.Msgsize() - } - s += 7 + hsp.Int32Size - return -} - // MarshalHash marshals for hash func (z *ProviderProfile) MarshalHash() (o []byte, err error) { var b []byte diff --git a/types/account_gen_test.go b/types/account_gen_test.go index 9b6a8a5d3..388a19ddb 100644 --- a/types/account_gen_test.go +++ b/types/account_gen_test.go @@ -83,43 +83,6 @@ func BenchmarkAppendMsgMinerInfo(b *testing.B) { } } -func TestMarshalHashPermStat(t *testing.T) { - v := PermStat{} - binary.Read(rand.Reader, binary.BigEndian, &v) - bts1, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - bts2, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(bts1, bts2) { - t.Fatal("hash not stable") - } -} - -func BenchmarkMarshalHashPermStat(b *testing.B) { - v := PermStat{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalHash() - } -} - -func BenchmarkAppendMsgPermStat(b *testing.B) { - v := PermStat{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalHash() - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - 
for i := 0; i < b.N; i++ { - bts, _ = v.MarshalHash() - } -} - func TestMarshalHashProviderProfile(t *testing.T) { v := ProviderProfile{} binary.Read(rand.Reader, binary.BigEndian, &v) From a52981f3f836a72815d3efedcb2da63f9637b602 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 29 Jan 2019 14:38:59 +0800 Subject: [PATCH 257/302] Fix block producer irreversible block test --- cmd/cqld/cqld_test.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/cmd/cqld/cqld_test.go b/cmd/cqld/cqld_test.go index bbadb0ae3..0e193eee9 100644 --- a/cmd/cqld/cqld_test.go +++ b/cmd/cqld/cqld_test.go @@ -27,6 +27,7 @@ import ( bp "github.com/CovenantSQL/CovenantSQL/blockproducer" "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/kms" + "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/types" @@ -67,13 +68,18 @@ func TestCQLD(t *testing.T) { // Wait for block producing time.Sleep(15 * time.Second) - // Kill one BP + // Kill one BP follower err = nodeCmds[2].Cmd.Process.Signal(syscall.SIGTERM) So(err, ShouldBeNil) time.Sleep(15 * time.Second) // set current bp to leader bp - rpc.SetCurrentBP(route.GetBPs()[0]) + for _, n := range conf.GConf.KnownNodes { + if n.Role == proto.Leader { + rpc.SetCurrentBP(n.ID) + break + } + } // The other peers should be waiting var ( From a67c156e9a309a69bd513aa925b8c03201468bc7 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 29 Jan 2019 14:22:02 +0800 Subject: [PATCH 258/302] Temporary disable query cancel test case --- cmd/cql-minerd/integration_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index 8d7351f82..fc6f8f5bc 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -490,7 +490,7 @@ func TestFullProcess(t *testing.T) { So(err, ShouldBeNil) 
So(resultBytes, ShouldResemble, []byte("ha\001ppy")) - Convey("test query cancel", FailureContinues, func(c C) { + SkipConvey("test query cancel", FailureContinues, func(c C) { /* test cancel write query */ wg := sync.WaitGroup{} wg.Add(1) From cae71f24885f2d914af6a24164dbce4141a1ee70 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Thu, 10 Jan 2019 10:51:50 +0800 Subject: [PATCH 259/302] Fix log field name typo --- kayak/runtime.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kayak/runtime.go b/kayak/runtime.go index 7c0b0083f..3ca7c2176 100644 --- a/kayak/runtime.go +++ b/kayak/runtime.go @@ -584,7 +584,7 @@ func (r *Runtime) followerCommit(l *kt.Log) (err error) { fields["commit_dequeue"] = tmCommitDequeue.Sub(tmCheckPrepareFinished).Nanoseconds() } if tmMark.After(tmCommitDequeue) { - fields["commit_dequeue"] = tmMark.Sub(tmCommitDequeue).Nanoseconds() + fields["commit_mark"] = tmMark.Sub(tmCommitDequeue).Nanoseconds() } log.WithFields(fields).Debug("kayak follower commit stat") }() From 40ca3c31adc1fbe587f60a9f0494d8a4528b02f8 Mon Sep 17 00:00:00 2001 From: leventeliu Date: Thu, 10 Jan 2019 10:54:04 +0800 Subject: [PATCH 260/302] Fix comment typo --- kayak/runtime.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kayak/runtime.go b/kayak/runtime.go index 3ca7c2176..f2eac9df4 100644 --- a/kayak/runtime.go +++ b/kayak/runtime.go @@ -115,7 +115,7 @@ type commitReq struct { result chan *commitResult } -// followerCommitResult defines the commit operation result. +// commitResult defines the commit operation result. 
type commitResult struct { start time.Time dbCost time.Duration From 0105c4f0c52ab7eec3c553d6656b7af9a65ee92d Mon Sep 17 00:00:00 2001 From: leventeliu Date: Thu, 10 Jan 2019 14:16:32 +0800 Subject: [PATCH 261/302] Complete kayak trace logs --- kayak/runtime.go | 84 ++++++++++++++++++++++++++++++++++-------------- 1 file changed, 60 insertions(+), 24 deletions(-) diff --git a/kayak/runtime.go b/kayak/runtime.go index f2eac9df4..b405f3de5 100644 --- a/kayak/runtime.go +++ b/kayak/runtime.go @@ -107,21 +107,28 @@ type Runtime struct { // commitReq defines the commit operation input. type commitReq struct { - ctx context.Context - data interface{} - index uint64 - lastCommit uint64 - log *kt.Log - result chan *commitResult + ctx context.Context + data interface{} + index uint64 + lastCommit uint64 + log *kt.Log + result chan *commitResult + tmStart time.Time + tmDecode time.Time + tmCommitEnqueue atomic.Value } // commitResult defines the commit operation result. type commitResult struct { - start time.Time - dbCost time.Duration - result interface{} - err error - rpc *rpcTracker + decodeCost time.Duration + enqueueCost time.Duration + queuedCost time.Duration + start time.Time + walCost time.Duration + dbCost time.Duration + result interface{} + err error + rpc *rpcTracker } // NewRuntime creates new kayak Runtime. 
@@ -563,13 +570,11 @@ func (r *Runtime) followerCommit(l *kt.Log) (err error) { cResult *commitResult tmStart = time.Now() - tmGetPrepareLog, tmCheckPrepareFinished, tmCommitDequeue, tmMark time.Time + tmGetPrepareLog, tmCheckPrepareFinished, tmFollowerCommit, tmMark time.Time ) defer func() { - var fields = log.Fields{ - "index": l.Index, - } + var fields = log.Fields{"index": l.Index} if tmGetPrepareLog.After(tmStart) { fields["get_prepare_log"] = tmGetPrepareLog.Sub(tmStart).Nanoseconds() } @@ -577,14 +582,18 @@ func (r *Runtime) followerCommit(l *kt.Log) (err error) { fields["check_prepare_finish"] = tmCheckPrepareFinished.Sub(tmGetPrepareLog).Nanoseconds() } - if cResult != nil && cResult.dbCost > 0 { + if cResult != nil { + fields["decode_cost"] = cResult.decodeCost.Nanoseconds() + fields["enqueue_cost"] = cResult.enqueueCost.Nanoseconds() + fields["queued_cost"] = cResult.queuedCost.Nanoseconds() + fields["wal_cost"] = cResult.walCost.Nanoseconds() fields["database_cost"] = cResult.dbCost.Nanoseconds() } - if tmCommitDequeue.After(tmCheckPrepareFinished) { - fields["commit_dequeue"] = tmCommitDequeue.Sub(tmCheckPrepareFinished).Nanoseconds() + if tmFollowerCommit.After(tmCheckPrepareFinished) { + fields["dequeue_result"] = tmFollowerCommit.Sub(tmCheckPrepareFinished).Nanoseconds() } - if tmMark.After(tmCommitDequeue) { - fields["commit_mark"] = tmMark.Sub(tmCommitDequeue).Nanoseconds() + if tmMark.After(tmFollowerCommit) { + fields["commit_mark"] = tmMark.Sub(tmFollowerCommit).Nanoseconds() } log.WithFields(fields).Debug("kayak follower commit stat") }() @@ -606,7 +615,7 @@ func (r *Runtime) followerCommit(l *kt.Log) (err error) { if cResult != nil { err = cResult.err } - tmCommitDequeue = time.Now() + tmFollowerCommit = time.Now() r.markPrepareFinished(l.Index) tmMark = time.Now() @@ -646,6 +655,11 @@ func (r *Runtime) followerCommitResult(ctx context.Context, commitLog *kt.Log, p // decode log and send to commit channel to process res = make(chan 
*commitResult, 1) + var ( + tmStart = time.Now() + tmDecode time.Time + ) + if prepareLog == nil { res <- &commitResult{ err: errors.Wrap(kt.ErrInvalidLog, "nil prepare log in commit"), @@ -677,6 +691,7 @@ func (r *Runtime) followerCommitResult(ctx context.Context, commitLog *kt.Log, p } return } + tmDecode = time.Now() req := &commitReq{ ctx: ctx, @@ -685,13 +700,15 @@ func (r *Runtime) followerCommitResult(ctx context.Context, commitLog *kt.Log, p lastCommit: lastCommit, result: res, log: commitLog, + tmStart: tmStart, + tmDecode: tmDecode, } select { case <-ctx.Done(): case r.commitCh <- req: + req.tmCommitEnqueue.Store(time.Now()) } - return } @@ -771,7 +788,17 @@ func (r *Runtime) followerDoCommit(req *commitReq) (err error) { return } - var tmStart = time.Now() + var ( + tmStart = time.Now() + ok bool + + tmCommitEnqueue, tmWriteWAL time.Time + ) + + if tmCommitEnqueue, ok = req.tmCommitEnqueue.Load().(time.Time); !ok { + tmCommitEnqueue = tmStart + } + // check for last commit availability myLastCommit := atomic.LoadUint64(&r.lastCommit) if req.lastCommit != myLastCommit { @@ -787,6 +814,7 @@ func (r *Runtime) followerDoCommit(req *commitReq) (err error) { err = errors.Wrap(err, "write follower commit log failed") return } + tmWriteWAL = time.Now() // do commit, not wrapping underlying handler commit error _, err = r.sh.Commit(req.data) @@ -794,7 +822,15 @@ func (r *Runtime) followerDoCommit(req *commitReq) (err error) { // mark last commit atomic.StoreUint64(&r.lastCommit, req.log.Index) - req.result <- &commitResult{err: err, dbCost: time.Since(tmStart)} + req.result <- &commitResult{ + err: err, + start: tmStart, + decodeCost: req.tmDecode.Sub(req.tmStart), + enqueueCost: tmCommitEnqueue.Sub(req.tmDecode), + queuedCost: tmStart.Sub(tmCommitEnqueue), + walCost: tmWriteWAL.Sub(tmStart), + dbCost: time.Since(tmWriteWAL), + } return } From 133add266b35f0087803d443f0d683f3213c32b5 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Fri, 11 Jan 2019 11:35:45 +0800 
Subject: [PATCH 262/302] Add backward compatible trace lib --- cmd/cql-minerd/main.go | 38 +++++++++++----------- utils/trace/trace_dummy.go | 64 ++++++++++++++++++++++++++++++++++++++ utils/trace/trace_go111.go | 60 +++++++++++++++++++++++++++++++++++ worker/dbms_rpc.go | 11 ------- xenomint/mux.go | 5 --- xenomint/xxx_test.go | 17 +--------- 6 files changed, 143 insertions(+), 52 deletions(-) create mode 100644 utils/trace/trace_dummy.go create mode 100644 utils/trace/trace_go111.go diff --git a/cmd/cql-minerd/main.go b/cmd/cql-minerd/main.go index c78b84af9..99b2949fd 100644 --- a/cmd/cql-minerd/main.go +++ b/cmd/cql-minerd/main.go @@ -26,19 +26,17 @@ import ( "os" "os/signal" "runtime" - - "github.com/CovenantSQL/CovenantSQL/metric" - - //"runtime/trace" "syscall" "time" "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" + "github.com/CovenantSQL/CovenantSQL/metric" "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/CovenantSQL/CovenantSQL/utils/trace" "github.com/CovenantSQL/CovenantSQL/worker" graphite "github.com/cyberdelia/go-metrics-graphite" metrics "github.com/rcrowley/go-metrics" @@ -257,22 +255,22 @@ func main() { go graphite.Graphite(metrics.DefaultRegistry, 5*time.Second, minerName, addr) } - //if traceFile != "" { - // f, err := os.Create(traceFile) - // if err != nil { - // log.WithError(err).Fatal("failed to create trace output file") - // } - // defer func() { - // if err := f.Close(); err != nil { - // log.WithError(err).Fatal("failed to close trace file") - // } - // }() - - // if err := trace.Start(f); err != nil { - // log.WithError(err).Fatal("failed to start trace") - // } - // defer trace.Stop() - //} + if traceFile != "" { + f, err := os.Create(traceFile) + if err != nil { + log.WithError(err).Fatal("failed to create trace output file") + } + defer 
func() { + if err := f.Close(); err != nil { + log.WithError(err).Fatal("failed to close trace file") + } + }() + + if err := trace.Start(f); err != nil { + log.WithError(err).Fatal("failed to start trace") + } + defer trace.Stop() + } <-signalCh utils.StopProfile() diff --git a/utils/trace/trace_dummy.go b/utils/trace/trace_dummy.go new file mode 100644 index 000000000..d24a32299 --- /dev/null +++ b/utils/trace/trace_dummy.go @@ -0,0 +1,64 @@ +// +build !go1.11 + +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package trace + +import ( + "context" + "io" +) + +type Task struct{} + +func (t *Task) End() {} + +type Region struct{} + +func (r *Region) End() {} + +func NewTask(pctx context.Context, taskType string) (ctx context.Context, task *Task) { + return ctx, &Task{} +} + +func StartRegion(ctx context.Context, regionType string) { + return +} + +func WithRegion(ctx context.Context, regionType string, fn func()) { + return +} + +func IsEnabled() { + return false +} + +func Log(ctx context.Context, category, message string) { + return +} + +func Logf(ctx context.Context, category, message string, args ...interface{}) { + return +} + +func Start(w io.Writer) (err error) { + return +} + +func Stop() { + return +} diff --git a/utils/trace/trace_go111.go b/utils/trace/trace_go111.go new file mode 100644 index 000000000..61f45c50f --- /dev/null +++ b/utils/trace/trace_go111.go @@ -0,0 +1,60 @@ +// +build go1.11 + +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package trace + +import ( + "context" + "io" + "runtime/trace" +) + +type Task = trace.Task +type Region = trace.Region + +func NewTask(pctx context.Context, taskType string) (ctx context.Context, task *Task) { + return trace.NewTask(pctx, taskType) +} + +func StartRegion(ctx context.Context, regionType string) (region *Region) { + return trace.StartRegion(ctx, regionType) +} + +func WithRegion(ctx context.Context, regionType string, fn func()) { + trace.WithRegion(ctx, regionType, fn) +} + +func IsEnabled() bool { + return trace.IsEnabled() +} + +func Log(ctx context.Context, category, message string) { + trace.Log(ctx, category, message) +} + +func Logf(ctx context.Context, category, message string, args ...interface{}) { + trace.Logf(ctx, category, message, args...) +} + +func Start(w io.Writer) (err error) { + return trace.Start(w) +} + +func Stop() { + trace.Stop() +} diff --git a/worker/dbms_rpc.go b/worker/dbms_rpc.go index 1fd40fd2a..e3d5dda3b 100644 --- a/worker/dbms_rpc.go +++ b/worker/dbms_rpc.go @@ -18,8 +18,6 @@ package worker import ( "github.com/CovenantSQL/CovenantSQL/proto" - //"context" - //"runtime/trace" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/types" @@ -82,10 +80,6 @@ func (rpc *DBMSRPCService) Query(req *types.Request, res *types.Response) (err e // dbQueryFailCounter.Mark(1) // return //} - //ctx := context.Background() - //ctx, task := trace.NewTask(ctx, "Query") - //defer task.End() - //defer trace.StartRegion(ctx, "QueryRegion").End() // verify query is sent from the request node if req.Envelope.NodeID.String() != string(req.Header.NodeID) { // node id mismatch @@ -112,11 +106,6 @@ func (rpc *DBMSRPCService) Ack(ack *types.Ack, _ *types.AckResponse) (err error) //if err = ack.Verify(); err != nil { // return //} - //ctx := context.Background() - //ctx, task := trace.NewTask(ctx, "Ack") - //defer task.End() - //defer trace.StartRegion(ctx, 
"AckRegion").End() - // verify if ack node is the original ack node if ack.Envelope.NodeID.String() != string(ack.Header.Response.Request.NodeID) { err = errors.Wrap(ErrInvalidRequest, "request node id mismatch in ack") diff --git a/xenomint/mux.go b/xenomint/mux.go index 48806ade9..520a81fe9 100644 --- a/xenomint/mux.go +++ b/xenomint/mux.go @@ -17,8 +17,6 @@ package xenomint import ( - //"context" - //"runtime/trace" "sync" "time" @@ -87,9 +85,6 @@ type MuxQueryResponse struct { // Query is the RPC method to process database query on mux service. func (s *MuxService) Query(req *MuxQueryRequest, resp *MuxQueryResponse) (err error) { - //var ctx, task = trace.NewTask(context.Background(), "MuxService.Query") - //defer task.End() - //defer trace.StartRegion(ctx, "Total").End() var ( c *Chain r *types.Response diff --git a/xenomint/xxx_test.go b/xenomint/xxx_test.go index eb2eaf603..a670fca3e 100644 --- a/xenomint/xxx_test.go +++ b/xenomint/xxx_test.go @@ -21,10 +21,8 @@ import ( "math/rand" "os" "path" - "sync/atomic" - - //"runtime/trace" "sync" + "sync/atomic" "syscall" "testing" "time" @@ -227,20 +225,7 @@ func setup() { panic(err) } - // Setup runtime trace for testing - //if testingTraceFile, err = ioutil.TempFile("", "CovenantSQL.trace."); err != nil { - // panic(err) - //} - //if err = trace.Start(testingTraceFile); err != nil { - // panic(err) - //} - log.SetLevel(log.DebugLevel) - //fl, err := os.OpenFile("./xenomint_test.log", os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0666) - //if err != nil { - // panic(err) - //} - //log.SetOutput(fl) log.SetOutput(os.Stdout) } From 656cece8224101b57bc7b0e8a77992d3132c7907 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Fri, 11 Jan 2019 12:55:16 +0800 Subject: [PATCH 263/302] Add timer feature for performance analysis --- utils/timer/timer.go | 90 +++++++++++++++++++++++++++++++++++++++ utils/timer/timer_test.go | 51 ++++++++++++++++++++++ 2 files changed, 141 insertions(+) create mode 100644 utils/timer/timer.go create mode 
100644 utils/timer/timer_test.go diff --git a/utils/timer/timer.go b/utils/timer/timer.go new file mode 100644 index 000000000..4a028269f --- /dev/null +++ b/utils/timer/timer.go @@ -0,0 +1,90 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package timer + +import ( + "sync" + "time" + + "github.com/CovenantSQL/CovenantSQL/utils/log" +) + +// Timer defines a stop watch timer for performance analysis. +type Timer struct { + sync.Mutex + start time.Time + names []string + pivots []time.Time +} + +// NewTimer returns a new stop watch timer instance. +func NewTimer() *Timer { + return &Timer{ + start: time.Now(), + } +} + +// Add records a time pivot. +func (t *Timer) Add(name string) { + t.Lock() + defer t.Unlock() + + t.names = append(t.names, name) + t.pivots = append(t.pivots, time.Now()) +} + +// ToLogFields returns analysis results as log fields. +func (t *Timer) ToLogFields() log.Fields { + var ( + m = t.ToMap() + f = log.Fields{} + ) + + for k, v := range m { + f[k] = v + } + + return f +} + +// ToMap returns analysis results as time duration map. 
+func (t *Timer) ToMap() map[string]time.Duration { + t.Lock() + defer t.Unlock() + + // calc + lp := len(t.pivots) + m := make(map[string]time.Duration, 1+lp) + + for i := 0; i != lp; i++ { + var d time.Duration + if i == 0 { + d = t.pivots[i].Sub(t.start) + } else { + d = t.pivots[i].Sub(t.pivots[i-1]) + } + + m[t.names[i]] = d + + if i+1 == lp { + // last one + m["total"] = t.pivots[i].Sub(t.start) + } + } + + return m +} diff --git a/utils/timer/timer_test.go b/utils/timer/timer_test.go new file mode 100644 index 000000000..c9fc08e99 --- /dev/null +++ b/utils/timer/timer_test.go @@ -0,0 +1,51 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package timer + +import ( + "testing" + "time" + + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestTimer(t *testing.T) { + Convey("test timer", t, func() { + t := NewTimer() + time.Sleep(time.Millisecond * 100) + t.Add("stage1") + + time.Sleep(time.Second * 1) + t.Add("stage2") + + m := t.ToMap() + So(m, ShouldHaveLength, 3) + So(m, ShouldContainKey, "stage1") + So(m, ShouldContainKey, "stage2") + So(m["stage1"], ShouldBeGreaterThanOrEqualTo, time.Millisecond*100) + So(m["stage2"], ShouldBeGreaterThanOrEqualTo, time.Second) + So(m["total"], ShouldBeGreaterThanOrEqualTo, time.Second+time.Millisecond*100) + + f := t.ToLogFields() + So(f, ShouldHaveLength, 3) + So(f, ShouldContainKey, "stage1") + So(f, ShouldContainKey, "stage2") + So(m["stage1"], ShouldEqual, f["stage1"]) + So(m["stage2"], ShouldEqual, f["stage2"]) + So(m["total"], ShouldEqual, f["total"]) + }) +} From ecd7e7dbb9a4bb8ba347ea93bc8d80241c4b8572 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Fri, 11 Jan 2019 13:19:47 +0800 Subject: [PATCH 264/302] Add comment to runtime/trace wrapper --- utils/trace/trace_dummy.go | 12 ++++++++++++ utils/trace/trace_go111.go | 11 +++++++++++ 2 files changed, 23 insertions(+) diff --git a/utils/trace/trace_dummy.go b/utils/trace/trace_dummy.go index d24a32299..1d8d40dba 100644 --- a/utils/trace/trace_dummy.go +++ b/utils/trace/trace_dummy.go @@ -23,42 +23,54 @@ import ( "io" ) +// Task mocks runtime/trace.Task. type Task struct{} +// End mocks runtime/trace.Task.End. func (t *Task) End() {} +// Region mocks runtime/trace.Region. type Region struct{} +// End mocks runtime/trace.Region.End. func (r *Region) End() {} +// NewTask mocks runtime/trace.NewTask. func NewTask(pctx context.Context, taskType string) (ctx context.Context, task *Task) { return ctx, &Task{} } +// StartRegion mocks runtime/trace.StartRegion. func StartRegion(ctx context.Context, regionType string) { return } +// WithRegion mocks runtime/trace.WithRegion. 
func WithRegion(ctx context.Context, regionType string, fn func()) { return } +// IsEnabled mocks runtime/trace.IsEnabled. func IsEnabled() { return false } +// Log mocks runtime/trace.Log. func Log(ctx context.Context, category, message string) { return } +// Logf mocks runtime/trace.Logf. func Logf(ctx context.Context, category, message string, args ...interface{}) { return } +// Start mocks runtime/trace.Start. func Start(w io.Writer) (err error) { return } +// Stop mocks runtime/trace.Stop. func Stop() { return } diff --git a/utils/trace/trace_go111.go b/utils/trace/trace_go111.go index 61f45c50f..ea0cd040a 100644 --- a/utils/trace/trace_go111.go +++ b/utils/trace/trace_go111.go @@ -24,37 +24,48 @@ import ( "runtime/trace" ) +// Task wraps runtime.trace.Task. type Task = trace.Task + +// Region wraps runtime/trace.Task. type Region = trace.Region +// NewTask wraps runtime/trace.NewTask. func NewTask(pctx context.Context, taskType string) (ctx context.Context, task *Task) { return trace.NewTask(pctx, taskType) } +// StartRegion wraps runtime/trace.StartRegion. func StartRegion(ctx context.Context, regionType string) (region *Region) { return trace.StartRegion(ctx, regionType) } +// WithRegion wraps runtime/trace.WithRegion. func WithRegion(ctx context.Context, regionType string, fn func()) { trace.WithRegion(ctx, regionType, fn) } +// IsEnabled wraps runtime/trace.IsEnabled. func IsEnabled() bool { return trace.IsEnabled() } +// Log wraps runtime/trace.Log. func Log(ctx context.Context, category, message string) { trace.Log(ctx, category, message) } +// Logf wraps runtime/trace.Logf. func Logf(ctx context.Context, category, message string, args ...interface{}) { trace.Logf(ctx, category, message, args...) } +// Start wraps runtime/trace.Start. func Start(w io.Writer) (err error) { return trace.Start(w) } +// Stop wraps runtime/trace.Stop. 
func Stop() { trace.Stop() } From db099cf4978da971d3717f042101b80542b6b561 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Fri, 11 Jan 2019 19:38:19 +0800 Subject: [PATCH 265/302] Add trace to kayak --- kayak/callbacks.go | 53 +++ kayak/commit.go | 245 ++++++++++++ kayak/log.go | 121 ++++++ kayak/processes.go | 219 ++++++++++ kayak/rpc.go | 56 +++ kayak/runtime.go | 789 ++++--------------------------------- kayak/runtime_test.go | 17 +- kayak/utils.go | 52 +++ utils/trace/trace_dummy.go | 10 +- 9 files changed, 842 insertions(+), 720 deletions(-) create mode 100644 kayak/callbacks.go create mode 100644 kayak/commit.go create mode 100644 kayak/log.go create mode 100644 kayak/processes.go create mode 100644 kayak/rpc.go create mode 100644 kayak/utils.go diff --git a/kayak/callbacks.go b/kayak/callbacks.go new file mode 100644 index 000000000..e75e8119f --- /dev/null +++ b/kayak/callbacks.go @@ -0,0 +1,53 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package kayak + +import ( + "context" + "github.com/CovenantSQL/CovenantSQL/utils/trace" + "github.com/pkg/errors" +) + +func (r *Runtime) doCheck(ctx context.Context, req interface{}) (err error) { + defer trace.StartRegion(ctx, "check").End() + if err = r.sh.Check(req); err != nil { + err = errors.Wrap(err, "verify log") + } + + return +} + +func (r *Runtime) doEncodePayload(ctx context.Context, req interface{}) (enc []byte, err error) { + defer trace.StartRegion(ctx, "encodePayload").End() + if enc, err = r.sh.EncodePayload(req); err != nil { + err = errors.Wrap(err, "encode kayak payload failed") + } + return +} + +func (r *Runtime) doDecodePayload(ctx context.Context, data []byte) (req interface{}, err error) { + defer trace.StartRegion(ctx, "decodePayload").End() + if req, err = r.sh.DecodePayload(data); err != nil { + err = errors.Wrap(err, "decode kayak payload failed") + } + return +} + +func (r *Runtime) doCommit(ctx context.Context, req interface{}) (result interface{}, err error) { + defer trace.StartRegion(ctx, "commit").End() + return r.sh.Commit(req) +} diff --git a/kayak/commit.go b/kayak/commit.go new file mode 100644 index 000000000..74867f19d --- /dev/null +++ b/kayak/commit.go @@ -0,0 +1,245 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package kayak + +import ( + "context" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/CovenantSQL/CovenantSQL/utils/timer" + "github.com/CovenantSQL/CovenantSQL/utils/trace" + "github.com/pkg/errors" + "sync/atomic" +) + +func (r *Runtime) leaderCommitResult(ctx context.Context, tm *timer.Timer, reqPayload interface{}, prepareLog *kt.Log) (res *commitFuture) { + // decode log and send to commit channel to process + res = newCommitFuture() + + if prepareLog == nil { + res.Set(&commitResult{err: errors.Wrap(kt.ErrInvalidLog, "nil prepare log in commit")}) + return + } + + // decode prepare log + req := &commitReq{ + ctx: ctx, + data: reqPayload, + index: prepareLog.Index, + result: res, + tm: tm, + } + + select { + case <-ctx.Done(): + res = nil + case r.commitCh <- req: + } + + return +} + +func (r *Runtime) followerCommitResult(ctx context.Context, tm *timer.Timer, commitLog *kt.Log, prepareLog *kt.Log, lastCommit uint64) (res *commitFuture) { + // decode log and send to commit channel to process + res = newCommitFuture() + + if prepareLog == nil { + res.Set(&commitResult{err: errors.Wrap(kt.ErrInvalidLog, "nil prepare log in commit")}) + return + } + + myLastCommit := atomic.LoadUint64(&r.lastCommit) + + // check committed index + if lastCommit < myLastCommit { + // leader pushed a early index before commit + log.WithFields(log.Fields{ + "head": myLastCommit, + "supplied": lastCommit, + }).Warning("invalid last commit log") + res.Set(&commitResult{err: errors.Wrap(kt.ErrInvalidLog, "invalid last commit log index")}) + return + } + + // decode prepare log + var logReq interface{} + var err error + if logReq, err = r.sh.DecodePayload(prepareLog.Data); err != nil { + res.Set(&commitResult{err: errors.Wrap(err, "decode log payload failed")}) + return + } + + tm.Add("decode_payload") + + req := &commitReq{ + ctx: ctx, + data: logReq, + index: 
prepareLog.Index, + lastCommit: lastCommit, + result: res, + log: commitLog, + tm: tm, + } + + select { + case <-ctx.Done(): + case r.commitCh <- req: + tm.Add("enqueue") + } + + return +} + +func (r *Runtime) commitCycle() { + for { + var cReq *commitReq + + select { + case <-r.stopCh: + return + case cReq = <-r.commitCh: + } + + if cReq != nil { + r.doCommitCycle(cReq) + } + } +} + +func (r *Runtime) leaderDoCommit(req *commitReq) () { + if req.log != nil { + // mis-use follower commit for leader + log.Fatal("INVALID EXISTING LOG FOR LEADER COMMIT") + return + } + + // create leader log + var ( + l *kt.Log + logData []byte + cr = &commitResult{} + err error + ) + + logData = append(logData, r.uint64ToBytes(req.index)...) + logData = append(logData, r.uint64ToBytes(atomic.LoadUint64(&r.lastCommit))...) + + if l, err = r.newLog(req.ctx, kt.LogCommit, logData); err != nil { + // serve error, leader could not write log + return + } + + req.tm.Add("write_wal") + + // not wrapping underlying handler commit error + cr.result, err = r.doCommit(req.ctx, req.data) + + req.tm.Add("db_write") + + // mark last commit + atomic.StoreUint64(&r.lastCommit, l.Index) + + // send commit + cr.rpc = r.rpc(l, r.minCommitFollowers) + cr.index = l.Index + cr.err = err + + // TODO(): text log for rpc errors + + // TODO(): mark uncommitted nodes and remove from peers + + req.result.Set(cr) + + req.tm.Add("send_follower_commit") + + return +} + +func (r *Runtime) followerDoCommit(req *commitReq) { + if req.log == nil { + log.Fatal("NO LOG FOR FOLLOWER COMMIT") + return + } + + // check for last commit availability + myLastCommit := atomic.LoadUint64(&r.lastCommit) + if req.lastCommit != myLastCommit { + // TODO(): need counter for retries, infinite commit re-order would cause troubles + go func(req *commitReq) { + r.commitCh <- req + }(req) + return + } + + req.tm.Add("wait_last_commit") + + var err error + + // write log first + if err = r.writeWAL(req.ctx, req.log); err != nil { + return 
+ } + + req.tm.Add("write_wal") + + // do commit, not wrapping underlying handler commit error + _, err = r.doCommit(req.ctx, req.data) + + req.tm.Add("db_write") + + // mark last commit + atomic.StoreUint64(&r.lastCommit, req.log.Index) + + req.result.Set(&commitResult{ + err: err, + }) + + return +} + +func (r *Runtime) getPrepareLog(ctx context.Context, l *kt.Log) (lastCommitIndex uint64, pl *kt.Log, err error) { + var prepareIndex uint64 + + // decode prepare index + if prepareIndex, err = r.bytesToUint64(l.Data); err != nil { + err = errors.Wrap(err, "log does not contain valid prepare index") + return + } + + // decode commit index + if len(l.Data) >= 16 { + lastCommitIndex, _ = r.bytesToUint64(l.Data[8:]) + } + + pl, err = r.wal.Get(prepareIndex) + + return +} + +func (r *Runtime) doCommitCycle(req *commitReq) { + defer trace.StartRegion(req.ctx, "commitCycle").End() + + r.peersLock.RLock() + defer r.peersLock.RUnlock() + + if r.role == proto.Leader { + r.leaderDoCommit(req) + } else { + r.followerDoCommit(req) + } +} diff --git a/kayak/log.go b/kayak/log.go new file mode 100644 index 000000000..d027fb094 --- /dev/null +++ b/kayak/log.go @@ -0,0 +1,121 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package kayak + +import ( + "context" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + "github.com/CovenantSQL/CovenantSQL/utils/trace" + "github.com/pkg/errors" + "io" + "log" +) + +func (r *Runtime) newLog(ctx context.Context, logType kt.LogType, data []byte) (l *kt.Log, err error) { + defer trace.StartRegion(ctx, "newWAL").End() + + // allocate index + r.nextIndexLock.Lock() + i := r.nextIndex + r.nextIndex++ + r.nextIndexLock.Unlock() + l = &kt.Log{ + LogHeader: kt.LogHeader{ + Index: i, + Type: logType, + Producer: r.nodeID, + }, + Data: data, + } + + // error write will be a fatal error, cause to node to fail fast + if err = r.wal.Write(l); err != nil { + log.Fatalf("WRITE LOG FAILED: %v", err) + } + + return +} + +func (r *Runtime) writeWAL(ctx context.Context, l *kt.Log) (err error) { + defer trace.StartRegion(ctx, "writeWal").End() + if err = r.wal.Write(l); err != nil { + err = errors.Wrap(err, "write follower log failed") + } + + return +} + +func (r *Runtime) readLogs() (err error) { + // load logs, only called during init + var l *kt.Log + + for { + if l, err = r.wal.Read(); err != nil && err != io.EOF { + err = errors.Wrap(err, "load previous logs in wal failed") + return + } else if err == io.EOF { + err = nil + break + } + + switch l.Type { + case kt.LogPrepare: + // record in pending prepares + r.pendingPrepares[l.Index] = true + case kt.LogCommit: + // record last commit + var lastCommit uint64 + var prepareLog *kt.Log + if lastCommit, prepareLog, err = r.getPrepareLog(context.Background(), l); err != nil { + err = errors.Wrap(err, "previous prepare does not exists, node need full recovery") + return + } + if lastCommit != r.lastCommit { + err = errors.Wrapf(err, + "last commit record in wal mismatched (expected: %v, actual: %v)", r.lastCommit, lastCommit) + return + } + if !r.pendingPrepares[prepareLog.Index] { + err = errors.Wrap(kt.ErrInvalidLog, "previous prepare already committed/rollback") + return + } + r.lastCommit = l.Index 
+ // resolve previous prepared + delete(r.pendingPrepares, prepareLog.Index) + case kt.LogRollback: + var prepareLog *kt.Log + if _, prepareLog, err = r.getPrepareLog(context.Background(), l); err != nil { + err = errors.Wrap(err, "previous prepare does not exists, node need full recovery") + return + } + if !r.pendingPrepares[prepareLog.Index] { + err = errors.Wrap(kt.ErrInvalidLog, "previous prepare already committed/rollback") + return + } + // resolve previous prepared + delete(r.pendingPrepares, prepareLog.Index) + default: + err = errors.Wrapf(kt.ErrInvalidLog, "invalid log type: %v", l.Type) + return + } + + // record nextIndex + r.updateNextIndex(l) + } + + return +} diff --git a/kayak/processes.go b/kayak/processes.go new file mode 100644 index 000000000..4ef78558f --- /dev/null +++ b/kayak/processes.go @@ -0,0 +1,219 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package kayak + +import ( + "context" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/CovenantSQL/CovenantSQL/utils/timer" + "github.com/CovenantSQL/CovenantSQL/utils/trace" + "github.com/pkg/errors" +) + +func (r *Runtime) doLeaderPrepare(ctx context.Context, tm *timer.Timer, req interface{}) (prepareLog *kt.Log, err error) { + // check prepare in leader + if err = r.doCheck(ctx, req); err != nil { + err = errors.Wrap(err, "leader verify log") + return + } + + tm.Add("leader_check") + + // encode request + var encBuf []byte + if encBuf, err = r.doEncodePayload(ctx, req); err != nil { + err = errors.Wrap(err, "encode kayak payload failed") + return + } + + tm.Add("leader_encode_payload") + + // create prepare request + if prepareLog, err = r.leaderLogPrepare(ctx, tm, encBuf); err != nil { + // serve error, leader could not write logs, change leader in block producer + // TODO(): CHANGE LEADER + return + } + + // Leader pending map handling. 
+ r.markPendingPrepare(prepareLog.Index) + + tm.Add("leader_prepare") + + // send prepare to all nodes + prepareTracker := r.rpc(prepareLog, r.minPreparedFollowers) + prepareCtx, prepareCtxCancelFunc := context.WithTimeout(ctx, r.prepareTimeout) + defer prepareCtxCancelFunc() + prepareErrors, prepareDone, _ := prepareTracker.get(prepareCtx) + if !prepareDone { + // timeout, rollback + err = kt.ErrPrepareTimeout + return + } + + tm.Add("follower_prepare") + + // collect errors + err = r.errorSummary(prepareErrors) + + return +} + +func (r *Runtime) doLeaderCommit(ctx context.Context, tm *timer.Timer, prepareLog *kt.Log, req interface{}) ( + result interface{}, logIndex uint64, err error) { + var commitResult *commitResult + if commitResult, err = r.leaderCommitResult(ctx, tm, req, prepareLog).Get(ctx); err != nil { + return + } + + result = commitResult.result + logIndex = commitResult.index + err = commitResult.err + + if commitResult.rpc != nil { + commitResult.rpc.get(ctx) + } + + tm.Add("wait_follower_commit") + + return +} + +func (r *Runtime) doLeaderRollback(ctx context.Context, tm *timer.Timer, prepareLog *kt.Log) { + // rollback local + var rollbackLog *kt.Log + var logErr error + if rollbackLog, logErr = r.leaderLogRollback(ctx, tm, prepareLog.Index); logErr != nil { + // serve error, construct rollback log failed, internal error + // TODO(): CHANGE LEADER + return + } + + defer trace.StartRegion(ctx, "followerRollback").End() + + // async send rollback to all nodes + r.rpc(rollbackLog, 0) + + tm.Add("follower_rollback") +} + +func (r *Runtime) leaderLogPrepare(ctx context.Context, tm *timer.Timer, data []byte) (*kt.Log, error) { + defer trace.StartRegion(ctx, "leaderLogPrepare").End() + defer tm.Add("leader_log_prepare") + // just write new log + return r.newLog(ctx, kt.LogPrepare, data) +} + +func (r *Runtime) leaderLogRollback(ctx context.Context, tm *timer.Timer, i uint64) (*kt.Log, error) { + defer trace.StartRegion(ctx, "leaderLogRollback").End() + 
defer tm.Add("leader_log_rollback") + // just write new log + return r.newLog(ctx, kt.LogRollback, r.uint64ToBytes(i)) +} + +func (r *Runtime) followerPrepare(ctx context.Context, tm *timer.Timer, l *kt.Log) (err error) { + defer func() { + log.WithField("r", l.Index).WithFields(tm.ToLogFields()).Debug("kayak follower prepare stat") + }() + + // decode + var req interface{} + if req, err = r.doDecodePayload(ctx, l.Data); err != nil { + return + } + tm.Add("decode") + + if err = r.doCheck(ctx, req); err != nil { + return + } + tm.Add("check") + + // write log + if err = r.writeWAL(ctx, l); err != nil { + return + + } + tm.Add("write_wal") + + r.markPendingPrepare(l.Index) + tm.Add("mark") + + return +} + +func (r *Runtime) followerRollback(ctx context.Context, tm *timer.Timer, l *kt.Log) (err error) { + var prepareLog *kt.Log + if _, prepareLog, err = r.getPrepareLog(ctx, l); err != nil || prepareLog == nil { + err = errors.Wrap(err, "get original request in rollback failed") + return + } + tm.Add("get_prepare") + + // check if prepare already processed + if r.checkIfPrepareFinished(prepareLog.Index) { + err = errors.Wrap(kt.ErrInvalidLog, "prepare request already processed") + return + } + tm.Add("check_prepare") + + // write wal + if err = r.writeWAL(ctx, l); err != nil { + return + } + tm.Add("write_wal") + + r.markPrepareFinished(l.Index) + tm.Add("mark") + + return +} + +func (r *Runtime) followerCommit(ctx context.Context, tm *timer.Timer, l *kt.Log) (err error) { + var ( + prepareLog *kt.Log + lastCommit uint64 + cResult *commitResult + ) + + defer func() { + log.WithField("r", l.Index).WithFields(tm.ToLogFields()).Debug("kayak follower commit stat") + }() + + if lastCommit, prepareLog, err = r.getPrepareLog(ctx, l); err != nil { + err = errors.Wrap(err, "get original request in commit failed") + return + } + tm.Add("get_prepare") + + // check if prepare already processed + if r.checkIfPrepareFinished(prepareLog.Index) { + err = errors.Wrap(kt.ErrInvalidLog, 
"prepare request already processed") + return + } + tm.Add("check_prepare") + + cResult, err = r.followerCommitResult(ctx, tm, l, prepareLog, lastCommit).Get(ctx) + if cResult != nil { + err = cResult.err + } + + r.markPrepareFinished(l.Index) + tm.Add("mark") + + return +} diff --git a/kayak/rpc.go b/kayak/rpc.go new file mode 100644 index 000000000..4dbbe0464 --- /dev/null +++ b/kayak/rpc.go @@ -0,0 +1,56 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package kayak + +import ( + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/pkg/errors" +) + +func (r *Runtime) errorSummary(errs map[proto.NodeID]error) error { + failNodes := make(map[proto.NodeID]error) + + for s, err := range errs { + if err != nil { + failNodes[s] = err + } + } + + if len(failNodes) == 0 { + return nil + } + + return errors.Wrapf(kt.ErrPrepareFailed, "fail on nodes: %v", failNodes) +} + +/// rpc related +func (r *Runtime) rpc(l *kt.Log, minCount int) (tracker *rpcTracker) { + req := &kt.RPCRequest{ + Instance: r.instanceID, + Log: l, + } + + tracker = newTracker(r, req, minCount) + tracker.send() + + // TODO(): track this rpc + + // TODO(): log remote errors + + return +} diff --git a/kayak/runtime.go b/kayak/runtime.go index b405f3de5..a1c33a13d 100644 --- a/kayak/runtime.go +++ b/kayak/runtime.go @@ -18,9 +18,8 @@ package kayak import ( "context" - "encoding/binary" "fmt" - "io" + "github.com/CovenantSQL/CovenantSQL/utils/timer" "math" "sync" "sync/atomic" @@ -28,8 +27,8 @@ import ( kt "github.com/CovenantSQL/CovenantSQL/kayak/types" "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/CovenantSQL/CovenantSQL/utils/trace" "github.com/pkg/errors" ) @@ -107,28 +106,50 @@ type Runtime struct { // commitReq defines the commit operation input. type commitReq struct { - ctx context.Context - data interface{} - index uint64 - lastCommit uint64 - log *kt.Log - result chan *commitResult - tmStart time.Time - tmDecode time.Time - tmCommitEnqueue atomic.Value + ctx context.Context + data interface{} + index uint64 + lastCommit uint64 + log *kt.Log + result *commitFuture + tm *timer.Timer } // commitResult defines the commit operation result. 
type commitResult struct { - decodeCost time.Duration - enqueueCost time.Duration - queuedCost time.Duration - start time.Time - walCost time.Duration - dbCost time.Duration - result interface{} - err error - rpc *rpcTracker + index uint64 + result interface{} + err error + rpc *rpcTracker +} + +type commitFuture struct { + ch chan *commitResult +} + +func newCommitFuture() *commitFuture { + return &commitFuture{ + ch: make(chan *commitResult), + } +} + +func (f *commitFuture) Get(ctx context.Context) (cr *commitResult, err error) { + if f == nil || f.ch == nil { + err = errors.Wrap(ctx.Err(), "enqueue commit timeout") + return + } + + select { + case <-ctx.Done(): + err = errors.Wrap(ctx.Err(), "get commit result timeout") + return + case cr = <-f.ch: + return + } +} + +func (f *commitFuture) Set(cr *commitResult) { + f.ch <- cr } // NewRuntime creates new kayak Runtime. @@ -252,162 +273,47 @@ func (r *Runtime) Shutdown() (err error) { // Apply defines entry for Leader node. func (r *Runtime) Apply(ctx context.Context, req interface{}) (result interface{}, logIndex uint64, err error) { - var commitFuture <-chan *commitResult - var cResult *commitResult + ctx, task := trace.NewTask(ctx, "Kayak.Apply") + defer task.End() - var tmStart, tmLeaderPrepare, tmFollowerPrepare, tmCommitEnqueue, tmLeaderRollback, - tmRollback, tmCommitDequeue, tmLeaderCommit, tmCommit time.Time - var dbCost time.Duration + tm := timer.NewTimer() defer func() { - fields := log.Fields{ - "r": logIndex, - } - if !tmLeaderPrepare.Before(tmStart) { - fields["lp"] = tmLeaderPrepare.Sub(tmStart).Nanoseconds() - } - if !tmFollowerPrepare.Before(tmLeaderPrepare) { - fields["fp"] = tmFollowerPrepare.Sub(tmLeaderPrepare).Nanoseconds() - } - if !tmLeaderRollback.Before(tmFollowerPrepare) { - fields["lr"] = tmLeaderRollback.Sub(tmFollowerPrepare).Nanoseconds() - } - if !tmRollback.Before(tmLeaderRollback) { - fields["fr"] = tmRollback.Sub(tmLeaderRollback).Nanoseconds() - } - if 
!tmCommitEnqueue.Before(tmFollowerPrepare) { - fields["eq"] = tmCommitEnqueue.Sub(tmFollowerPrepare).Nanoseconds() - } - if !tmCommitDequeue.Before(tmCommitEnqueue) { - fields["dq"] = tmCommitDequeue.Sub(tmCommitEnqueue).Nanoseconds() - } - if !tmLeaderCommit.Before(tmCommitDequeue) { - fields["lc"] = tmLeaderCommit.Sub(tmCommitDequeue).Nanoseconds() - } - if !tmCommit.Before(tmLeaderCommit) { - fields["fc"] = tmCommit.Sub(tmLeaderCommit).Nanoseconds() - } - if dbCost > 0 { - fields["dc"] = dbCost.Nanoseconds() - } - if !tmCommit.Before(tmStart) { - fields["t"] = tmCommit.Sub(tmStart).Nanoseconds() - } else if !tmRollback.Before(tmStart) { - fields["t"] = tmRollback.Sub(tmStart).Nanoseconds() - } - log.WithFields(fields).WithError(err).Debug("kayak leader apply") + log.WithField("r", logIndex). + WithFields(tm.ToLogFields()). + WithError(err). + Debug("kayak leader apply") }() + waitForLockRegion := trace.StartRegion(ctx, "peersLock") + r.peersLock.RLock() defer r.peersLock.RUnlock() + tm.Add("prl") + + waitForLockRegion.End() + if r.role != proto.Leader { // not leader err = kt.ErrNotLeader return } - tmStart = time.Now() + // prepare + prepareLog, err := r.doLeaderPrepare(ctx, tm, req) - // check prepare in leader - if err = r.doCheck(req); err != nil { - err = errors.Wrap(err, "leader verify log") - return + if prepareLog != nil { + defer r.markPrepareFinished(prepareLog.Index) } - // encode request - var encBuf []byte - if encBuf, err = r.sh.EncodePayload(req); err != nil { - err = errors.Wrap(err, "encode kayak payload failed") - return - } - - // create prepare request - var prepareLog *kt.Log - if prepareLog, err = r.leaderLogPrepare(encBuf); err != nil { - // serve error, leader could not write logs, change leader in block producer - // TODO(): CHANGE LEADER - return - } - - // Leader pending map handling. 
- r.markPendingPrepare(prepareLog.Index) - defer r.markPrepareFinished(prepareLog.Index) - - tmLeaderPrepare = time.Now() - - // send prepare to all nodes - prepareTracker := r.rpc(prepareLog, r.minPreparedFollowers) - prepareCtx, prepareCtxCancelFunc := context.WithTimeout(ctx, r.prepareTimeout) - defer prepareCtxCancelFunc() - prepareErrors, prepareDone, _ := prepareTracker.get(prepareCtx) - if !prepareDone { - // timeout, rollback - err = kt.ErrPrepareTimeout - goto ROLLBACK - } - - // collect errors - if err = r.errorSummary(prepareErrors); err != nil { - goto ROLLBACK - } - - tmFollowerPrepare = time.Now() - - commitFuture = r.leaderCommitResult(ctx, req, prepareLog) - - tmCommitEnqueue = time.Now() - - if commitFuture == nil { - logIndex = prepareLog.Index - err = errors.Wrap(ctx.Err(), "enqueue commit timeout") - goto ROLLBACK - } - - cResult = <-commitFuture - if cResult != nil { - logIndex = prepareLog.Index - result = cResult.result - err = cResult.err - - tmCommitDequeue = cResult.start - dbCost = cResult.dbCost - tmLeaderCommit = time.Now() - - // wait until context deadline or commit done - if cResult.rpc != nil { - cResult.rpc.get(ctx) - } - } else { - log.Fatal("IMPOSSIBLE BRANCH") - select { - case <-ctx.Done(): - err = errors.Wrap(ctx.Err(), "process commit timeout") - goto ROLLBACK - default: - } + if err == nil { + // commit + return r.doLeaderCommit(ctx, tm, prepareLog, req) } - tmCommit = time.Now() - - return - -ROLLBACK: - // rollback local - var rollbackLog *kt.Log - var logErr error - if rollbackLog, logErr = r.leaderLogRollback(prepareLog.Index); logErr != nil { - // serve error, construct rollback log failed, internal error - // TODO(): CHANGE LEADER - return - } - - tmLeaderRollback = time.Now() - - // async send rollback to all nodes - r.rpc(rollbackLog, 0) - - tmRollback = time.Now() + // rollback + r.doLeaderRollback(ctx, tm, prepareLog) return } @@ -419,18 +325,22 @@ func (r *Runtime) FollowerApply(l *kt.Log) (err error) { return } - 
var tmStart, tmEnd time.Time + ctx, task := trace.NewTask(context.Background(), "Kayak.FollowerApply") + defer task.End() + + tm := timer.NewTimer() defer func() { - tmEnd = time.Now() - log.WithFields(log.Fields{ - "t": l.Type.String(), - "i": l.Index, - "c": tmEnd.Sub(tmStart).Nanoseconds(), - }).WithError(err).Debug("kayak follower apply") + log. + WithFields(log.Fields{ + "t": l.Type.String(), + "i": l.Index, + }). + WithFields(tm.ToLogFields()). + WithError(err). + Debug("kayak follower apply") }() - tmStart = time.Now() r.peersLock.RLock() defer r.peersLock.RUnlock() @@ -443,17 +353,11 @@ func (r *Runtime) FollowerApply(l *kt.Log) (err error) { // verify log structure switch l.Type { case kt.LogPrepare: - err = r.followerPrepare(l) + err = r.followerPrepare(ctx, tm, l) case kt.LogRollback: - err = r.followerRollback(l) + err = r.followerRollback(ctx, tm, l) case kt.LogCommit: - err = r.followerCommit(l) - case kt.LogBarrier: - // support barrier for log truncation and peer update - fallthrough - case kt.LogNoop: - // do nothing - err = r.followerNoop(l) + err = r.followerCommit(ctx, tm, l) } if err == nil { @@ -471,475 +375,6 @@ func (r *Runtime) UpdatePeers(peers *proto.Peers) (err error) { return } -func (r *Runtime) leaderLogPrepare(data []byte) (*kt.Log, error) { - // just write new log - return r.newLog(kt.LogPrepare, data) -} - -func (r *Runtime) leaderLogRollback(i uint64) (*kt.Log, error) { - // just write new log - return r.newLog(kt.LogRollback, r.uint64ToBytes(i)) -} - -func (r *Runtime) doCheck(req interface{}) (err error) { - if err = r.sh.Check(req); err != nil { - err = errors.Wrap(err, "verify log") - return - } - - return -} - -func (r *Runtime) followerPrepare(l *kt.Log) (err error) { - var ( - tmStart = time.Now() - - tmDecode, tmCheck, tmWriteWAL, tmMark time.Time - ) - - defer func() { - var fields = log.Fields{"index": l.Index} - if tmDecode.After(tmStart) { - fields["decode"] = tmDecode.Sub(tmStart).Nanoseconds() - } - if 
tmCheck.After(tmDecode) { - fields["check"] = tmCheck.Sub(tmDecode).Nanoseconds() - } - if tmWriteWAL.After(tmCheck) { - fields["write_wal"] = tmWriteWAL.Sub(tmCheck).Nanoseconds() - } - if tmMark.After(tmWriteWAL) { - fields["mark"] = tmMark.Sub(tmWriteWAL).Nanoseconds() - } - log.WithFields(fields).Debug("kayak follower prepare stat") - }() - - // decode - var req interface{} - if req, err = r.sh.DecodePayload(l.Data); err != nil { - err = errors.Wrap(err, "decode kayak payload failed") - return - } - tmDecode = time.Now() - - if err = r.doCheck(req); err != nil { - return - } - tmCheck = time.Now() - - // write log - if err = r.wal.Write(l); err != nil { - err = errors.Wrap(err, "write follower prepare log failed") - return - } - tmWriteWAL = time.Now() - - r.markPendingPrepare(l.Index) - tmMark = time.Now() - - return -} - -func (r *Runtime) followerRollback(l *kt.Log) (err error) { - var prepareLog *kt.Log - if _, prepareLog, err = r.getPrepareLog(l); err != nil || prepareLog == nil { - err = errors.Wrap(err, "get original request in rollback failed") - return - } - - // check if prepare already processed - if r.checkIfPrepareFinished(prepareLog.Index) { - err = errors.Wrap(kt.ErrInvalidLog, "prepare request already processed") - return - } - - // write wal - if err = r.wal.Write(l); err != nil { - err = errors.Wrap(err, "write follower rollback log failed") - } - - r.markPrepareFinished(l.Index) - - return -} - -func (r *Runtime) followerCommit(l *kt.Log) (err error) { - var ( - prepareLog *kt.Log - lastCommit uint64 - cResult *commitResult - tmStart = time.Now() - - tmGetPrepareLog, tmCheckPrepareFinished, tmFollowerCommit, tmMark time.Time - ) - - defer func() { - var fields = log.Fields{"index": l.Index} - if tmGetPrepareLog.After(tmStart) { - fields["get_prepare_log"] = tmGetPrepareLog.Sub(tmStart).Nanoseconds() - } - if tmCheckPrepareFinished.After(tmGetPrepareLog) { - fields["check_prepare_finish"] = - 
tmCheckPrepareFinished.Sub(tmGetPrepareLog).Nanoseconds() - } - if cResult != nil { - fields["decode_cost"] = cResult.decodeCost.Nanoseconds() - fields["enqueue_cost"] = cResult.enqueueCost.Nanoseconds() - fields["queued_cost"] = cResult.queuedCost.Nanoseconds() - fields["wal_cost"] = cResult.walCost.Nanoseconds() - fields["database_cost"] = cResult.dbCost.Nanoseconds() - } - if tmFollowerCommit.After(tmCheckPrepareFinished) { - fields["dequeue_result"] = tmFollowerCommit.Sub(tmCheckPrepareFinished).Nanoseconds() - } - if tmMark.After(tmFollowerCommit) { - fields["commit_mark"] = tmMark.Sub(tmFollowerCommit).Nanoseconds() - } - log.WithFields(fields).Debug("kayak follower commit stat") - }() - - if lastCommit, prepareLog, err = r.getPrepareLog(l); err != nil { - err = errors.Wrap(err, "get original request in commit failed") - return - } - tmGetPrepareLog = time.Now() - - // check if prepare already processed - if r.checkIfPrepareFinished(prepareLog.Index) { - err = errors.Wrap(kt.ErrInvalidLog, "prepare request already processed") - return - } - tmCheckPrepareFinished = time.Now() - - cResult = <-r.followerCommitResult(context.Background(), l, prepareLog, lastCommit) - if cResult != nil { - err = cResult.err - } - tmFollowerCommit = time.Now() - - r.markPrepareFinished(l.Index) - tmMark = time.Now() - - return -} - -func (r *Runtime) leaderCommitResult(ctx context.Context, reqPayload interface{}, prepareLog *kt.Log) (res chan *commitResult) { - // decode log and send to commit channel to process - res = make(chan *commitResult, 1) - - if prepareLog == nil { - res <- &commitResult{ - err: errors.Wrap(kt.ErrInvalidLog, "nil prepare log in commit"), - } - return - } - - // decode prepare log - req := &commitReq{ - ctx: ctx, - data: reqPayload, - index: prepareLog.Index, - result: res, - } - - select { - case <-ctx.Done(): - res = nil - case r.commitCh <- req: - } - - return -} - -func (r *Runtime) followerCommitResult(ctx context.Context, commitLog *kt.Log, 
prepareLog *kt.Log, lastCommit uint64) (res chan *commitResult) { - // decode log and send to commit channel to process - res = make(chan *commitResult, 1) - - var ( - tmStart = time.Now() - tmDecode time.Time - ) - - if prepareLog == nil { - res <- &commitResult{ - err: errors.Wrap(kt.ErrInvalidLog, "nil prepare log in commit"), - } - return - } - - myLastCommit := atomic.LoadUint64(&r.lastCommit) - - // check committed index - if lastCommit < myLastCommit { - // leader pushed a early index before commit - log.WithFields(log.Fields{ - "head": myLastCommit, - "supplied": lastCommit, - }).Warning("invalid last commit log") - res <- &commitResult{ - err: errors.Wrap(kt.ErrInvalidLog, "invalid last commit log index"), - } - return - } - - // decode prepare log - var logReq interface{} - var err error - if logReq, err = r.sh.DecodePayload(prepareLog.Data); err != nil { - res <- &commitResult{ - err: errors.Wrap(err, "decode log payload failed"), - } - return - } - tmDecode = time.Now() - - req := &commitReq{ - ctx: ctx, - data: logReq, - index: prepareLog.Index, - lastCommit: lastCommit, - result: res, - log: commitLog, - tmStart: tmStart, - tmDecode: tmDecode, - } - - select { - case <-ctx.Done(): - case r.commitCh <- req: - req.tmCommitEnqueue.Store(time.Now()) - } - return -} - -func (r *Runtime) commitCycle() { - // TODO(): panic recovery - for { - var cReq *commitReq - - select { - case <-r.stopCh: - return - case cReq = <-r.commitCh: - } - - if cReq != nil { - r.doCommit(cReq) - } - } -} - -func (r *Runtime) doCommit(req *commitReq) { - r.peersLock.RLock() - defer r.peersLock.RUnlock() - - resp := &commitResult{ - start: time.Now(), - } - - if r.role == proto.Leader { - resp.dbCost, resp.rpc, resp.result, resp.err = r.leaderDoCommit(req) - req.result <- resp - } else { - r.followerDoCommit(req) - } -} - -func (r *Runtime) leaderDoCommit(req *commitReq) (dbCost time.Duration, tracker *rpcTracker, result interface{}, err error) { - if req.log != nil { - // mis-use 
follower commit for leader - log.Fatal("INVALID EXISTING LOG FOR LEADER COMMIT") - return - } - - // create leader log - var l *kt.Log - var logData []byte - - logData = append(logData, r.uint64ToBytes(req.index)...) - logData = append(logData, r.uint64ToBytes(atomic.LoadUint64(&r.lastCommit))...) - - if l, err = r.newLog(kt.LogCommit, logData); err != nil { - // serve error, leader could not write log - return - } - - // not wrapping underlying handler commit error - tmStartDB := time.Now() - result, err = r.sh.Commit(req.data) - dbCost = time.Now().Sub(tmStartDB) - - // mark last commit - atomic.StoreUint64(&r.lastCommit, l.Index) - - // send commit - tracker = r.rpc(l, r.minCommitFollowers) - - // TODO(): text log for rpc errors - - // TODO(): mark uncommitted nodes and remove from peers - - return -} - -func (r *Runtime) followerDoCommit(req *commitReq) (err error) { - if req.log == nil { - log.Fatal("NO LOG FOR FOLLOWER COMMIT") - return - } - - var ( - tmStart = time.Now() - ok bool - - tmCommitEnqueue, tmWriteWAL time.Time - ) - - if tmCommitEnqueue, ok = req.tmCommitEnqueue.Load().(time.Time); !ok { - tmCommitEnqueue = tmStart - } - - // check for last commit availability - myLastCommit := atomic.LoadUint64(&r.lastCommit) - if req.lastCommit != myLastCommit { - // TODO(): need counter for retries, infinite commit re-order would cause troubles - go func(req *commitReq) { - r.commitCh <- req - }(req) - return - } - - // write log first - if err = r.wal.Write(req.log); err != nil { - err = errors.Wrap(err, "write follower commit log failed") - return - } - tmWriteWAL = time.Now() - - // do commit, not wrapping underlying handler commit error - _, err = r.sh.Commit(req.data) - - // mark last commit - atomic.StoreUint64(&r.lastCommit, req.log.Index) - - req.result <- &commitResult{ - err: err, - start: tmStart, - decodeCost: req.tmDecode.Sub(req.tmStart), - enqueueCost: tmCommitEnqueue.Sub(req.tmDecode), - queuedCost: tmStart.Sub(tmCommitEnqueue), - walCost: 
tmWriteWAL.Sub(tmStart), - dbCost: time.Since(tmWriteWAL), - } - - return -} - -func (r *Runtime) getPrepareLog(l *kt.Log) (lastCommitIndex uint64, pl *kt.Log, err error) { - var prepareIndex uint64 - - // decode prepare index - if prepareIndex, err = r.bytesToUint64(l.Data); err != nil { - err = errors.Wrap(err, "log does not contain valid prepare index") - return - } - - // decode commit index - if len(l.Data) >= 16 { - lastCommitIndex, _ = r.bytesToUint64(l.Data[8:]) - } - - pl, err = r.wal.Get(prepareIndex) - - return -} - -func (r *Runtime) newLog(logType kt.LogType, data []byte) (l *kt.Log, err error) { - // allocate index - r.nextIndexLock.Lock() - i := r.nextIndex - r.nextIndex++ - r.nextIndexLock.Unlock() - l = &kt.Log{ - LogHeader: kt.LogHeader{ - Index: i, - Type: logType, - Producer: r.nodeID, - }, - Data: data, - } - - // error write will be a fatal error, cause to node to fail fast - if err = r.wal.Write(l); err != nil { - log.Fatalf("WRITE LOG FAILED: %v", err) - } - - return -} - -func (r *Runtime) readLogs() (err error) { - // load logs, only called during init - var l *kt.Log - - for { - if l, err = r.wal.Read(); err != nil && err != io.EOF { - err = errors.Wrap(err, "load previous logs in wal failed") - return - } else if err == io.EOF { - err = nil - break - } - - switch l.Type { - case kt.LogPrepare: - // record in pending prepares - r.pendingPrepares[l.Index] = true - case kt.LogCommit: - // record last commit - var lastCommit uint64 - var prepareLog *kt.Log - if lastCommit, prepareLog, err = r.getPrepareLog(l); err != nil { - err = errors.Wrap(err, "previous prepare does not exists, node need full recovery") - return - } - if lastCommit != r.lastCommit { - err = errors.Wrapf(err, - "last commit record in wal mismatched (expected: %v, actual: %v)", r.lastCommit, lastCommit) - return - } - if !r.pendingPrepares[prepareLog.Index] { - err = errors.Wrap(kt.ErrInvalidLog, "previous prepare already committed/rollback") - return - } - r.lastCommit = 
l.Index - // resolve previous prepared - delete(r.pendingPrepares, prepareLog.Index) - case kt.LogRollback: - var prepareLog *kt.Log - if _, prepareLog, err = r.getPrepareLog(l); err != nil { - err = errors.Wrap(err, "previous prepare does not exists, node need full recovery") - return - } - if !r.pendingPrepares[prepareLog.Index] { - err = errors.Wrap(kt.ErrInvalidLog, "previous prepare already committed/rollback") - return - } - // resolve previous prepared - delete(r.pendingPrepares, prepareLog.Index) - case kt.LogBarrier: - case kt.LogNoop: - default: - err = errors.Wrapf(kt.ErrInvalidLog, "invalid log type: %v", l.Type) - return - } - - // record nextIndex - r.updateNextIndex(l) - } - - return -} - func (r *Runtime) updateNextIndex(l *kt.Log) { r.nextIndexLock.Lock() defer r.nextIndexLock.Unlock() @@ -969,69 +404,3 @@ func (r *Runtime) markPrepareFinished(index uint64) { delete(r.pendingPrepares, index) } - -func (r *Runtime) errorSummary(errs map[proto.NodeID]error) error { - failNodes := make(map[proto.NodeID]error) - - for s, err := range errs { - if err != nil { - failNodes[s] = err - } - } - - if len(failNodes) == 0 { - return nil - } - - return errors.Wrapf(kt.ErrPrepareFailed, "fail on nodes: %v", failNodes) -} - -/// rpc related -func (r *Runtime) rpc(l *kt.Log, minCount int) (tracker *rpcTracker) { - req := &kt.RPCRequest{ - Instance: r.instanceID, - Log: l, - } - - tracker = newTracker(r, req, minCount) - tracker.send() - - // TODO(): track this rpc - - // TODO(): log remote errors - - return -} - -func (r *Runtime) getCaller(id proto.NodeID) Caller { - var caller Caller = rpc.NewPersistentCaller(id) - rawCaller, _ := r.callerMap.LoadOrStore(id, caller) - return rawCaller.(Caller) -} - -func (r *Runtime) goFunc(f func()) { - r.wg.Add(1) - go func() { - defer r.wg.Done() - f() - }() -} - -/// utils -func (r *Runtime) uint64ToBytes(i uint64) (res []byte) { - res = make([]byte, 8) - binary.BigEndian.PutUint64(res, i) - return -} - -func (r *Runtime) 
bytesToUint64(b []byte) (uint64, error) { - if len(b) < 8 { - return 0, kt.ErrInvalidLog - } - return binary.BigEndian.Uint64(b), nil -} - -//// future extensions, barrier, noop log placeholder etc. -func (r *Runtime) followerNoop(l *kt.Log) (err error) { - return r.wal.Write(l) -} diff --git a/kayak/runtime_test.go b/kayak/runtime_test.go index d11b8e92b..b0127a58e 100644 --- a/kayak/runtime_test.go +++ b/kayak/runtime_test.go @@ -22,6 +22,7 @@ import ( "database/sql" "encoding/binary" "fmt" + "github.com/CovenantSQL/CovenantSQL/utils/trace" "math/rand" "net" "net/rpc" @@ -38,7 +39,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/storage" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" - mock_conn "github.com/jordwest/mock-conn" + "github.com/jordwest/mock-conn" "github.com/pkg/errors" . "github.com/smartystreets/goconvey/convey" ) @@ -163,7 +164,7 @@ func newFakeService(rt *kayak.Runtime) (fs *fakeService) { func (s *fakeService) Call(req *kt.RPCRequest, resp *interface{}) (err error) { // add some delay for timeout test - time.Sleep(time.Millisecond * 10) + //time.Sleep(time.Millisecond * 10) return s.rt.FollowerApply(req.Log) } @@ -196,8 +197,14 @@ func (c *fakeCaller) Call(method string, req interface{}, resp interface{}) (err func TestRuntime(t *testing.T) { Convey("runtime test", t, func(c C) { lvl := log.GetLevel() - log.SetLevel(log.FatalLevel) + log.SetLevel(log.DebugLevel) defer log.SetLevel(lvl) + f, err := os.Create("trace") + So(err, ShouldBeNil) + defer f.Close() + trace.Start(f) + defer trace.Stop() + db1, err := newSQLiteStorage("test1.db") So(err, ShouldBeNil) defer func() { @@ -314,7 +321,7 @@ func TestRuntime(t *testing.T) { var count uint64 atomic.StoreUint64(&count, 1) - for i := 0; i != 100; i++ { + for i := 0; i != 2000; i++ { atomic.AddUint64(&count, 1) q := &queryStructure{ Queries: []storage.Query{ @@ -501,7 +508,7 @@ func TestRuntime(t *testing.T) { func BenchmarkRuntime(b *testing.B) { 
Convey("runtime test", b, func(c C) { - log.SetLevel(log.DebugLevel) + log.SetLevel(log.FatalLevel) f, err := os.OpenFile("test.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600) So(err, ShouldBeNil) log.SetOutput(f) diff --git a/kayak/utils.go b/kayak/utils.go new file mode 100644 index 000000000..79382e214 --- /dev/null +++ b/kayak/utils.go @@ -0,0 +1,52 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kayak + +import ( + "encoding/binary" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/rpc" +) + +func (r *Runtime) getCaller(id proto.NodeID) Caller { + var caller Caller = rpc.NewPersistentCaller(id) + rawCaller, _ := r.callerMap.LoadOrStore(id, caller) + return rawCaller.(Caller) +} + +func (r *Runtime) goFunc(f func()) { + r.wg.Add(1) + go func() { + defer r.wg.Done() + f() + }() +} + +/// utils +func (r *Runtime) uint64ToBytes(i uint64) (res []byte) { + res = make([]byte, 8) + binary.BigEndian.PutUint64(res, i) + return +} + +func (r *Runtime) bytesToUint64(b []byte) (uint64, error) { + if len(b) < 8 { + return 0, kt.ErrInvalidLog + } + return binary.BigEndian.Uint64(b), nil +} diff --git a/utils/trace/trace_dummy.go b/utils/trace/trace_dummy.go index 1d8d40dba..a399427bb 100644 --- a/utils/trace/trace_dummy.go +++ b/utils/trace/trace_dummy.go @@ -37,21 +37,21 @@ func (r *Region) End() {} // NewTask 
mocks runtime/trace.NewTask. func NewTask(pctx context.Context, taskType string) (ctx context.Context, task *Task) { - return ctx, &Task{} + return pctx, &Task{} } // StartRegion mocks runtime/trace.StartRegion. -func StartRegion(ctx context.Context, regionType string) { - return +func StartRegion(ctx context.Context, regionType string) (region *Region) { + return &Region{} } // WithRegion mocks runtime/trace.WithRegion. func WithRegion(ctx context.Context, regionType string, fn func()) { - return + fn() } // IsEnabled mocks runtime/trace.IsEnabled. -func IsEnabled() { +func IsEnabled() bool { return false } From a5b3d5f2c23e1f6295d1001f8d34b12b468205bd Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Fri, 11 Jan 2019 22:15:32 +0800 Subject: [PATCH 266/302] Add more trace points --- kayak/commit.go | 10 +++++++--- kayak/log.go | 7 ++++--- kayak/processes.go | 17 +++++++++++------ kayak/runtime.go | 30 ++++++++++++++++++++++-------- kayak/tracker.go | 3 +++ 5 files changed, 47 insertions(+), 20 deletions(-) diff --git a/kayak/commit.go b/kayak/commit.go index 74867f19d..8db03afe3 100644 --- a/kayak/commit.go +++ b/kayak/commit.go @@ -18,13 +18,14 @@ package kayak import ( "context" + "sync/atomic" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/CovenantSQL/CovenantSQL/utils/timer" "github.com/CovenantSQL/CovenantSQL/utils/trace" "github.com/pkg/errors" - "sync/atomic" ) func (r *Runtime) leaderCommitResult(ctx context.Context, tm *timer.Timer, reqPayload interface{}, prepareLog *kt.Log) (res *commitFuture) { @@ -55,6 +56,8 @@ func (r *Runtime) leaderCommitResult(ctx context.Context, tm *timer.Timer, reqPa } func (r *Runtime) followerCommitResult(ctx context.Context, tm *timer.Timer, commitLog *kt.Log, prepareLog *kt.Log, lastCommit uint64) (res *commitFuture) { + defer trace.StartRegion(ctx, "followerCommitResult").End() + // decode log and send to commit channel 
to process res = newCommitFuture() @@ -79,7 +82,7 @@ func (r *Runtime) followerCommitResult(ctx context.Context, tm *timer.Timer, com // decode prepare log var logReq interface{} var err error - if logReq, err = r.sh.DecodePayload(prepareLog.Data); err != nil { + if logReq, err = r.doDecodePayload(ctx, prepareLog.Data); err != nil { res.Set(&commitResult{err: errors.Wrap(err, "decode log payload failed")}) return } @@ -121,7 +124,7 @@ func (r *Runtime) commitCycle() { } } -func (r *Runtime) leaderDoCommit(req *commitReq) () { +func (r *Runtime) leaderDoCommit(req *commitReq) { if req.log != nil { // mis-use follower commit for leader log.Fatal("INVALID EXISTING LOG FOR LEADER COMMIT") @@ -213,6 +216,7 @@ func (r *Runtime) followerDoCommit(req *commitReq) { } func (r *Runtime) getPrepareLog(ctx context.Context, l *kt.Log) (lastCommitIndex uint64, pl *kt.Log, err error) { + defer trace.StartRegion(ctx, "getPrepareLog").End() var prepareIndex uint64 // decode prepare index diff --git a/kayak/log.go b/kayak/log.go index d027fb094..b6d632f86 100644 --- a/kayak/log.go +++ b/kayak/log.go @@ -18,11 +18,12 @@ package kayak import ( "context" + "io" + "log" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" "github.com/CovenantSQL/CovenantSQL/utils/trace" "github.com/pkg/errors" - "io" - "log" ) func (r *Runtime) newLog(ctx context.Context, logType kt.LogType, data []byte) (l *kt.Log, err error) { @@ -114,7 +115,7 @@ func (r *Runtime) readLogs() (err error) { } // record nextIndex - r.updateNextIndex(l) + r.updateNextIndex(context.Background(), l) } return diff --git a/kayak/processes.go b/kayak/processes.go index 4ef78558f..82b3616c8 100644 --- a/kayak/processes.go +++ b/kayak/processes.go @@ -18,6 +18,7 @@ package kayak import ( "context" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/CovenantSQL/CovenantSQL/utils/timer" @@ -26,6 +27,8 @@ import ( ) func (r *Runtime) doLeaderPrepare(ctx context.Context, 
tm *timer.Timer, req interface{}) (prepareLog *kt.Log, err error) { + defer trace.StartRegion(ctx, "doLeaderPrepare").End() + // check prepare in leader if err = r.doCheck(ctx, req); err != nil { err = errors.Wrap(err, "leader verify log") @@ -51,7 +54,7 @@ func (r *Runtime) doLeaderPrepare(ctx context.Context, tm *timer.Timer, req inte } // Leader pending map handling. - r.markPendingPrepare(prepareLog.Index) + r.markPendingPrepare(ctx, prepareLog.Index) tm.Add("leader_prepare") @@ -76,6 +79,7 @@ func (r *Runtime) doLeaderPrepare(ctx context.Context, tm *timer.Timer, req inte func (r *Runtime) doLeaderCommit(ctx context.Context, tm *timer.Timer, prepareLog *kt.Log, req interface{}) ( result interface{}, logIndex uint64, err error) { + defer trace.StartRegion(ctx, "doLeaderCommit").End() var commitResult *commitResult if commitResult, err = r.leaderCommitResult(ctx, tm, req, prepareLog).Get(ctx); err != nil { return @@ -95,6 +99,7 @@ func (r *Runtime) doLeaderCommit(ctx context.Context, tm *timer.Timer, prepareLo } func (r *Runtime) doLeaderRollback(ctx context.Context, tm *timer.Timer, prepareLog *kt.Log) { + defer trace.StartRegion(ctx, "doLeaderRollback").End() // rollback local var rollbackLog *kt.Log var logErr error @@ -150,7 +155,7 @@ func (r *Runtime) followerPrepare(ctx context.Context, tm *timer.Timer, l *kt.Lo } tm.Add("write_wal") - r.markPendingPrepare(l.Index) + r.markPendingPrepare(ctx, l.Index) tm.Add("mark") return @@ -165,7 +170,7 @@ func (r *Runtime) followerRollback(ctx context.Context, tm *timer.Timer, l *kt.L tm.Add("get_prepare") // check if prepare already processed - if r.checkIfPrepareFinished(prepareLog.Index) { + if r.checkIfPrepareFinished(ctx, prepareLog.Index) { err = errors.Wrap(kt.ErrInvalidLog, "prepare request already processed") return } @@ -177,7 +182,7 @@ func (r *Runtime) followerRollback(ctx context.Context, tm *timer.Timer, l *kt.L } tm.Add("write_wal") - r.markPrepareFinished(l.Index) + r.markPrepareFinished(ctx, l.Index) 
tm.Add("mark") return @@ -201,7 +206,7 @@ func (r *Runtime) followerCommit(ctx context.Context, tm *timer.Timer, l *kt.Log tm.Add("get_prepare") // check if prepare already processed - if r.checkIfPrepareFinished(prepareLog.Index) { + if r.checkIfPrepareFinished(ctx, prepareLog.Index) { err = errors.Wrap(kt.ErrInvalidLog, "prepare request already processed") return } @@ -212,7 +217,7 @@ func (r *Runtime) followerCommit(ctx context.Context, tm *timer.Timer, l *kt.Log err = cResult.err } - r.markPrepareFinished(l.Index) + r.markPrepareFinished(ctx, l.Index) tm.Add("mark") return diff --git a/kayak/runtime.go b/kayak/runtime.go index a1c33a13d..7d406fa49 100644 --- a/kayak/runtime.go +++ b/kayak/runtime.go @@ -19,7 +19,6 @@ package kayak import ( "context" "fmt" - "github.com/CovenantSQL/CovenantSQL/utils/timer" "math" "sync" "sync/atomic" @@ -28,6 +27,7 @@ import ( kt "github.com/CovenantSQL/CovenantSQL/kayak/types" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/CovenantSQL/CovenantSQL/utils/timer" "github.com/CovenantSQL/CovenantSQL/utils/trace" "github.com/pkg/errors" ) @@ -290,7 +290,7 @@ func (r *Runtime) Apply(ctx context.Context, req interface{}) (result interface{ r.peersLock.RLock() defer r.peersLock.RUnlock() - tm.Add("prl") + tm.Add("peers_lock") waitForLockRegion.End() @@ -304,7 +304,7 @@ func (r *Runtime) Apply(ctx context.Context, req interface{}) (result interface{ prepareLog, err := r.doLeaderPrepare(ctx, tm, req) if prepareLog != nil { - defer r.markPrepareFinished(prepareLog.Index) + defer r.markPrepareFinished(ctx, prepareLog.Index) } if err == nil { @@ -341,9 +341,15 @@ func (r *Runtime) FollowerApply(l *kt.Log) (err error) { Debug("kayak follower apply") }() + waitForLockRegion := trace.StartRegion(ctx, "peersLock") + r.peersLock.RLock() defer r.peersLock.RUnlock() + tm.Add("peers_lock") + + waitForLockRegion.End() + if r.role == proto.Leader { // not follower err = kt.ErrNotFollower @@ 
-361,7 +367,7 @@ func (r *Runtime) FollowerApply(l *kt.Log) (err error) { } if err == nil { - r.updateNextIndex(l) + r.updateNextIndex(ctx, l) } return @@ -375,7 +381,9 @@ func (r *Runtime) UpdatePeers(peers *proto.Peers) (err error) { return } -func (r *Runtime) updateNextIndex(l *kt.Log) { +func (r *Runtime) updateNextIndex(ctx context.Context, l *kt.Log) { + defer trace.StartRegion(ctx, "updateNextIndex").End() + r.nextIndexLock.Lock() defer r.nextIndexLock.Unlock() @@ -384,21 +392,27 @@ func (r *Runtime) updateNextIndex(l *kt.Log) { } } -func (r *Runtime) checkIfPrepareFinished(index uint64) (finished bool) { +func (r *Runtime) checkIfPrepareFinished(ctx context.Context, index uint64) (finished bool) { + defer trace.StartRegion(ctx, "checkIfPrepareFinished").End() + r.pendingPreparesLock.RLock() defer r.pendingPreparesLock.RUnlock() return !r.pendingPrepares[index] } -func (r *Runtime) markPendingPrepare(index uint64) { +func (r *Runtime) markPendingPrepare(ctx context.Context, index uint64) { + defer trace.StartRegion(ctx, "markPendingPrepare").End() + r.pendingPreparesLock.Lock() defer r.pendingPreparesLock.Unlock() r.pendingPrepares[index] = true } -func (r *Runtime) markPrepareFinished(index uint64) { +func (r *Runtime) markPrepareFinished(ctx context.Context, index uint64) { + defer trace.StartRegion(ctx, "markPrepareFinished").End() + r.pendingPreparesLock.Lock() defer r.pendingPreparesLock.Unlock() diff --git a/kayak/tracker.go b/kayak/tracker.go index 986a9198b..e43de4546 100644 --- a/kayak/tracker.go +++ b/kayak/tracker.go @@ -22,6 +22,7 @@ import ( "sync/atomic" "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils/trace" ) // rpcTracker defines the rpc call tracker @@ -130,6 +131,8 @@ func (t *rpcTracker) get(ctx context.Context) (errors map[proto.NodeID]error, me break } + defer trace.StartRegion(ctx, "rpcCall").End() + t.errLock.RLock() defer t.errLock.RUnlock() From 3c5d3b5e284ee89f01db40a6882ed7f2eec4a863 Mon Sep 
17 00:00:00 2001 From: Qi Xiao Date: Fri, 11 Jan 2019 22:46:05 +0800 Subject: [PATCH 267/302] Fix rpc trace point and split followerApply task types --- kayak/runtime.go | 2 +- kayak/tracker.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/kayak/runtime.go b/kayak/runtime.go index 7d406fa49..efab530ca 100644 --- a/kayak/runtime.go +++ b/kayak/runtime.go @@ -325,7 +325,7 @@ func (r *Runtime) FollowerApply(l *kt.Log) (err error) { return } - ctx, task := trace.NewTask(context.Background(), "Kayak.FollowerApply") + ctx, task := trace.NewTask(context.Background(), "Kayak.FollowerApply."+l.Type.String()) defer task.End() tm := timer.NewTimer() diff --git a/kayak/tracker.go b/kayak/tracker.go index e43de4546..ae91aeb95 100644 --- a/kayak/tracker.go +++ b/kayak/tracker.go @@ -115,6 +115,8 @@ func (t *rpcTracker) done() { } func (t *rpcTracker) get(ctx context.Context) (errors map[proto.NodeID]error, meets bool, finished bool) { + defer trace.StartRegion(ctx, "rpcCall").End() + for { select { case <-t.doneCh: @@ -131,8 +133,6 @@ func (t *rpcTracker) get(ctx context.Context) (errors map[proto.NodeID]error, me break } - defer trace.StartRegion(ctx, "rpcCall").End() - t.errLock.RLock() defer t.errLock.RUnlock() From 287a5b779cab683c49a6a0b96637c7bb792a4eef Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Sun, 13 Jan 2019 20:19:44 +0800 Subject: [PATCH 268/302] Calc follower real commitcycle cost --- kayak/commit.go | 11 +++++++++-- kayak/runtime.go | 1 + 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/kayak/commit.go b/kayak/commit.go index 8db03afe3..26883c63c 100644 --- a/kayak/commit.go +++ b/kayak/commit.go @@ -179,6 +179,10 @@ func (r *Runtime) followerDoCommit(req *commitReq) { return } + if req.task == nil { + req.task = trace.StartRegion(req.ctx, "commitCycle") + } + // check for last commit availability myLastCommit := atomic.LoadUint64(&r.lastCommit) if req.lastCommit != myLastCommit { @@ -189,6 +193,10 @@ func (r *Runtime) 
followerDoCommit(req *commitReq) { return } + if req.task != nil { + defer req.task.End() + } + req.tm.Add("wait_last_commit") var err error @@ -236,12 +244,11 @@ func (r *Runtime) getPrepareLog(ctx context.Context, l *kt.Log) (lastCommitIndex } func (r *Runtime) doCommitCycle(req *commitReq) { - defer trace.StartRegion(req.ctx, "commitCycle").End() - r.peersLock.RLock() defer r.peersLock.RUnlock() if r.role == proto.Leader { + defer trace.StartRegion(req.ctx, "commitCycle").End() r.leaderDoCommit(req) } else { r.followerDoCommit(req) diff --git a/kayak/runtime.go b/kayak/runtime.go index efab530ca..a7295cd47 100644 --- a/kayak/runtime.go +++ b/kayak/runtime.go @@ -113,6 +113,7 @@ type commitReq struct { log *kt.Log result *commitFuture tm *timer.Timer + task *trace.Region } // commitResult defines the commit operation result. From eb50216ce54c4184612dab936e0061648d11ddf9 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Sun, 13 Jan 2019 20:20:48 +0800 Subject: [PATCH 269/302] Separate commit wait and commit cycle cost --- kayak/commit.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/kayak/commit.go b/kayak/commit.go index 26883c63c..f9a0cbaef 100644 --- a/kayak/commit.go +++ b/kayak/commit.go @@ -180,7 +180,7 @@ func (r *Runtime) followerDoCommit(req *commitReq) { } if req.task == nil { - req.task = trace.StartRegion(req.ctx, "commitCycle") + req.task = trace.StartRegion(req.ctx, "waitForLastCommit") } // check for last commit availability @@ -194,11 +194,14 @@ func (r *Runtime) followerDoCommit(req *commitReq) { } if req.task != nil { - defer req.task.End() + req.task.End() + req.task = nil } req.tm.Add("wait_last_commit") + defer trace.StartRegion(req.ctx, "commitCycle").End() + var err error // write log first From 7e4a0c025021c792aa1ba7f0ba5bc68b103bf137 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Sun, 13 Jan 2019 20:49:37 +0800 Subject: [PATCH 270/302] Fix waitForLastCommit region overlapping --- kayak/commit.go | 11 +++-------- 
kayak/runtime.go | 1 - 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/kayak/commit.go b/kayak/commit.go index f9a0cbaef..c11d3deed 100644 --- a/kayak/commit.go +++ b/kayak/commit.go @@ -179,9 +179,7 @@ func (r *Runtime) followerDoCommit(req *commitReq) { return } - if req.task == nil { - req.task = trace.StartRegion(req.ctx, "waitForLastCommit") - } + waitCommitTask := trace.StartRegion(req.ctx, "waitForLastCommit") // check for last commit availability myLastCommit := atomic.LoadUint64(&r.lastCommit) @@ -190,14 +188,11 @@ func (r *Runtime) followerDoCommit(req *commitReq) { go func(req *commitReq) { r.commitCh <- req }(req) + waitCommitTask.End() return } - if req.task != nil { - req.task.End() - req.task = nil - } - + waitCommitTask.End() req.tm.Add("wait_last_commit") defer trace.StartRegion(req.ctx, "commitCycle").End() diff --git a/kayak/runtime.go b/kayak/runtime.go index a7295cd47..efab530ca 100644 --- a/kayak/runtime.go +++ b/kayak/runtime.go @@ -113,7 +113,6 @@ type commitReq struct { log *kt.Log result *commitFuture tm *timer.Timer - task *trace.Region } // commitResult defines the commit operation result. 
From 6d4f87b7ff42ae1e6591a3f853da3e8ff3493649 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Sun, 13 Jan 2019 22:22:39 +0800 Subject: [PATCH 271/302] Fix rollback nil pointer bug --- kayak/runtime.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kayak/runtime.go b/kayak/runtime.go index efab530ca..c79ee5f70 100644 --- a/kayak/runtime.go +++ b/kayak/runtime.go @@ -313,7 +313,9 @@ func (r *Runtime) Apply(ctx context.Context, req interface{}) (result interface{ } // rollback - r.doLeaderRollback(ctx, tm, prepareLog) + if prepareLog != nil { + r.doLeaderRollback(ctx, tm, prepareLog) + } return } From 59dc00bf3f712345a293cd008e5f227bba3e7a77 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 14 Jan 2019 00:18:49 +0800 Subject: [PATCH 272/302] Fix shutdown block bug --- kayak/runtime.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/kayak/runtime.go b/kayak/runtime.go index c79ee5f70..72ec091fd 100644 --- a/kayak/runtime.go +++ b/kayak/runtime.go @@ -129,7 +129,7 @@ type commitFuture struct { func newCommitFuture() *commitFuture { return &commitFuture{ - ch: make(chan *commitResult), + ch: make(chan *commitResult, 1), } } @@ -149,7 +149,10 @@ func (f *commitFuture) Get(ctx context.Context) (cr *commitResult, err error) { } func (f *commitFuture) Set(cr *commitResult) { - f.ch <- cr + select { + case f.ch <- cr: + default: + } } // NewRuntime creates new kayak Runtime. 
From 951e8795a09574c6b9fdb38ffb84bd755337246e Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 14 Jan 2019 16:33:46 +0800 Subject: [PATCH 273/302] Fix chain produce block bug --- cmd/cqld/adapter.go | 4 ++-- kayak/callbacks.go | 5 +++-- kayak/commit.go | 4 ++-- kayak/runtime_test.go | 6 +++--- kayak/types/handler.go | 2 +- sqlchain/chain.go | 4 ++-- worker/db.go | 4 ++-- worker/db_storage.go | 4 ++-- xenomint/chain.go | 2 +- xenomint/state.go | 14 ++++++++------ 10 files changed, 26 insertions(+), 23 deletions(-) diff --git a/cmd/cqld/adapter.go b/cmd/cqld/adapter.go index 047d3b726..8b41a2769 100644 --- a/cmd/cqld/adapter.go +++ b/cmd/cqld/adapter.go @@ -115,7 +115,7 @@ func (s *LocalStorage) Check(req interface{}) (err error) { } // Commit implements kayak.types.Handler.Commit. -func (s *LocalStorage) Commit(req interface{}) (_ interface{}, err error) { +func (s *LocalStorage) Commit(req interface{}, isLeader bool) (_ interface{}, err error) { var kp *KayakPayload var cl *compiledLog var ok bool @@ -246,7 +246,7 @@ func (s *KayakKVServer) Init(storePath string, initNodes []proto.Node) (err erro Command: CmdSet, Data: nodeBuf.Bytes(), } - _, err = s.KVStorage.Commit(payload) + _, err = s.KVStorage.Commit(payload, true) if err != nil { log.WithError(err).Error("init kayak KV commit node failed") return diff --git a/kayak/callbacks.go b/kayak/callbacks.go index e75e8119f..e0814115b 100644 --- a/kayak/callbacks.go +++ b/kayak/callbacks.go @@ -18,6 +18,7 @@ package kayak import ( "context" + "github.com/CovenantSQL/CovenantSQL/utils/trace" "github.com/pkg/errors" ) @@ -47,7 +48,7 @@ func (r *Runtime) doDecodePayload(ctx context.Context, data []byte) (req interfa return } -func (r *Runtime) doCommit(ctx context.Context, req interface{}) (result interface{}, err error) { +func (r *Runtime) doCommit(ctx context.Context, req interface{}, isLeader bool) (result interface{}, err error) { defer trace.StartRegion(ctx, "commit").End() - return r.sh.Commit(req) + return 
r.sh.Commit(req, isLeader) } diff --git a/kayak/commit.go b/kayak/commit.go index c11d3deed..a53e7edca 100644 --- a/kayak/commit.go +++ b/kayak/commit.go @@ -150,7 +150,7 @@ func (r *Runtime) leaderDoCommit(req *commitReq) { req.tm.Add("write_wal") // not wrapping underlying handler commit error - cr.result, err = r.doCommit(req.ctx, req.data) + cr.result, err = r.doCommit(req.ctx, req.data, true) req.tm.Add("db_write") @@ -207,7 +207,7 @@ func (r *Runtime) followerDoCommit(req *commitReq) { req.tm.Add("write_wal") // do commit, not wrapping underlying handler commit error - _, err = r.doCommit(req.ctx, req.data) + _, err = r.doCommit(req.ctx, req.data, false) req.tm.Add("db_write") diff --git a/kayak/runtime_test.go b/kayak/runtime_test.go index b0127a58e..51c861add 100644 --- a/kayak/runtime_test.go +++ b/kayak/runtime_test.go @@ -22,7 +22,6 @@ import ( "database/sql" "encoding/binary" "fmt" - "github.com/CovenantSQL/CovenantSQL/utils/trace" "math/rand" "net" "net/rpc" @@ -39,7 +38,8 @@ import ( "github.com/CovenantSQL/CovenantSQL/storage" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" - "github.com/jordwest/mock-conn" + "github.com/CovenantSQL/CovenantSQL/utils/trace" + mock_conn "github.com/jordwest/mock-conn" "github.com/pkg/errors" . 
"github.com/smartystreets/goconvey/convey" ) @@ -104,7 +104,7 @@ func (s *sqliteStorage) Check(data interface{}) (err error) { return nil } -func (s *sqliteStorage) Commit(data interface{}) (result interface{}, err error) { +func (s *sqliteStorage) Commit(data interface{}, isLeader bool) (result interface{}, err error) { var d *queryStructure var ok bool if d, ok = data.(*queryStructure); !ok { diff --git a/kayak/types/handler.go b/kayak/types/handler.go index c74b053e2..3be587072 100644 --- a/kayak/types/handler.go +++ b/kayak/types/handler.go @@ -21,5 +21,5 @@ type Handler interface { EncodePayload(req interface{}) (data []byte, err error) DecodePayload(data []byte) (req interface{}, err error) Check(request interface{}) error - Commit(request interface{}) (result interface{}, err error) + Commit(request interface{}, isLeader bool) (result interface{}, err error) } diff --git a/sqlchain/chain.go b/sqlchain/chain.go index 335024f0e..e7130574f 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -1199,11 +1199,11 @@ func (c *Chain) replicationCycle(ctx context.Context) { // Query queries req from local chain state and returns the query results in resp. func (c *Chain) Query( - req *types.Request) (tracker *x.QueryTracker, resp *types.Response, err error, + req *types.Request, isLeader bool) (tracker *x.QueryTracker, resp *types.Response, err error, ) { // TODO(leventeliu): we're using an external context passed by request. Make sure that // cancelling will be propagated to this context before chain instance stops. - return c.st.QueryWithContext(req.GetContext(), req) + return c.st.QueryWithContext(req.GetContext(), req, isLeader) } // AddResponse addes a response to the ackIndex, awaiting for acknowledgement. 
diff --git a/worker/db.go b/worker/db.go index e3ced3393..691a4b799 100644 --- a/worker/db.go +++ b/worker/db.go @@ -244,7 +244,7 @@ func (db *Database) Query(request *types.Request) (response *types.Response, err switch request.Header.QueryType { case types.ReadQuery: - if tracker, response, err = db.chain.Query(request); err != nil { + if tracker, response, err = db.chain.Query(request, false); err != nil { err = errors.Wrap(err, "failed to query read query") return } @@ -252,7 +252,7 @@ func (db *Database) Query(request *types.Request) (response *types.Response, err if db.cfg.UseEventualConsistency { // reset context request.SetContext(context.Background()) - if tracker, response, err = db.chain.Query(request); err != nil { + if tracker, response, err = db.chain.Query(request, true); err != nil { err = errors.Wrap(err, "failed to execute with eventual consistency") return } diff --git a/worker/db_storage.go b/worker/db_storage.go index f20569e7e..6d83715db 100644 --- a/worker/db_storage.go +++ b/worker/db_storage.go @@ -106,7 +106,7 @@ type TrackerAndResponse struct { } // Commit implements kayak.types.Handler.Commit. 
-func (db *Database) Commit(rawReq interface{}) (result interface{}, err error) { +func (db *Database) Commit(rawReq interface{}, isLeader bool) (result interface{}, err error) { // convert query and check syntax var ( req *types.Request @@ -123,7 +123,7 @@ func (db *Database) Commit(rawReq interface{}) (result interface{}, err error) { req.SetContext(context.Background()) // execute - if tracker, response, err = db.chain.Query(req); err != nil { + if tracker, response, err = db.chain.Query(req, isLeader); err != nil { return } result = &TrackerAndResponse{ diff --git a/xenomint/chain.go b/xenomint/chain.go index e207cc029..8cecdab00 100644 --- a/xenomint/chain.go +++ b/xenomint/chain.go @@ -80,7 +80,7 @@ func (c *Chain) Query(req *types.Request) (resp *types.Response, err error) { } log.WithFields(fields).Debug("Chain.Query duration stat (us)") }() - if ref, resp, err = c.state.Query(req); err != nil { + if ref, resp, err = c.state.Query(req, true); err != nil { return } queried = time.Since(start) diff --git a/xenomint/state.go b/xenomint/state.go index fcbef3acd..83835e27e 100644 --- a/xenomint/state.go +++ b/xenomint/state.go @@ -346,7 +346,7 @@ func (s *State) writeSingle( } func (s *State) write( - ctx context.Context, req *types.Request) (ref *QueryTracker, resp *types.Response, err error, + ctx context.Context, req *types.Request, isLeader bool) (ref *QueryTracker, resp *types.Response, err error, ) { var ( lastSeq uint64 @@ -434,7 +434,9 @@ func (s *State) write( s.flushSQLExecuter() } writeDone = time.Since(start) - s.pool.enqueue(lastSeq, query) + if isLeader { + s.pool.enqueue(lastSeq, query) + } enqueued = time.Since(start) return }(); err != nil { @@ -659,20 +661,20 @@ func (s *State) getLocalTime() time.Time { // Query does the query(ies) in req, pools the request and persists any change to // the underlying storage. 
-func (s *State) Query(req *types.Request) (ref *QueryTracker, resp *types.Response, err error) { - return s.QueryWithContext(context.Background(), req) +func (s *State) Query(req *types.Request, isLeader bool) (ref *QueryTracker, resp *types.Response, err error) { + return s.QueryWithContext(context.Background(), req, isLeader) } // QueryWithContext does the query(ies) in req, pools the request and persists any change to // the underlying storage. func (s *State) QueryWithContext( - ctx context.Context, req *types.Request) (ref *QueryTracker, resp *types.Response, err error, + ctx context.Context, req *types.Request, isLeader bool) (ref *QueryTracker, resp *types.Response, err error, ) { switch req.Header.QueryType { case types.ReadQuery: return s.readTx(ctx, req) case types.WriteQuery: - return s.write(ctx, req) + return s.write(ctx, req, isLeader) default: err = ErrInvalidRequest } From 5752a42f3f883fb8309341c9d2a2a256fbd0159f Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 14 Jan 2019 16:44:07 +0800 Subject: [PATCH 274/302] Fix chainbus service code typo --- worker/chainbusservice.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/worker/chainbusservice.go b/worker/chainbusservice.go index 5ac6aa11a..2da540916 100644 --- a/worker/chainbusservice.go +++ b/worker/chainbusservice.go @@ -48,7 +48,7 @@ type BusService struct { lock sync.RWMutex // a lock for the map blockCount uint32 sqlChainProfiles map[proto.DatabaseID]*types.SQLChainProfile - sqlChainState map[proto.DatabaseID](map[proto.AccountAddress]*types.PermStat) + sqlChainState map[proto.DatabaseID]map[proto.AccountAddress]*types.PermStat } // NewBusService creates a new chain bus instance. 
@@ -87,7 +87,7 @@ func (bs *BusService) updateState(count uint32, profiles []*types.SQLChainProfil defer bs.lock.Unlock() var ( rebuilt = make(map[proto.DatabaseID]*types.SQLChainProfile) - sqlchainState = make(map[proto.DatabaseID](map[proto.AccountAddress]*types.PermStat)) + sqlchainState = make(map[proto.DatabaseID]map[proto.AccountAddress]*types.PermStat) ) for _, v := range profiles { rebuilt[v.ID] = v From 57ae1feddc8706db6e313d6acafe17109681e64d Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 14 Jan 2019 17:09:35 +0800 Subject: [PATCH 275/302] Increase provide service interval and send provide service request after database creation --- cmd/cql-minerd/dbms.go | 9 +++++---- cmd/cql-minerd/main.go | 10 ++++++---- test/integration/node_miner_0/config.yaml | 2 +- test/integration/node_miner_1/config.yaml | 2 +- test/integration/node_miner_2/config.yaml | 2 +- worker/dbms.go | 4 ++++ worker/dbms_config.go | 7 ++++--- 7 files changed, 22 insertions(+), 14 deletions(-) diff --git a/cmd/cql-minerd/dbms.go b/cmd/cql-minerd/dbms.go index d033f16de..bcd234c8d 100644 --- a/cmd/cql-minerd/dbms.go +++ b/cmd/cql-minerd/dbms.go @@ -38,16 +38,17 @@ import ( var rootHash = hash.Hash{} -func startDBMS(server *rpc.Server) (dbms *worker.DBMS, err error) { +func startDBMS(server *rpc.Server, onCreateDB func()) (dbms *worker.DBMS, err error) { if conf.GConf.Miner == nil { err = errors.New("invalid database config") return } cfg := &worker.DBMSConfig{ - RootDir: conf.GConf.Miner.RootDir, - Server: server, - MaxReqTimeGap: conf.GConf.Miner.MaxReqTimeGap, + RootDir: conf.GConf.Miner.RootDir, + Server: server, + MaxReqTimeGap: conf.GConf.Miner.MaxReqTimeGap, + OnCreateDatabase: onCreateDB, } if dbms, err = worker.NewDBMS(cfg); err != nil { diff --git a/cmd/cql-minerd/main.go b/cmd/cql-minerd/main.go index 99b2949fd..2f8a17a33 100644 --- a/cmd/cql-minerd/main.go +++ b/cmd/cql-minerd/main.go @@ -197,11 +197,11 @@ func main() { } } + // start prometheus collector + reg := 
metric.StartMetricCollector() + // start period provide service transaction generator go func() { - // start prometheus collector - reg := metric.StartMetricCollector() - tick := time.NewTicker(conf.GConf.Miner.ProvideServiceInterval) defer tick.Stop() @@ -218,7 +218,9 @@ func main() { // start dbms var dbms *worker.DBMS - if dbms, err = startDBMS(server); err != nil { + if dbms, err = startDBMS(server, func() { + sendProvideService(reg) + }); err != nil { log.WithError(err).Fatal("start dbms failed") } diff --git a/test/integration/node_miner_0/config.yaml b/test/integration/node_miner_0/config.yaml index 8016896ab..ceac395a8 100644 --- a/test/integration/node_miner_0/config.yaml +++ b/test/integration/node_miner_0/config.yaml @@ -51,7 +51,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - ProvideServiceInterval: "3s" + ProvideServiceInterval: "60s" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/integration/node_miner_1/config.yaml b/test/integration/node_miner_1/config.yaml index f4b53d9cb..41eb0305b 100644 --- a/test/integration/node_miner_1/config.yaml +++ b/test/integration/node_miner_1/config.yaml @@ -51,7 +51,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - ProvideServiceInterval: "3s" + ProvideServiceInterval: "60s" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/integration/node_miner_2/config.yaml b/test/integration/node_miner_2/config.yaml index aafdfefc9..51ec8f581 100644 --- a/test/integration/node_miner_2/config.yaml +++ b/test/integration/node_miner_2/config.yaml @@ -51,7 +51,7 @@ Miner: IsTestMode: true RootDir: "./data" MaxReqTimeGap: "2s" - ProvideServiceInterval: "3s" + ProvideServiceInterval: "60s" KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/worker/dbms.go b/worker/dbms.go index b6d3e59e9..6aa7834f9 100644 --- a/worker/dbms.go +++ 
b/worker/dbms.go @@ -231,6 +231,10 @@ func (dbms *DBMS) createDatabase(tx interfaces.Transaction, count uint32) { if err != nil { log.WithError(err).Error("create database error") } + + if dbms.cfg.OnCreateDatabase != nil { + go dbms.cfg.OnCreateDatabase() + } } func (dbms *DBMS) buildSQLChainServiceInstance( diff --git a/worker/dbms_config.go b/worker/dbms_config.go index bc8fcded7..e701b1a19 100644 --- a/worker/dbms_config.go +++ b/worker/dbms_config.go @@ -29,7 +29,8 @@ var ( // DBMSConfig defines the local multi-database management system config. type DBMSConfig struct { - RootDir string - Server *rpc.Server - MaxReqTimeGap time.Duration + RootDir string + Server *rpc.Server + MaxReqTimeGap time.Duration + OnCreateDatabase func() } From 5014de4ec4e451d36fa2fa9d0f91c009397f0274 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Wed, 16 Jan 2019 10:40:27 +0800 Subject: [PATCH 276/302] Add kayak fetch log recover and make zero commitThreshold configurable --- cmd/cqld/bootstrap.go | 16 ++++--- cmd/cqld/kayak.go | 15 ++++++- kayak/commit.go | 15 +++++-- kayak/fetch.go | 99 +++++++++++++++++++++++++++++++++++++++++++ kayak/processes.go | 4 +- kayak/rpc.go | 4 +- kayak/runtime.go | 79 ++++++++++++++++++++++++++++------ kayak/runtime_test.go | 32 ++++++++++---- kayak/tracker.go | 2 +- kayak/tracker_test.go | 2 +- kayak/types/config.go | 8 +++- kayak/types/errors.go | 2 + kayak/types/rpc.go | 18 +++++++- kayak/waiter.go | 88 ++++++++++++++++++++++++++++++++++++++ worker/db.go | 9 +++- worker/dbms_mux.go | 26 ++++++++++-- 16 files changed, 372 insertions(+), 47 deletions(-) create mode 100644 kayak/fetch.go create mode 100644 kayak/waiter.go diff --git a/cmd/cqld/bootstrap.go b/cmd/cqld/bootstrap.go index 616554a56..63f44b31c 100644 --- a/cmd/cqld/bootstrap.go +++ b/cmd/cqld/bootstrap.go @@ -42,11 +42,13 @@ import ( ) const ( - kayakServiceName = "Kayak" - kayakMethodName = "Call" - kayakWalFileName = "kayak.ldb" - kayakPrepareTimeout = 5 * time.Second - kayakCommitTimeout = 
time.Minute + kayakServiceName = "Kayak" + kayakApplyMethodName = "Apply" + kayakFetchMethodName = "Fetch" + kayakWalFileName = "kayak.ldb" + kayakPrepareTimeout = 5 * time.Second + kayakCommitTimeout = time.Minute + kayakLogWaitTimeout = 30 * time.Second ) func runNode(nodeID proto.NodeID, listenAddr string) (err error) { @@ -226,11 +228,13 @@ func initKayakTwoPC(rootDir string, node *proto.Node, peers *proto.Peers, h kt.H CommitThreshold: 1.0, PrepareTimeout: kayakPrepareTimeout, CommitTimeout: kayakCommitTimeout, + LogWaitTimeout: kayakLogWaitTimeout, Peers: peers, Wal: logWal, NodeID: node.ID, ServiceName: kayakServiceName, - MethodName: kayakMethodName, + ApplyMethodName: kayakApplyMethodName, + FetchMethodName: kayakFetchMethodName, } // create kayak runtime diff --git a/cmd/cqld/kayak.go b/cmd/cqld/kayak.go index 2d64c9e04..ee36f886a 100644 --- a/cmd/cqld/kayak.go +++ b/cmd/cqld/kayak.go @@ -38,7 +38,18 @@ func NewKayakService(server *rpc.Server, serviceName string, rt *kayak.Runtime) return } -// Call handles kayak call. -func (s *KayakService) Call(req *kt.RPCRequest, _ *interface{}) (err error) { +// Apply handles kayak apply call. +func (s *KayakService) Apply(req *kt.ApplyRequest, _ *interface{}) (err error) { return s.rt.FollowerApply(req.Log) } + +// Fetch handles kayak log fetch call. 
+func (s *KayakService) Fetch(req *kt.FetchRequest, resp *kt.FetchResponse) (err error) { + var l *kt.Log + if l, err = s.rt.Fetch(req.GetContext(), req.Index); err != nil { + return + } + + resp.Log = l + return +} diff --git a/kayak/commit.go b/kayak/commit.go index a53e7edca..816a6cc85 100644 --- a/kayak/commit.go +++ b/kayak/commit.go @@ -158,7 +158,7 @@ func (r *Runtime) leaderDoCommit(req *commitReq) { atomic.StoreUint64(&r.lastCommit, l.Index) // send commit - cr.rpc = r.rpc(l, r.minCommitFollowers) + cr.rpc = r.applyRPC(l, r.minCommitFollowers) cr.index = l.Index cr.err = err @@ -186,6 +186,7 @@ func (r *Runtime) followerDoCommit(req *commitReq) { if req.lastCommit != myLastCommit { // TODO(): need counter for retries, infinite commit re-order would cause troubles go func(req *commitReq) { + _, _ = r.waitForLog(req.ctx, req.lastCommit) r.commitCh <- req }(req) waitCommitTask.End() @@ -231,12 +232,20 @@ func (r *Runtime) getPrepareLog(ctx context.Context, l *kt.Log) (lastCommitIndex return } + if pl, err = r.waitForLog(ctx, prepareIndex); err != nil { + err = errors.Wrap(err, "wait for prepare log failed") + return + } + // decode commit index if len(l.Data) >= 16 { lastCommitIndex, _ = r.bytesToUint64(l.Data[8:]) - } - pl, err = r.wal.Get(prepareIndex) + if _, err = r.waitForLog(ctx, lastCommitIndex); err != nil { + err = errors.Wrap(err, "wait for last commit log failed") + return + } + } return } diff --git a/kayak/fetch.go b/kayak/fetch.go new file mode 100644 index 000000000..9f6069918 --- /dev/null +++ b/kayak/fetch.go @@ -0,0 +1,99 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kayak + +import ( + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + "github.com/CovenantSQL/CovenantSQL/utils/log" +) + +func (r *Runtime) markMissingLog(index uint64) { + log.WithFields(log.Fields{ + "index": index, + "instance": r.instanceID, + }).Debug("mark log missing, start fetch") + rawItem, _ := r.waitLogMap.LoadOrStore(index, newWaitItem(index)) + item := rawItem.(*waitItem) + + select { + case <-r.stopCh: + case r.missingLogCh <- item: + } +} + +func (r *Runtime) missingLogCycle() { + for { + var waitItem *waitItem + + select { + case <-r.stopCh: + return + case waitItem = <-r.missingLogCh: + } + + // execute + func() { + r.peersLock.RLock() + defer r.peersLock.RUnlock() + + if waitItem == nil { + return + } + + waitItem.waitLock.Lock() + defer waitItem.waitLock.Unlock() + + var ( + req = &kt.FetchRequest{ + Instance: r.instanceID, + Index: waitItem.index, + } + resp = &kt.FetchResponse{} + err error + ) + + // check existence + if _, err = r.wal.Get(waitItem.index); err == nil { + // already exists + log.WithFields(log.Fields{ + "index": waitItem.index, + "instance": r.instanceID, + }).Debug("log already exists") + r.triggerLogAwaits(waitItem.index) + return + } + + if err = r.getCaller(r.peers.Leader).Call(r.fetchRPCMethod, req, resp); err != nil { + log.WithFields(log.Fields{ + "index": waitItem.index, + "instance": r.instanceID, + }).WithError(err).Debug("fetch log failed") + return + } + + // call follower apply + if resp.Log != nil { + if err = r.FollowerApply(resp.Log); err != nil { + 
log.WithFields(log.Fields{ + "index": waitItem.index, + "instance": r.instanceID, + }).WithError(err).Debug("apply log failed") + } + } + }() + } +} diff --git a/kayak/processes.go b/kayak/processes.go index 82b3616c8..85796224a 100644 --- a/kayak/processes.go +++ b/kayak/processes.go @@ -59,7 +59,7 @@ func (r *Runtime) doLeaderPrepare(ctx context.Context, tm *timer.Timer, req inte tm.Add("leader_prepare") // send prepare to all nodes - prepareTracker := r.rpc(prepareLog, r.minPreparedFollowers) + prepareTracker := r.applyRPC(prepareLog, r.minPreparedFollowers) prepareCtx, prepareCtxCancelFunc := context.WithTimeout(ctx, r.prepareTimeout) defer prepareCtxCancelFunc() prepareErrors, prepareDone, _ := prepareTracker.get(prepareCtx) @@ -112,7 +112,7 @@ func (r *Runtime) doLeaderRollback(ctx context.Context, tm *timer.Timer, prepare defer trace.StartRegion(ctx, "followerRollback").End() // async send rollback to all nodes - r.rpc(rollbackLog, 0) + r.applyRPC(rollbackLog, 0) tm.Add("follower_rollback") } diff --git a/kayak/rpc.go b/kayak/rpc.go index 4dbbe0464..07a532426 100644 --- a/kayak/rpc.go +++ b/kayak/rpc.go @@ -39,8 +39,8 @@ func (r *Runtime) errorSummary(errs map[proto.NodeID]error) error { } /// rpc related -func (r *Runtime) rpc(l *kt.Log, minCount int) (tracker *rpcTracker) { - req := &kt.RPCRequest{ +func (r *Runtime) applyRPC(l *kt.Log, minCount int) (tracker *rpcTracker) { + req := &kt.ApplyRequest{ Instance: r.instanceID, Log: l, } diff --git a/kayak/runtime.go b/kayak/runtime.go index 72ec091fd..47038a7de 100644 --- a/kayak/runtime.go +++ b/kayak/runtime.go @@ -18,7 +18,6 @@ package kayak import ( "context" - "fmt" "math" "sync" "sync/atomic" @@ -35,8 +34,10 @@ import ( const ( // commit channel window size commitWindow = 0 - // prepare window - trackerWindow = 10 + // missing log window + missingLogWindow = 10 + // missing log concurrency + missingLogConcurrency = 10 ) // Runtime defines the main kayak Runtime. 
@@ -81,10 +82,10 @@ type Runtime struct { callerMap sync.Map // map[proto.NodeID]Caller // service name for mux service. serviceName string - // rpc method for coordination requests. - rpcMethod string - // tracks the outgoing rpc requests. - rpcTrackCh chan *rpcTracker + // rpc method for apply requests. + applyRPCMethod string + // rpc method for fetch requests. + fetchRPCMethod string //// Parameters // prepare threshold defines the minimum node count requirement for prepare operation. @@ -95,8 +96,13 @@ type Runtime struct { prepareTimeout time.Duration // commit timeout defines the max allowed time for commit operation. commitTimeout time.Duration + // log wait timeout to fetch missing logs. + logWaitTimeout time.Duration // channel for awaiting commits. commitCh chan *commitReq + // channel for missing log indexes. + missingLogCh chan *waitItem + waitLogMap sync.Map // map[uint64]*waitItem /// Sub-routines management. started uint32 @@ -221,16 +227,18 @@ func NewRuntime(cfg *kt.RuntimeConfig) (rt *Runtime, err error) { minCommitFollowers: minCommitFollowers, // rpc related - serviceName: cfg.ServiceName, - rpcMethod: fmt.Sprintf("%v.%v", cfg.ServiceName, cfg.MethodName), - rpcTrackCh: make(chan *rpcTracker, trackerWindow), + serviceName: cfg.ServiceName, + applyRPCMethod: cfg.ServiceName + "." + cfg.ApplyMethodName, + fetchRPCMethod: cfg.ServiceName + "." 
+ cfg.FetchMethodName, // commits related prepareThreshold: cfg.PrepareThreshold, prepareTimeout: cfg.PrepareTimeout, commitThreshold: cfg.CommitThreshold, commitTimeout: cfg.CommitTimeout, + logWaitTimeout: cfg.LogWaitTimeout, commitCh: make(chan *commitReq, commitWindow), + missingLogCh: make(chan *waitItem, missingLogWindow), // stop coordinator stopCh: make(chan struct{}), @@ -252,8 +260,10 @@ func (r *Runtime) Start() (err error) { // start commit cycle r.goFunc(r.commitCycle) - // start rpc tracker collector - // TODO(): + // start missing log worker + for i := 0; i != missingLogConcurrency; i++ { + r.goFunc(r.missingLogCycle) + } return } @@ -276,6 +286,11 @@ func (r *Runtime) Shutdown() (err error) { // Apply defines entry for Leader node. func (r *Runtime) Apply(ctx context.Context, req interface{}) (result interface{}, logIndex uint64, err error) { + if atomic.LoadUint32(&r.started) != 1 { + err = kt.ErrStopped + return + } + ctx, task := trace.NewTask(ctx, "Kayak.Apply") defer task.End() @@ -323,12 +338,51 @@ func (r *Runtime) Apply(ctx context.Context, req interface{}) (result interface{ return } +// Fetch defines entry for missing log fetch. +func (r *Runtime) Fetch(ctx context.Context, index uint64) (l *kt.Log, err error) { + if atomic.LoadUint32(&r.started) != 1 { + err = kt.ErrStopped + return + } + + tm := timer.NewTimer() + + defer func() { + log.WithField("l", index). + WithFields(tm.ToLogFields()). + WithError(err). + Debug("kayak log fetch") + }() + + waitForLockRegion := trace.StartRegion(ctx, "peersLock") + + r.peersLock.RLock() + defer r.peersLock.RUnlock() + + tm.Add("peers_lock") + + waitForLockRegion.End() + + if r.role != proto.Leader { + // not leader + err = kt.ErrNotLeader + return + } + + // wal get + return r.wal.Get(index) +} + // FollowerApply defines entry for follower node. 
func (r *Runtime) FollowerApply(l *kt.Log) (err error) { if l == nil { err = errors.Wrap(kt.ErrInvalidLog, "log is nil") return } + if atomic.LoadUint32(&r.started) != 1 { + err = kt.ErrStopped + return + } ctx, task := trace.NewTask(context.Background(), "Kayak.FollowerApply."+l.Type.String()) defer task.End() @@ -373,6 +427,7 @@ func (r *Runtime) FollowerApply(l *kt.Log) (err error) { if err == nil { r.updateNextIndex(ctx, l) + r.triggerLogAwaits(l.Index) } return diff --git a/kayak/runtime_test.go b/kayak/runtime_test.go index 51c861add..b79077718 100644 --- a/kayak/runtime_test.go +++ b/kayak/runtime_test.go @@ -162,12 +162,22 @@ func newFakeService(rt *kayak.Runtime) (fs *fakeService) { return } -func (s *fakeService) Call(req *kt.RPCRequest, resp *interface{}) (err error) { +func (s *fakeService) Apply(req *kt.ApplyRequest, resp *interface{}) (err error) { // add some delay for timeout test //time.Sleep(time.Millisecond * 10) return s.rt.FollowerApply(req.Log) } +func (s *fakeService) Fetch(req *kt.FetchRequest, resp *kt.FetchResponse) (err error) { + var l *kt.Log + if l, err = s.rt.Fetch(req.GetContext(), req.Index); err != nil { + return + } + + resp.Log = l + return +} + func (s *fakeService) serveConn(c net.Conn) { s.s.ServeCodec(utils.GetMsgPackServerCodec(c)) } @@ -244,11 +254,12 @@ func TestRuntime(t *testing.T) { CommitThreshold: 1.0, PrepareTimeout: time.Second, CommitTimeout: 10 * time.Second, + LogWaitTimeout: 10 * time.Second, Peers: peers, Wal: wal1, NodeID: node1, ServiceName: "Test", - MethodName: "Call", + ApplyMethodName: "Apply", } rt1, err := kayak.NewRuntime(cfg1) So(err, ShouldBeNil) @@ -261,11 +272,12 @@ func TestRuntime(t *testing.T) { CommitThreshold: 1.0, PrepareTimeout: time.Second, CommitTimeout: 10 * time.Second, + LogWaitTimeout: 10 * time.Second, Peers: peers, Wal: wal2, NodeID: node2, ServiceName: "Test", - MethodName: "Call", + ApplyMethodName: "Apply", } rt2, err := kayak.NewRuntime(cfg2) So(err, ShouldBeNil) @@ -489,11 
+501,12 @@ func TestRuntime(t *testing.T) { CommitThreshold: 1.0, PrepareTimeout: time.Second, CommitTimeout: 10 * time.Second, + LogWaitTimeout: 10 * time.Second, Peers: peers, Wal: w, NodeID: node1, ServiceName: "Test", - MethodName: "Call", + ApplyMethodName: "Apply", } rt, err := kayak.NewRuntime(cfg) So(err, ShouldBeNil) @@ -550,14 +563,15 @@ func BenchmarkRuntime(b *testing.B) { cfg1 := &kt.RuntimeConfig{ Handler: db1, PrepareThreshold: 1.0, - CommitThreshold: 1.0, + CommitThreshold: 0.0, PrepareTimeout: time.Second, CommitTimeout: 10 * time.Second, + LogWaitTimeout: 10 * time.Second, Peers: peers, Wal: wal1, NodeID: node1, ServiceName: "Test", - MethodName: "Call", + ApplyMethodName: "Apply", } rt1, err := kayak.NewRuntime(cfg1) So(err, ShouldBeNil) @@ -567,14 +581,15 @@ func BenchmarkRuntime(b *testing.B) { cfg2 := &kt.RuntimeConfig{ Handler: db2, PrepareThreshold: 1.0, - CommitThreshold: 1.0, + CommitThreshold: 0.0, PrepareTimeout: time.Second, CommitTimeout: 10 * time.Second, + LogWaitTimeout: 10 * time.Second, Peers: peers, Wal: wal2, NodeID: node2, ServiceName: "Test", - MethodName: "Call", + ApplyMethodName: "Apply", } rt2, err := kayak.NewRuntime(cfg2) So(err, ShouldBeNil) @@ -663,6 +678,7 @@ func BenchmarkRuntime(b *testing.B) { }) So(d1, ShouldHaveLength, 1) So(d1[0], ShouldHaveLength, 1) + _ = total So(fmt.Sprint(d1[0][0]), ShouldEqual, fmt.Sprint(total)) //_, _, d2, _ := db2.Query(context.Background(), []storage.Query{ diff --git a/kayak/tracker.go b/kayak/tracker.go index ae91aeb95..32671004e 100644 --- a/kayak/tracker.go +++ b/kayak/tracker.go @@ -64,7 +64,7 @@ func newTracker(r *Runtime, req interface{}, minCount int) (t *rpcTracker) { t = &rpcTracker{ r: r, nodes: nodes, - method: r.rpcMethod, + method: r.applyRPCMethod, req: req, minCount: minCount, errors: make(map[proto.NodeID]error, len(nodes)), diff --git a/kayak/tracker_test.go b/kayak/tracker_test.go index 6ffccce6b..f2e82acfe 100644 --- a/kayak/tracker_test.go +++ 
b/kayak/tracker_test.go @@ -44,7 +44,7 @@ func TestTracker(t *testing.T) { nodeID1 := proto.NodeID("000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5") nodeID2 := proto.NodeID("000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade") r := &Runtime{ - rpcMethod: "test", + applyRPCMethod: "test", followers: []proto.NodeID{ nodeID1, nodeID2, diff --git a/kayak/types/config.go b/kayak/types/config.go index 0407a5e4a..15170c957 100644 --- a/kayak/types/config.go +++ b/kayak/types/config.go @@ -44,6 +44,10 @@ type RuntimeConfig struct { InstanceID string // mux service name. ServiceName string - // mux service method. - MethodName string + // apply service method. + ApplyMethodName string + // fetch service method. + FetchMethodName string + // fetch timeout. + LogWaitTimeout time.Duration } diff --git a/kayak/types/errors.go b/kayak/types/errors.go index 2912207d7..2bd7b8686 100644 --- a/kayak/types/errors.go +++ b/kayak/types/errors.go @@ -33,4 +33,6 @@ var ( ErrNotInPeer = errors.New("node not in peer") // ErrInvalidConfig represents invalid kayak runtime config. ErrInvalidConfig = errors.New("invalid runtime config") + // ErrStopped represents runtime not started. + ErrStopped = errors.New("stopped") ) diff --git a/kayak/types/rpc.go b/kayak/types/rpc.go index 7b96f42aa..f6ee63a11 100644 --- a/kayak/types/rpc.go +++ b/kayak/types/rpc.go @@ -18,8 +18,22 @@ package types import "github.com/CovenantSQL/CovenantSQL/proto" -// RPCRequest defines the RPC request entity. -type RPCRequest struct { +// ApplyRequest defines the apply request entity. +type ApplyRequest struct { + proto.Envelope + Instance string + Log *Log +} + +// FetchRequest defines the fetch request entity. +type FetchRequest struct { + proto.Envelope + Instance string + Index uint64 +} + +// FetchResponse defines the fetch response entity. 
+type FetchResponse struct { proto.Envelope Instance string Log *Log diff --git a/kayak/waiter.go b/kayak/waiter.go new file mode 100644 index 000000000..9fa1909ca --- /dev/null +++ b/kayak/waiter.go @@ -0,0 +1,88 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kayak + +import ( + "context" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + "github.com/CovenantSQL/CovenantSQL/utils/trace" + "sync" + "time" +) + +type waitItem struct { + index uint64 + doneOnce sync.Once + ch chan struct{} + waitLock sync.Mutex +} + +func newWaitItem(index uint64) *waitItem { + return &waitItem{ + index: index, + ch: make(chan struct{}), + } +} + +func (r *Runtime) waitForLog(ctx context.Context, index uint64) (l *kt.Log, err error) { + defer trace.StartRegion(ctx, "waitForLog").End() + + for { + if l, err = r.wal.Get(index); err == nil { + // exists + return + } + + rawItem, _ := r.waitLogMap.LoadOrStore(index, newWaitItem(index)) + item := rawItem.(*waitItem) + + if item == nil { + err = kt.ErrInvalidLog + return + } + + select { + case <-item.ch: + r.waitLogMap.Delete(index) + case <-time.After(r.logWaitTimeout): + r.markMissingLog(index) + case <-ctx.Done(): + err = ctx.Err() + return + } + } +} + +func (r *Runtime) triggerLogAwaits(index uint64) { + rawItem, ok := r.waitLogMap.Load(index) + if !ok || rawItem == nil { + return + } + + item := rawItem.(*waitItem) + + if item == nil { + return + } + + 
item.doneOnce.Do(func() { + if item.ch != nil { + close(item.ch) + item.ch = nil + } + }) +} diff --git a/worker/db.go b/worker/db.go index 691a4b799..c579afbc6 100644 --- a/worker/db.go +++ b/worker/db.go @@ -56,7 +56,7 @@ const ( PrepareThreshold = 1.0 // CommitThreshold defines the commit complete threshold. - CommitThreshold = 1.0 + CommitThreshold = 0.0 // PrepareTimeout defines the prepare timeout config. PrepareTimeout = 10 * time.Second @@ -64,6 +64,9 @@ const ( // CommitTimeout defines the commit timeout config. CommitTimeout = time.Minute + // LogWaitTimeout defines the missing log wait timeout config. + LogWaitTimeout = 5 * time.Second + // SlowQuerySampleSize defines the maximum slow query log size (default: 1KB). SlowQuerySampleSize = 1 << 10 ) @@ -181,12 +184,14 @@ func NewDatabase(cfg *DBConfig, peers *proto.Peers, CommitThreshold: CommitThreshold, PrepareTimeout: PrepareTimeout, CommitTimeout: CommitTimeout, + LogWaitTimeout: LogWaitTimeout, Peers: peers, Wal: db.kayakWal, NodeID: db.nodeID, InstanceID: string(db.dbID), ServiceName: DBKayakRPCName, - MethodName: DBKayakMethodName, + ApplyMethodName: DBKayakApplyMethodName, + FetchMethodName: DBKayakFetchMethodName, } // create kayak runtime diff --git a/worker/dbms_mux.go b/worker/dbms_mux.go index bb9a897fa..4e393642d 100644 --- a/worker/dbms_mux.go +++ b/worker/dbms_mux.go @@ -27,8 +27,10 @@ import ( ) const ( - // DBKayakMethodName defines the database kayak rpc method name. - DBKayakMethodName = "Call" + // DBKayakApplyMethodName defines the database kayak apply rpc method name. + DBKayakApplyMethodName = "Apply" + // DBKayakFetchMethodName defines the database kayak fetch rpc method name. + DBKayakFetchMethodName = "Fetch" ) // DBKayakMuxService defines a mux service for sqlchain kayak. @@ -55,8 +57,8 @@ func (s *DBKayakMuxService) unregister(id proto.DatabaseID) { s.serviceMap.Delete(id) } -// Call handles kayak call. 
-func (s *DBKayakMuxService) Call(req *kt.RPCRequest, _ *interface{}) (err error) { +// Apply handles kayak apply call. +func (s *DBKayakMuxService) Apply(req *kt.ApplyRequest, _ *interface{}) (err error) { // call apply to specified kayak // treat req.Instance as DatabaseID id := proto.DatabaseID(req.Instance) @@ -67,3 +69,19 @@ func (s *DBKayakMuxService) Call(req *kt.RPCRequest, _ *interface{}) (err error) return errors.Wrapf(ErrUnknownMuxRequest, "instance %v", req.Instance) } + +// Fetch handles kayak fetch call. +func (s *DBKayakMuxService) Fetch(req *kt.FetchRequest, resp *kt.FetchResponse) (err error) { + id := proto.DatabaseID(req.Instance) + + if v, ok := s.serviceMap.Load(id); ok { + var l *kt.Log + if l, err = v.(*kayak.Runtime).Fetch(req.GetContext(), req.Index); err != nil { + resp.Log = l + resp.Instance = req.Instance + return err + } + } + + return errors.Wrapf(ErrUnknownMuxRequest, "instance %v", req.Instance) +} From 649947ec6b09d6dfb1f2f4756145786a412eba29 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Wed, 16 Jan 2019 11:39:28 +0800 Subject: [PATCH 277/302] Adjust log wait timeout --- cmd/cqld/bootstrap.go | 2 +- worker/db.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/cqld/bootstrap.go b/cmd/cqld/bootstrap.go index 63f44b31c..72387d2d0 100644 --- a/cmd/cqld/bootstrap.go +++ b/cmd/cqld/bootstrap.go @@ -48,7 +48,7 @@ const ( kayakWalFileName = "kayak.ldb" kayakPrepareTimeout = 5 * time.Second kayakCommitTimeout = time.Minute - kayakLogWaitTimeout = 30 * time.Second + kayakLogWaitTimeout = 10 * time.Second ) func runNode(nodeID proto.NodeID, listenAddr string) (err error) { diff --git a/worker/db.go b/worker/db.go index c579afbc6..b708fef0f 100644 --- a/worker/db.go +++ b/worker/db.go @@ -65,7 +65,7 @@ const ( CommitTimeout = time.Minute // LogWaitTimeout defines the missing log wait timeout config. 
- LogWaitTimeout = 5 * time.Second + LogWaitTimeout = 1 * time.Second // SlowQuerySampleSize defines the maximum slow query log size (default: 1KB). SlowQuerySampleSize = 1 << 10 From 0a4f691cce501a317c07015e4f0b562b9a5a08ee Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Thu, 17 Jan 2019 11:49:17 +0800 Subject: [PATCH 278/302] Use direct function call instead of rpc call in kayak test --- kayak/runtime_test.go | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/kayak/runtime_test.go b/kayak/runtime_test.go index b79077718..5fa960e5f 100644 --- a/kayak/runtime_test.go +++ b/kayak/runtime_test.go @@ -26,6 +26,7 @@ import ( "net" "net/rpc" "os" + "strings" "sync/atomic" "testing" "time" @@ -39,7 +40,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/CovenantSQL/CovenantSQL/utils/trace" - mock_conn "github.com/jordwest/mock-conn" "github.com/pkg/errors" . "github.com/smartystreets/goconvey/convey" ) @@ -195,13 +195,11 @@ func newFakeCaller(m *fakeMux, nodeID proto.NodeID) *fakeCaller { } func (c *fakeCaller) Call(method string, req interface{}, resp interface{}) (err error) { - fakeConn := mock_conn.NewConn() - - go c.m.get(c.target).serveConn(fakeConn.Server) - client := rpc.NewClientWithCodec(utils.GetMsgPackClientCodec(fakeConn.Client)) - defer client.Close() - - return client.Call(method, req, resp) + if strings.HasSuffix(method, "Apply") { + return c.m.get(c.target).Apply(req.(*kt.ApplyRequest), nil) + } else { + return c.m.get(c.target).Fetch(req.(*kt.FetchRequest), resp.(*kt.FetchResponse)) + } } func TestRuntime(t *testing.T) { From 78f406c63f24388ef47e9b77abd0ed89efefc7b4 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Thu, 17 Jan 2019 11:49:33 +0800 Subject: [PATCH 279/302] Add rpc parallel request test --- rpc/rpcutil_test.go | 81 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 79 insertions(+), 2 deletions(-) diff --git a/rpc/rpcutil_test.go 
b/rpc/rpcutil_test.go index aa58c9487..7ed630f97 100644 --- a/rpc/rpcutil_test.go +++ b/rpc/rpcutil_test.go @@ -27,8 +27,6 @@ import ( "testing" "time" - . "github.com/smartystreets/goconvey/convey" - "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/consistent" "github.com/CovenantSQL/CovenantSQL/crypto/kms" @@ -36,6 +34,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" + . "github.com/smartystreets/goconvey/convey" ) const ( @@ -291,6 +290,84 @@ func TestNewPersistentCaller(t *testing.T) { } +func BenchmarkPersistentCaller_Call2(b *testing.B) { + log.SetLevel(log.FatalLevel) + os.Remove(PubKeyStorePath) + defer os.Remove(PubKeyStorePath) + os.Remove(publicKeyStore) + defer os.Remove(publicKeyStore) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) + defer cancel() + err := utils.WaitForPorts(ctx, "127.0.0.1", []int{ + 2230, + }, time.Millisecond*200) + + if err != nil { + log.Fatalf("wait for port ready timeout: %v", err) + } + + _, testFile, _, _ := runtime.Caller(0) + confFile := filepath.Join(filepath.Dir(testFile), "../test/node_standalone/config.yaml") + privateKeyPath := filepath.Join(filepath.Dir(testFile), "../test/node_standalone/private.key") + + conf.GConf, _ = conf.LoadConfig(confFile) + log.Debugf("GConf: %#v", conf.GConf) + // reset the once + route.Once = sync.Once{} + route.InitKMS(publicKeyStore) + + addr := conf.GConf.ListenAddr + _, err = route.NewDHTService(PubKeyStorePath, new(consistent.KMSStorage), true) + + server, err := NewServerWithService(ServiceMap{"Test": &fakeService{}}) + if err != nil { + b.Fatal(err) + } + + _ = server.InitRPCServer(addr, privateKeyPath, []byte{}) + go server.Serve() + + client := NewPersistentCaller(conf.GConf.BP.NodeID) + node1 := proto.NewNode() + node1.InitNodeCryptoInfo(100 * time.Millisecond) + node1.Addr = "1.1.1.1:1" + + client = 
NewPersistentCaller(conf.GConf.BP.NodeID) + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + req := &FakeRequest{} + req.Log.Data = []byte(strings.Repeat("1", 500)) + err = client.Call("Test.Call", req, nil) + if err != nil { + b.Error(err) + } + } + }) + server.Stop() +} + +type fakeService struct{} + +type FakeRequest struct { + proto.Envelope + Instance string + Log struct { + Index uint64 // log index + Version uint64 // log version + Type uint8 // log type + Producer proto.NodeID // producer node + DataLength uint64 // data length + Data []byte + } +} + +func (s *fakeService) Call(req *FakeRequest, resp *interface{}) (err error) { + time.Sleep(time.Microsecond * 200) + return +} + func BenchmarkPersistentCaller_Call(b *testing.B) { log.SetLevel(log.InfoLevel) os.Remove(PubKeyStorePath) From 4bd20e8137a8171a21d50c297cdc45657df7a9b8 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Thu, 17 Jan 2019 11:50:10 +0800 Subject: [PATCH 280/302] Fix shared secret cache bug --- rpc/sharedsecret.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/rpc/sharedsecret.go b/rpc/sharedsecret.go index 9ed67ec0c..c1a5be739 100644 --- a/rpc/sharedsecret.go +++ b/rpc/sharedsecret.go @@ -36,7 +36,7 @@ func GetSharedSecretWith(nodeID *proto.RawNodeID, isAnonymous bool) (symmetricKe symmetricKey = []byte(`!&\\!qEyey*\cbLc,aKl`) //log.Debug("using anonymous ETLS") } else { - symmetricKeyI, ok := symmetricKeyCache.Load(nodeID) + symmetricKeyI, ok := symmetricKeyCache.Load(nodeID.String()) if ok { symmetricKey, _ = symmetricKeyI.([]byte) } else { @@ -68,7 +68,7 @@ func GetSharedSecretWith(nodeID *proto.RawNodeID, isAnonymous bool) (symmetricKe } symmetricKey = asymmetric.GenECDHSharedSecret(localPrivateKey, remotePublicKey) - symmetricKeyCache.Store(nodeID, symmetricKey) + symmetricKeyCache.Store(nodeID.String(), symmetricKey) //log.WithFields(log.Fields{ // "node": nodeID.String(), // "remotePub": fmt.Sprintf("%#x", remotePublicKey.Serialize()), 
From ba2231c9a37c683c6d89ecad17948aedf35b14f9 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Fri, 18 Jan 2019 11:20:28 +0800 Subject: [PATCH 281/302] Use back the fake pipe rpc feature --- kayak/runtime_test.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/kayak/runtime_test.go b/kayak/runtime_test.go index 5fa960e5f..b79077718 100644 --- a/kayak/runtime_test.go +++ b/kayak/runtime_test.go @@ -26,7 +26,6 @@ import ( "net" "net/rpc" "os" - "strings" "sync/atomic" "testing" "time" @@ -40,6 +39,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/CovenantSQL/CovenantSQL/utils/trace" + mock_conn "github.com/jordwest/mock-conn" "github.com/pkg/errors" . "github.com/smartystreets/goconvey/convey" ) @@ -195,11 +195,13 @@ func newFakeCaller(m *fakeMux, nodeID proto.NodeID) *fakeCaller { } func (c *fakeCaller) Call(method string, req interface{}, resp interface{}) (err error) { - if strings.HasSuffix(method, "Apply") { - return c.m.get(c.target).Apply(req.(*kt.ApplyRequest), nil) - } else { - return c.m.get(c.target).Fetch(req.(*kt.FetchRequest), resp.(*kt.FetchResponse)) - } + fakeConn := mock_conn.NewConn() + + go c.m.get(c.target).serveConn(fakeConn.Server) + client := rpc.NewClientWithCodec(utils.GetMsgPackClientCodec(fakeConn.Client)) + defer client.Close() + + return client.Call(method, req, resp) } func TestRuntime(t *testing.T) { From 24509525eb3b1d8ff0736e1fcae5620d7127128a Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 21 Jan 2019 18:03:10 +0800 Subject: [PATCH 282/302] Add more metric tests and fix race and test case bug --- cmd/cql-minerd/various_metric_test.go | 377 ++++++++++++++++++++++++++ kayak/commit.go | 5 +- kayak/runtime_test.go | 7 +- kayak/waiter.go | 6 +- rpc/rpcutil_test.go | 10 +- types/xxxxx_test.go | 91 +++++++ xenomint/state_test.go | 56 ++-- 7 files changed, 510 insertions(+), 42 deletions(-) create mode 100644 
cmd/cql-minerd/various_metric_test.go create mode 100644 types/xxxxx_test.go diff --git a/cmd/cql-minerd/various_metric_test.go b/cmd/cql-minerd/various_metric_test.go new file mode 100644 index 000000000..66f95f865 --- /dev/null +++ b/cmd/cql-minerd/various_metric_test.go @@ -0,0 +1,377 @@ +// +build !testbinary + +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import ( + "bytes" + "encoding/binary" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + kw "github.com/CovenantSQL/CovenantSQL/kayak/wal" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/types" + "github.com/CovenantSQL/CovenantSQL/utils" + x "github.com/CovenantSQL/CovenantSQL/xenomint" + xi "github.com/CovenantSQL/CovenantSQL/xenomint/interfaces" + xs "github.com/CovenantSQL/CovenantSQL/xenomint/sqlite" + . 
"github.com/smartystreets/goconvey/convey" +) + +func BenchmarkDBWrite(b *testing.B) { + priv, _, err := asymmetric.GenSecp256k1KeyPair() + _ = err + + var n proto.NodeID + var a proto.AccountAddress + + r := &types.Request{ + Header: types.SignedRequestHeader{ + RequestHeader: types.RequestHeader{ + QueryType: types.WriteQuery, + NodeID: n.ToRawNodeID().ToNodeID(), + DatabaseID: a.DatabaseID(), + ConnectionID: 0, + SeqNo: 1, + Timestamp: time.Now().UTC(), + BatchCount: 1, + }, + }, + Payload: types.RequestPayload{ + Queries: []types.Query{ + { + Pattern: "INSERT INTO insert_table0 ( k, v1 ) VALUES(?, ?)", + Args: []types.NamedArg{ + { + Value: 1, + }, + { + Value: 2, + }, + }, + }, + }, + }, + } + + err = r.Sign(priv) + + var ( + strg xi.Storage + state *x.State + ) + f, _ := ioutil.TempFile("", "f") + _ = f.Close() + _ = os.Remove(f.Name()) + + strg, err = xs.NewSqlite(f.Name()) + if err == nil { + defer strg.Close() + } + state, err = x.NewState(n.ToRawNodeID().ToNodeID(), strg) + if err == nil { + defer state.Close(true) + } + + b.ResetTimer() + b.Run("commit", func(b *testing.B) { + r1 := &types.Request{ + Header: types.SignedRequestHeader{ + RequestHeader: types.RequestHeader{ + QueryType: types.WriteQuery, + NodeID: n.ToRawNodeID().ToNodeID(), + DatabaseID: a.DatabaseID(), + ConnectionID: 0, + SeqNo: 1, + Timestamp: time.Now().UTC(), + BatchCount: 1, + }, + }, + Payload: types.RequestPayload{ + Queries: []types.Query{ + { + Pattern: "CREATE TABLE insert_table0 (k int, v1 int)", + }, + }, + }, + } + + _ = r1.Sign(priv) + _, _, _ = state.Query(r1, false) + + for i := 0; i != b.N; i++ { + _, _, _ = state.Query(r, false) + } + }) +} + +func BenchmarkSignSignature(b *testing.B) { + priv, _, err := asymmetric.GenSecp256k1KeyPair() + _ = err + + var n proto.NodeID + var a proto.AccountAddress + + r := &types.Request{ + Header: types.SignedRequestHeader{ + RequestHeader: types.RequestHeader{ + QueryType: types.WriteQuery, + NodeID: n.ToRawNodeID().ToNodeID(), + 
DatabaseID: a.DatabaseID(), + ConnectionID: 0, + SeqNo: 1, + Timestamp: time.Now().UTC(), + BatchCount: 1, + }, + }, + Payload: types.RequestPayload{ + Queries: []types.Query{ + { + Pattern: "INSERT INTO insert_table0 ( k, v1 ) VALUES(?, ?)", + Args: []types.NamedArg{ + { + Value: 1, + }, + { + Value: 2, + }, + }, + }, + }, + }, + } + + b.ResetTimer() + b.Run("sign", func(b *testing.B) { + for i := 0; i != b.N; i++ { + err = r.Sign(priv) + } + }) + + b.ResetTimer() + b.Run("verify", func(b *testing.B) { + for i := 0; i != b.N; i++ { + err = r.Verify() + } + }) + + rs := &types.Response{ + Header: types.SignedResponseHeader{ + ResponseHeader: types.ResponseHeader{ + Request: r.Header, + NodeID: n.ToRawNodeID().ToNodeID(), + Timestamp: time.Now().UTC(), + RowCount: 1, + LogOffset: 1, + LastInsertID: 1, + AffectedRows: 1, + }, + }, + } + + _ = rs + + b.ResetTimer() + b.Run("sign nested", func(b *testing.B) { + for i := 0; i != b.N; i++ { + err = rs.Sign(priv) + } + }) + + b.ResetTimer() + b.Run("verify nested", func(b *testing.B) { + for i := 0; i != b.N; i++ { + err = rs.Verify() + } + }) + + var buf *bytes.Buffer + + b.ResetTimer() + b.Run("encode request", func(b *testing.B) { + for i := 0; i != b.N; i++ { + buf, _ = utils.EncodeMsgPack(r) + } + }) + + b.ResetTimer() + b.Run("decode request", func(b *testing.B) { + for i := 0; i != b.N; i++ { + var tr *types.Request + _ = utils.DecodeMsgPack(buf.Bytes(), &tr) + } + }) + + var buf2 *bytes.Buffer + l := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 1, + Version: 1, + Type: kt.LogPrepare, + Producer: n.ToRawNodeID().ToNodeID(), + }, + Data: buf.Bytes(), + } + + b.ResetTimer() + b.Run("encode to binlog format", func(b *testing.B) { + for i := 0; i != b.N; i++ { + buf2, _ = utils.EncodeMsgPack(l) + _ = buf2 + } + }) + + b.ResetTimer() + b.Run("decode from binlog format", func(b *testing.B) { + for i := 0; i != b.N; i++ { + var l2 *kt.Log + _ = utils.DecodeMsgPack(buf2.Bytes(), &l2) + } + }) + + f, _ := 
ioutil.TempFile("", "f") + _ = f.Close() + _ = os.Remove(f.Name()) + defer os.Remove(f.Name()) + w, _ := kw.NewLevelDBWal(f.Name()) + defer w.Close() + + var index uint64 + + b.Run("write wal", func(b *testing.B) { + for i := 0; i != b.N; i++ { + index = index + 1 + l.Index = index + _ = w.Write(l) + } + }) + + b.Run("get wal", func(b *testing.B) { + for i := 0; i != b.N; i++ { + index = index - 1 + if index > 0 { + _, _ = w.Get(index) + } + } + }) +} + +func TestComputeMetrics(t *testing.T) { + Convey("compute metrics", t, func() { + priv, _, err := asymmetric.GenSecp256k1KeyPair() + So(err, ShouldBeNil) + + var n proto.NodeID + var a proto.AccountAddress + + r := &types.Request{ + Header: types.SignedRequestHeader{ + RequestHeader: types.RequestHeader{ + QueryType: types.WriteQuery, + NodeID: n.ToRawNodeID().ToNodeID(), + DatabaseID: a.DatabaseID(), + ConnectionID: 0, + SeqNo: 1, + Timestamp: time.Now().UTC(), + BatchCount: 1, + }, + }, + Payload: types.RequestPayload{ + Queries: []types.Query{ + { + Pattern: "INSERT INTO insert_table0 ( k, v1 ) VALUES(?, ?)", + Args: []types.NamedArg{ + { + Value: 1, + }, + { + Value: 2, + }, + }, + }, + }, + }, + } + + err = r.Sign(priv) + So(err, ShouldBeNil) + + buf, err := utils.EncodeMsgPack(r) + So(err, ShouldBeNil) + + t.Logf("RequestSize: %v", len(buf.Bytes())) + + l := &kt.Log{ + LogHeader: kt.LogHeader{ + Index: 1, + Version: 1, + Type: kt.LogPrepare, + Producer: n.ToRawNodeID().ToNodeID(), + }, + Data: buf.Bytes(), + } + + buf2, err := utils.EncodeMsgPack(l) + So(err, ShouldBeNil) + + t.Logf("PrepareLogSize: %v", len(buf2.Bytes())) + + rs := &types.Response{ + Header: types.SignedResponseHeader{ + ResponseHeader: types.ResponseHeader{ + Request: r.Header, + NodeID: n.ToRawNodeID().ToNodeID(), + Timestamp: time.Now().UTC(), + RowCount: 1, + LogOffset: 1, + LastInsertID: 1, + AffectedRows: 1, + }, + }, + } + + buf3, err := utils.EncodeMsgPack(rs) + So(err, ShouldBeNil) + + t.Logf("ResponseSize: %v", len(buf3.Bytes())) + 
+ bs := make([]byte, 16) + binary.BigEndian.PutUint64(bs, 1) + binary.BigEndian.PutUint64(bs, 2) + + l2 := kt.Log{ + LogHeader: kt.LogHeader{ + Index: 1, + Version: 1, + Type: kt.LogCommit, + Producer: n.ToRawNodeID().ToNodeID(), + }, + Data: bs, + } + + buf4, err := utils.EncodeMsgPack(l2) + So(err, ShouldBeNil) + + t.Logf("CommitLogSize: %v", len(buf4.Bytes())) + }) +} diff --git a/kayak/commit.go b/kayak/commit.go index 816a6cc85..a3b46d09a 100644 --- a/kayak/commit.go +++ b/kayak/commit.go @@ -102,7 +102,6 @@ func (r *Runtime) followerCommitResult(ctx context.Context, tm *timer.Timer, com select { case <-ctx.Done(): case r.commitCh <- req: - tm.Add("enqueue") } return @@ -131,6 +130,8 @@ func (r *Runtime) leaderDoCommit(req *commitReq) { return } + req.tm.Add("queue") + // create leader log var ( l *kt.Log @@ -194,7 +195,7 @@ func (r *Runtime) followerDoCommit(req *commitReq) { } waitCommitTask.End() - req.tm.Add("wait_last_commit") + req.tm.Add("queue") defer trace.StartRegion(req.ctx, "commitCycle").End() diff --git a/kayak/runtime_test.go b/kayak/runtime_test.go index b79077718..90d178c85 100644 --- a/kayak/runtime_test.go +++ b/kayak/runtime_test.go @@ -35,6 +35,7 @@ import ( kt "github.com/CovenantSQL/CovenantSQL/kayak/types" kl "github.com/CovenantSQL/CovenantSQL/kayak/wal" "github.com/CovenantSQL/CovenantSQL/proto" + crpc "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/storage" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" @@ -164,7 +165,6 @@ func newFakeService(rt *kayak.Runtime) (fs *fakeService) { func (s *fakeService) Apply(req *kt.ApplyRequest, resp *interface{}) (err error) { // add some delay for timeout test - //time.Sleep(time.Millisecond * 10) return s.rt.FollowerApply(req.Log) } @@ -179,7 +179,8 @@ func (s *fakeService) Fetch(req *kt.FetchRequest, resp *kt.FetchResponse) (err e } func (s *fakeService) serveConn(c net.Conn) { - 
s.s.ServeCodec(utils.GetMsgPackServerCodec(c)) + var r proto.NodeID + s.s.ServeCodec(crpc.NewNodeAwareServerCodec(context.Background(), utils.GetMsgPackServerCodec(c), r.ToRawNodeID())) } type fakeCaller struct { @@ -521,7 +522,7 @@ func TestRuntime(t *testing.T) { func BenchmarkRuntime(b *testing.B) { Convey("runtime test", b, func(c C) { - log.SetLevel(log.FatalLevel) + log.SetLevel(log.DebugLevel) f, err := os.OpenFile("test.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600) So(err, ShouldBeNil) log.SetOutput(f) diff --git a/kayak/waiter.go b/kayak/waiter.go index 9fa1909ca..675038806 100644 --- a/kayak/waiter.go +++ b/kayak/waiter.go @@ -18,10 +18,11 @@ package kayak import ( "context" - kt "github.com/CovenantSQL/CovenantSQL/kayak/types" - "github.com/CovenantSQL/CovenantSQL/utils/trace" "sync" "time" + + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" + "github.com/CovenantSQL/CovenantSQL/utils/trace" ) type waitItem struct { @@ -82,7 +83,6 @@ func (r *Runtime) triggerLogAwaits(index uint64) { item.doneOnce.Do(func() { if item.ch != nil { close(item.ch) - item.ch = nil } }) } diff --git a/rpc/rpcutil_test.go b/rpc/rpcutil_test.go index 7ed630f97..57d716bdb 100644 --- a/rpc/rpcutil_test.go +++ b/rpc/rpcutil_test.go @@ -290,7 +290,7 @@ func TestNewPersistentCaller(t *testing.T) { } -func BenchmarkPersistentCaller_Call2(b *testing.B) { +func BenchmarkPersistentCaller_CallKayakLog(b *testing.B) { log.SetLevel(log.FatalLevel) os.Remove(PubKeyStorePath) defer os.Remove(PubKeyStorePath) @@ -329,11 +329,6 @@ func BenchmarkPersistentCaller_Call2(b *testing.B) { go server.Serve() client := NewPersistentCaller(conf.GConf.BP.NodeID) - node1 := proto.NewNode() - node1.InitNodeCryptoInfo(100 * time.Millisecond) - node1.Addr = "1.1.1.1:1" - - client = NewPersistentCaller(conf.GConf.BP.NodeID) b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { @@ -345,7 +340,10 @@ func BenchmarkPersistentCaller_Call2(b *testing.B) { } } }) + b.StopTimer() + time.Sleep(5 * 
time.Second) server.Stop() + GetSessionPoolInstance().Close() } type fakeService struct{} diff --git a/types/xxxxx_test.go b/types/xxxxx_test.go new file mode 100644 index 000000000..a5f10abf6 --- /dev/null +++ b/types/xxxxx_test.go @@ -0,0 +1,91 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package types + +import ( + "math" + "strings" + "testing" + "time" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils" + . 
"github.com/smartystreets/goconvey/convey" +) + +func BenchmarkEncode(b *testing.B) { + Convey("test encode decode", b, func(c C) { + var ( + nodeID proto.NodeID + addr proto.AccountAddress + ) + r := &Request{ + Header: SignedRequestHeader{ + RequestHeader: RequestHeader{ + QueryType: ReadQuery, + NodeID: nodeID.ToRawNodeID().ToNodeID(), + DatabaseID: addr.DatabaseID(), + ConnectionID: math.MaxUint64, + SeqNo: math.MaxUint64, + Timestamp: time.Now().UTC(), + BatchCount: 1, + }, + }, + Payload: RequestPayload{ + Queries: []Query{ + { + Pattern: strings.Repeat("1", 1024), + Args: []NamedArg{}, + }, + }, + }, + } + + privKey, _, err := asymmetric.GenSecp256k1KeyPair() + So(err, ShouldBeNil) + So(privKey, ShouldNotBeNil) + + b.Run("a", func(b *testing.B) { + for i := 0; i != b.N; i++ { + err := r.Sign(privKey) + + req, err := utils.EncodeMsgPack(r) + bs := req.Bytes() + + b.Logf("len: %v", len(bs)) + + var e1 *Request + err = utils.DecodeMsgPack(bs, &e1) + err = e1.Verify() + _ = err + + req, err = utils.EncodeMsgPack(r) + bs = req.Bytes() + var e2 *Request + err = utils.DecodeMsgPack(bs, &e2) + err = e2.Verify() + + req, err = utils.EncodeMsgPack(r) + bs = req.Bytes() + var e3 *Request + err = utils.DecodeMsgPack(bs, &e3) + err = e3.Verify() + } + }) + }) +} diff --git a/xenomint/state_test.go b/xenomint/state_test.go index a68a893da..58bb000f3 100644 --- a/xenomint/state_test.go +++ b/xenomint/state_test.go @@ -88,7 +88,7 @@ func TestState(t *testing.T) { var req = buildRequest(types.WriteQuery, []types.Query{ buildQuery(`CREATE TABLE t1 (k INT, v TEXT, PRIMARY KEY(k))`), }) - _, _, err = st1.Query(req) + _, _, err = st1.Query(req, true) So(err, ShouldNotBeNil) err = errors.Cause(err) So(err, ShouldNotBeNil) @@ -102,12 +102,12 @@ func TestState(t *testing.T) { }) resp *types.Response ) - _, resp, err = st1.Query(req) + _, resp, err = st1.Query(req, true) So(err, ShouldBeNil) So(resp, ShouldNotBeNil) _, resp, err = st1.Query(buildRequest(types.ReadQuery, 
[]types.Query{ buildQuery(`SELECT * FROM t1`), - })) + }), true) // any schema change query will trigger performance degradation mode in current block So(err, ShouldBeNil) }) @@ -124,12 +124,12 @@ func TestState(t *testing.T) { }) resp *types.Response ) - _, resp, err = st1.Query(req) + _, resp, err = st1.Query(req, true) So(err, ShouldBeNil) So(resp, ShouldNotBeNil) err = st1.commit() So(err, ShouldBeNil) - _, resp, err = st2.Query(req) + _, resp, err = st2.Query(req, true) So(err, ShouldBeNil) So(resp, ShouldNotBeNil) err = st2.commit() @@ -137,7 +137,7 @@ func TestState(t *testing.T) { _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?)`, 1, "v1"), buildQuery(`SELECT v FROM t1 WHERE k=?`, 1), - })) + }), true) // The use of Query instead of Exec won't produce an "attempt to write" error // like Exec, but it should still keep it readonly -- which means writes will // be ignored in this case. @@ -148,7 +148,7 @@ func TestState(t *testing.T) { req = buildRequest(types.QueryType(0xff), []types.Query{ buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?)`, values[0]...), }) - _, resp, err = st1.Query(req) + _, resp, err = st1.Query(req, true) So(err, ShouldEqual, ErrInvalidRequest) So(resp, ShouldBeNil) err = st1.Replay(req, nil) @@ -157,7 +157,7 @@ func TestState(t *testing.T) { Convey("The state should report error on malformed queries", func() { _, resp, err = st1.Query(buildRequest(types.WriteQuery, []types.Query{ buildQuery(`XXXXXX INTO t1 (k, v) VALUES (?, ?)`, values[0]...), - })) + }), true) So(err, ShouldNotBeNil) So(resp, ShouldBeNil) st1.Stat(id1) @@ -173,7 +173,7 @@ func TestState(t *testing.T) { So(err, ShouldNotBeNil) _, resp, err = st1.Query(buildRequest(types.WriteQuery, []types.Query{ buildQuery(`INSERT INTO t2 (k, v) VALUES (?, ?)`, values[0]...), - })) + }), true) So(err, ShouldNotBeNil) So(resp, ShouldBeNil) st1.Stat(id1) @@ -190,13 +190,13 @@ func TestState(t *testing.T) { st1.Stat(id1) 
_, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ buildQuery(`XXXXXX v FROM t1`), - })) + }), true) So(err, ShouldNotBeNil) So(resp, ShouldBeNil) st1.Stat(id1) _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ buildQuery(`SELECT v FROM t2`), - })) + }), true) So(err, ShouldNotBeNil) So(resp, ShouldBeNil) st1.Stat(id1) @@ -210,12 +210,12 @@ func TestState(t *testing.T) { Convey("The state should work properly with reading/writing queries", func() { _, resp, err = st1.Query(buildRequest(types.WriteQuery, []types.Query{ buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?)`, values[0]...), - })) + }), true) So(err, ShouldBeNil) So(resp.Header.RowCount, ShouldEqual, 0) _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ buildQuery(`SELECT v FROM t1 WHERE k=?`, values[0][0]), - })) + }), true) So(err, ShouldBeNil) So(resp.Header.RowCount, ShouldEqual, 1) So(resp.Payload, ShouldResemble, types.ResponsePayload{ @@ -229,12 +229,12 @@ func TestState(t *testing.T) { buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?)`, values[1]...), buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?); INSERT INTO t1 (k, v) VALUES (?, ?)`, concat(values[2:4])...), - })) + }), true) So(err, ShouldBeNil) So(resp.Header.RowCount, ShouldEqual, 0) _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ buildQuery(`SELECT v FROM t1`), - })) + }), true) So(err, ShouldBeNil) So(resp.Header.RowCount, ShouldEqual, 4) So(resp.Payload, ShouldResemble, types.ResponsePayload{ @@ -251,7 +251,7 @@ INSERT INTO t1 (k, v) VALUES (?, ?)`, concat(values[2:4])...), _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ buildQuery(`SELECT * FROM t1`), - })) + }), true) So(err, ShouldBeNil) So(resp.Payload, ShouldResemble, types.ResponsePayload{ Columns: []string{"k", "v"}, @@ -268,22 +268,22 @@ INSERT INTO t1 (k, v) VALUES (?, ?)`, concat(values[2:4])...), // Test show statements _, resp, err = st1.Query(buildRequest(types.ReadQuery, 
[]types.Query{ buildQuery(`SHOW TABLE t1`), - })) + }), true) So(err, ShouldBeNil) So(resp, ShouldNotBeNil) _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ buildQuery(`SHOW CREATE TABLE t1`), - })) + }), true) So(err, ShouldBeNil) So(resp, ShouldNotBeNil) _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ buildQuery(`SHOW INDEX FROM TABLE t1`), - })) + }), true) So(err, ShouldBeNil) So(resp, ShouldNotBeNil) _, resp, err = st1.Query(buildRequest(types.ReadQuery, []types.Query{ buildQuery(`SHOW TABLES`), - })) + }), true) So(err, ShouldBeNil) So(resp, ShouldNotBeNil) st1.Stat(id1) @@ -342,7 +342,7 @@ INSERT INTO t1 (k, v) VALUES (?, ?)`, concat(values[2:4])...), } ) for i := range reqs { - qt, resp, err = st1.Query(reqs[i]) + qt, resp, err = st1.Query(reqs[i], true) So(err, ShouldBeNil) So(qt, ShouldNotBeNil) So(resp, ShouldNotBeNil) @@ -357,10 +357,10 @@ INSERT INTO t1 (k, v) VALUES (?, ?)`, concat(values[2:4])...), req = buildRequest(types.ReadQuery, []types.Query{ buildQuery(`SELECT v FROM t1 WHERE k=?`, values[i][0]), }) - _, resp1, err = st1.Query(req) + _, resp1, err = st1.Query(req, true) So(err, ShouldBeNil) So(resp1, ShouldNotBeNil) - _, resp2, err = st2.Query(req) + _, resp2, err = st2.Query(req, true) So(err, ShouldBeNil) So(resp2, ShouldNotBeNil) So(resp1.Payload, ShouldResemble, resp2.Payload) @@ -389,7 +389,7 @@ INSERT INTO t1 (k, v) VALUES (?, ?)`, concat(values[2:4])...), ) for i := range reqs { var resp *types.Response - qt, resp, err = st1.Query(reqs[i]) + qt, resp, err = st1.Query(reqs[i], true) So(err, ShouldBeNil) So(qt, ShouldNotBeNil) So(resp, ShouldNotBeNil) @@ -477,10 +477,10 @@ INSERT INTO t1 (k, v) VALUES (?, ?)`, concat(values[2:4])...), req = buildRequest(types.ReadQuery, []types.Query{ buildQuery(`SELECT v FROM t1 WHERE k=?`, values[i][0]), }) - _, resp1, err = st1.Query(req) + _, resp1, err = st1.Query(req, true) So(err, ShouldBeNil) So(resp1, ShouldNotBeNil) - _, resp2, err = st2.Query(req) + _, 
resp2, err = st2.Query(req, true) So(err, ShouldBeNil) So(resp2, ShouldNotBeNil) So(resp1.Payload, ShouldResemble, resp2.Payload) @@ -511,10 +511,10 @@ INSERT INTO t1 (k, v) VALUES (?, ?)`, concat(values[2:4])...), req = buildRequest(types.ReadQuery, []types.Query{ buildQuery(`SELECT v FROM t1 WHERE k=?`, values[i][0]), }) - _, resp1, err = st1.Query(req) + _, resp1, err = st1.Query(req, true) So(err, ShouldBeNil) So(resp1, ShouldNotBeNil) - _, resp2, err = st2.Query(req) + _, resp2, err = st2.Query(req, true) So(err, ShouldBeNil) So(resp2, ShouldNotBeNil) So(resp1.Payload, ShouldResemble, resp2.Payload) From 80c8f3813206c23fe3c626f92e6d4184e5e7dbf2 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 21 Jan 2019 18:21:24 +0800 Subject: [PATCH 283/302] Disable manual trace in runtime test --- kayak/runtime_test.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/kayak/runtime_test.go b/kayak/runtime_test.go index 90d178c85..6f1d7878a 100644 --- a/kayak/runtime_test.go +++ b/kayak/runtime_test.go @@ -39,7 +39,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/storage" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" - "github.com/CovenantSQL/CovenantSQL/utils/trace" mock_conn "github.com/jordwest/mock-conn" "github.com/pkg/errors" . 
"github.com/smartystreets/goconvey/convey" @@ -210,11 +209,6 @@ func TestRuntime(t *testing.T) { lvl := log.GetLevel() log.SetLevel(log.DebugLevel) defer log.SetLevel(lvl) - f, err := os.Create("trace") - So(err, ShouldBeNil) - defer f.Close() - trace.Start(f) - defer trace.Stop() db1, err := newSQLiteStorage("test1.db") So(err, ShouldBeNil) From fb4d4537f2b58b1738038c443178915152106166 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Wed, 23 Jan 2019 05:18:07 +0800 Subject: [PATCH 284/302] Even more traces --- client/conn.go | 53 ++++++++++++++++++++---------- cmd/cql-minerd/integration_test.go | 21 ++++++++---- cmd/cql-minerd/pprof.sh | 2 +- kayak/callbacks.go | 8 ++--- kayak/commit.go | 3 ++ kayak/log.go | 1 + kayak/runtime.go | 12 ------- kayak/tracker.go | 12 ++++++- types/request_type.go | 8 +++++ types/response_type.go | 8 +++++ worker/db.go | 10 +++--- 11 files changed, 92 insertions(+), 46 deletions(-) diff --git a/client/conn.go b/client/conn.go index 383d47b6c..7dc0a3a1b 100644 --- a/client/conn.go +++ b/client/conn.go @@ -31,6 +31,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/CovenantSQL/CovenantSQL/utils/trace" "github.com/pkg/errors" ) @@ -237,6 +238,8 @@ func (c *conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, e // ExecContext implements the driver.ExecerContext.ExecContext method. 
func (c *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (result driver.Result, err error) { + defer trace.StartRegion(ctx, "dbExec").End() + if atomic.LoadInt32(&c.closed) != 0 { err = driver.ErrBadConn return @@ -246,7 +249,7 @@ func (c *conn) ExecContext(ctx context.Context, query string, args []driver.Name sq := convertQuery(query, args) var affectedRows, lastInsertID int64 - if affectedRows, lastInsertID, _, err = c.addQuery(types.WriteQuery, sq); err != nil { + if affectedRows, lastInsertID, _, err = c.addQuery(ctx, types.WriteQuery, sq); err != nil { return } @@ -260,6 +263,8 @@ func (c *conn) ExecContext(ctx context.Context, query string, args []driver.Name // QueryContext implements the driver.QueryerContext.QueryContext method. func (c *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (rows driver.Rows, err error) { + defer trace.StartRegion(ctx, "dbQuery").End() + if atomic.LoadInt32(&c.closed) != 0 { err = driver.ErrBadConn return @@ -267,7 +272,7 @@ func (c *conn) QueryContext(ctx context.Context, query string, args []driver.Nam // TODO(xq262144): make use of the ctx argument sq := convertQuery(query, args) - _, _, rows, err = c.addQuery(types.ReadQuery, sq) + _, _, rows, err = c.addQuery(ctx, types.ReadQuery, sq) return } @@ -289,7 +294,7 @@ func (c *conn) Commit() (err error) { if len(c.queries) > 0 { // send query - if _, _, _, err = c.sendQuery(types.WriteQuery, c.queries); err != nil { + if _, _, _, err = c.sendQuery(context.Background(), types.WriteQuery, c.queries); err != nil { return } } @@ -319,7 +324,7 @@ func (c *conn) Rollback() error { return nil } -func (c *conn) addQuery(queryType types.QueryType, query *types.Query) (affectedRows int64, lastInsertID int64, rows driver.Rows, err error) { +func (c *conn) addQuery(ctx context.Context, queryType types.QueryType, query *types.Query) (affectedRows int64, lastInsertID int64, rows driver.Rows, err error) { if c.inTransaction { // 
check query type, enqueue query if queryType == types.ReadQuery { @@ -344,10 +349,12 @@ func (c *conn) addQuery(queryType types.QueryType, query *types.Query) (affected "args": query.Args, }).Debug("execute query") - return c.sendQuery(queryType, []types.Query{*query}) + return c.sendQuery(ctx, queryType, []types.Query{*query}) } -func (c *conn) sendQuery(queryType types.QueryType, queries []types.Query) (affectedRows int64, lastInsertID int64, rows driver.Rows, err error) { +func (c *conn) sendQuery(ctx context.Context, queryType types.QueryType, queries []types.Query) (affectedRows int64, lastInsertID int64, rows driver.Rows, err error) { + ctx, task := trace.NewTask(ctx, "sendQuery") + defer task.End() var uc *pconn // peer connection used to execute the queries uc = c.leader @@ -391,17 +398,26 @@ func (c *conn) sendQuery(queryType types.QueryType, queries []types.Query) (affe }, } - if err = req.Sign(c.privKey); err != nil { + if err = func() error { + defer trace.StartRegion(ctx, "signRequest").End() + return req.Sign(c.privKey) + }(); err != nil { return } var response types.Response - if err = uc.pCaller.Call(route.DBSQuery.String(), req, &response); err != nil { + if err = func() error { + defer trace.StartRegion(ctx, queryType.String()+"Query").End() + return uc.pCaller.Call(route.DBSQuery.String(), req, &response) + }(); err != nil { return } // verify response - if err = response.Verify(); err != nil { + if err = func() error { + defer trace.StartRegion(ctx, "verifyResponse").End() + return response.Verify() + }(); err != nil { return } rows = newRows(&response) @@ -412,15 +428,18 @@ func (c *conn) sendQuery(queryType types.QueryType, queries []types.Query) (affe } // build ack - uc.ackCh <- &types.Ack{ - Header: types.SignedAckHeader{ - AckHeader: types.AckHeader{ - Response: response.Header, - NodeID: c.localNodeID, - Timestamp: getLocalTime(), + func() { + defer trace.StartRegion(ctx, "ackEnqueue").End() + uc.ackCh <- &types.Ack{ + Header: 
types.SignedAckHeader{ + AckHeader: types.AckHeader{ + Response: response.Header, + NodeID: c.localNodeID, + Timestamp: getLocalTime(), + }, }, - }, - } + } + }() return } diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index fc6f8f5bc..22ec57b00 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -46,6 +46,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/CovenantSQL/CovenantSQL/utils/trace" sqlite3 "github.com/CovenantSQL/go-sqlite3-encrypt" . "github.com/smartystreets/goconvey/convey" yaml "gopkg.in/yaml.v2" @@ -263,7 +264,7 @@ func startNodesProfile(bypassSign bool) { FJ(baseDir, "./bin/cql-minerd"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_0/config.yaml"), "-cpu-profile", FJ(baseDir, "./cmd/cql-minerd/miner0.profile"), - //"-trace-file", FJ(baseDir, "./cmd/cql-minerd/miner0.trace"), + "-trace-file", FJ(baseDir, "./cmd/cql-minerd/miner0.trace"), "-metric-graphite-server", "192.168.2.100:2003", "-profile-server", "0.0.0.0:8080", "-metric-log", @@ -281,7 +282,7 @@ func startNodesProfile(bypassSign bool) { FJ(baseDir, "./bin/cql-minerd"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_1/config.yaml"), "-cpu-profile", FJ(baseDir, "./cmd/cql-minerd/miner1.profile"), - //"-trace-file", FJ(baseDir, "./cmd/cql-minerd/miner1.trace"), + "-trace-file", FJ(baseDir, "./cmd/cql-minerd/miner1.trace"), "-metric-graphite-server", "192.168.2.100:2003", "-profile-server", "0.0.0.0:8081", "-metric-log", @@ -299,7 +300,7 @@ func startNodesProfile(bypassSign bool) { FJ(baseDir, "./bin/cql-minerd"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_2/config.yaml"), "-cpu-profile", FJ(baseDir, "./cmd/cql-minerd/miner2.profile"), - //"-trace-file", FJ(baseDir, "./cmd/cql-minerd/miner2.trace"), + "-trace-file", FJ(baseDir, 
"./cmd/cql-minerd/miner2.trace"), "-metric-graphite-server", "192.168.2.100:2003", "-profile-server", "0.0.0.0:8082", "-metric-log", @@ -647,20 +648,25 @@ func benchDB(b *testing.B, db *sql.DB, createDB bool) { ii := atomic.AddInt64(&i, 1) index := ROWSTART + ii //start := time.Now() - _, err = db.Exec("INSERT INTO "+TABLENAME+" ( k, v1 ) VALUES"+ + + ctx, task := trace.NewTask(context.Background(), "BenchInsert") + + _, err = db.ExecContext(ctx, "INSERT INTO "+TABLENAME+" ( k, v1 ) VALUES"+ "(?, ?)", index, ii, ) //log.Warnf("insert index = %d %v", index, time.Since(start)) for err != nil && err.Error() == sqlite3.ErrBusy.Error() { // retry forever log.Warnf("index = %d retried", index) - _, err = db.Exec("INSERT INTO "+TABLENAME+" ( k, v1 ) VALUES"+ + _, err = db.ExecContext(ctx, "INSERT INTO "+TABLENAME+" ( k, v1 ) VALUES"+ "(?, ?)", index, ii, ) } if err != nil { b.Fatal(err) } + + task.End() } }) }) @@ -690,9 +696,11 @@ func benchDB(b *testing.B, db *sql.DB, createDB bool) { } else { //has data before ROWSTART index = rand.Int63n(count - 1) } + + ctx, task := trace.NewTask(context.Background(), "BenchSelect") //log.Debugf("index = %d", index) //start := time.Now() - row := db.QueryRow("SELECT v1 FROM "+TABLENAME+" WHERE k = ? LIMIT 1", index) + row := db.QueryRowContext(ctx, "SELECT v1 FROM "+TABLENAME+" WHERE k = ? 
LIMIT 1", index) //log.Warnf("select index = %d %v", index, time.Since(start)) var result []byte err = row.Scan(&result) @@ -700,6 +708,7 @@ func benchDB(b *testing.B, db *sql.DB, createDB bool) { log.Errorf("index = %d", index) b.Fatal(err) } + task.End() } }) }) diff --git a/cmd/cql-minerd/pprof.sh b/cmd/cql-minerd/pprof.sh index 4eb3555b5..c47e218b2 100755 --- a/cmd/cql-minerd/pprof.sh +++ b/cmd/cql-minerd/pprof.sh @@ -4,7 +4,7 @@ make -C ../../ clean make -C ../../ use_all_cores -go test -bench=^BenchmarkMinerTwo$ -benchtime=15s -run ^$ +go test -bench=^BenchmarkMinerTwo$ -benchtime=15s -run ^$ -trace client.trace go tool pprof -text miner1.profile > pprof.txt go tool pprof -svg miner1.profile > tree.svg go-torch -t 180 --width=2400 miner1.profile diff --git a/kayak/callbacks.go b/kayak/callbacks.go index e0814115b..0bc1cc20f 100644 --- a/kayak/callbacks.go +++ b/kayak/callbacks.go @@ -24,7 +24,7 @@ import ( ) func (r *Runtime) doCheck(ctx context.Context, req interface{}) (err error) { - defer trace.StartRegion(ctx, "check").End() + defer trace.StartRegion(ctx, "checkCallback").End() if err = r.sh.Check(req); err != nil { err = errors.Wrap(err, "verify log") } @@ -33,7 +33,7 @@ func (r *Runtime) doCheck(ctx context.Context, req interface{}) (err error) { } func (r *Runtime) doEncodePayload(ctx context.Context, req interface{}) (enc []byte, err error) { - defer trace.StartRegion(ctx, "encodePayload").End() + defer trace.StartRegion(ctx, "encodePayloadCallback").End() if enc, err = r.sh.EncodePayload(req); err != nil { err = errors.Wrap(err, "encode kayak payload failed") } @@ -41,7 +41,7 @@ func (r *Runtime) doEncodePayload(ctx context.Context, req interface{}) (enc []b } func (r *Runtime) doDecodePayload(ctx context.Context, data []byte) (req interface{}, err error) { - defer trace.StartRegion(ctx, "decodePayload").End() + defer trace.StartRegion(ctx, "decodePayloadCallback").End() if req, err = r.sh.DecodePayload(data); err != nil { err = errors.Wrap(err, 
"decode kayak payload failed") } @@ -49,6 +49,6 @@ func (r *Runtime) doDecodePayload(ctx context.Context, data []byte) (req interfa } func (r *Runtime) doCommit(ctx context.Context, req interface{}, isLeader bool) (result interface{}, err error) { - defer trace.StartRegion(ctx, "commit").End() + defer trace.StartRegion(ctx, "commitCallback").End() return r.sh.Commit(req, isLeader) } diff --git a/kayak/commit.go b/kayak/commit.go index a3b46d09a..2fd34d33d 100644 --- a/kayak/commit.go +++ b/kayak/commit.go @@ -29,6 +29,8 @@ import ( ) func (r *Runtime) leaderCommitResult(ctx context.Context, tm *timer.Timer, reqPayload interface{}, prepareLog *kt.Log) (res *commitFuture) { + defer trace.StartRegion(ctx, "leaderCommitResult").End() + // decode log and send to commit channel to process res = newCommitFuture() @@ -225,6 +227,7 @@ func (r *Runtime) followerDoCommit(req *commitReq) { func (r *Runtime) getPrepareLog(ctx context.Context, l *kt.Log) (lastCommitIndex uint64, pl *kt.Log, err error) { defer trace.StartRegion(ctx, "getPrepareLog").End() + var prepareIndex uint64 // decode prepare index diff --git a/kayak/log.go b/kayak/log.go index b6d632f86..72c68f865 100644 --- a/kayak/log.go +++ b/kayak/log.go @@ -53,6 +53,7 @@ func (r *Runtime) newLog(ctx context.Context, logType kt.LogType, data []byte) ( func (r *Runtime) writeWAL(ctx context.Context, l *kt.Log) (err error) { defer trace.StartRegion(ctx, "writeWal").End() + if err = r.wal.Write(l); err != nil { err = errors.Wrap(err, "write follower log failed") } diff --git a/kayak/runtime.go b/kayak/runtime.go index 47038a7de..f6bed6e5c 100644 --- a/kayak/runtime.go +++ b/kayak/runtime.go @@ -303,15 +303,11 @@ func (r *Runtime) Apply(ctx context.Context, req interface{}) (result interface{ Debug("kayak leader apply") }() - waitForLockRegion := trace.StartRegion(ctx, "peersLock") - r.peersLock.RLock() defer r.peersLock.RUnlock() tm.Add("peers_lock") - waitForLockRegion.End() - if r.role != proto.Leader { // not leader 
err = kt.ErrNotLeader @@ -354,15 +350,11 @@ func (r *Runtime) Fetch(ctx context.Context, index uint64) (l *kt.Log, err error Debug("kayak log fetch") }() - waitForLockRegion := trace.StartRegion(ctx, "peersLock") - r.peersLock.RLock() defer r.peersLock.RUnlock() tm.Add("peers_lock") - waitForLockRegion.End() - if r.role != proto.Leader { // not leader err = kt.ErrNotLeader @@ -400,15 +392,11 @@ func (r *Runtime) FollowerApply(l *kt.Log) (err error) { Debug("kayak follower apply") }() - waitForLockRegion := trace.StartRegion(ctx, "peersLock") - r.peersLock.RLock() defer r.peersLock.RUnlock() tm.Add("peers_lock") - waitForLockRegion.End() - if r.role == proto.Leader { // not follower err = kt.ErrNotFollower diff --git a/kayak/tracker.go b/kayak/tracker.go index 32671004e..f94440636 100644 --- a/kayak/tracker.go +++ b/kayak/tracker.go @@ -21,6 +21,7 @@ import ( "sync" "sync/atomic" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/utils/trace" ) @@ -115,7 +116,16 @@ func (t *rpcTracker) done() { } func (t *rpcTracker) get(ctx context.Context) (errors map[proto.NodeID]error, meets bool, finished bool) { - defer trace.StartRegion(ctx, "rpcCall").End() + if trace.IsEnabled() { + // get request log type + traceType := "rpcCall" + + if rawReq, ok := t.req.(*kt.ApplyRequest); ok { + traceType += rawReq.Log.Type.String() + } + + defer trace.StartRegion(ctx, traceType).End() + } for { select { diff --git a/types/request_type.go b/types/request_type.go index 651b4990c..bb3eef07d 100644 --- a/types/request_type.go +++ b/types/request_type.go @@ -17,6 +17,7 @@ package types import ( + "context" "fmt" "time" @@ -24,6 +25,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/verifier" "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils/trace" ) //go:generate hsp @@ -119,6 +121,9 @@ func (sh *SignedRequestHeader) 
Sign(signer *asymmetric.PrivateKey) (err error) { // Verify checks hash and signature in whole request. func (r *Request) Verify() (err error) { + _, task := trace.NewTask(context.Background(), "RequestVerify") + defer task.End() + // verify payload hash in signed header if err = verifyHash(&r.Payload, &r.Header.QueriesHash); err != nil { return @@ -129,6 +134,9 @@ func (r *Request) Verify() (err error) { // Sign the request. func (r *Request) Sign(signer *asymmetric.PrivateKey) (err error) { + _, task := trace.NewTask(context.Background(), "RequestSign") + defer task.End() + // set query count r.Header.BatchCount = uint64(len(r.Payload.Queries)) diff --git a/types/response_type.go b/types/response_type.go index 968385eca..bf6a244e7 100644 --- a/types/response_type.go +++ b/types/response_type.go @@ -17,12 +17,14 @@ package types import ( + "context" "time" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/verifier" "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils/trace" "github.com/pkg/errors" ) @@ -87,6 +89,9 @@ func (sh *SignedResponseHeader) Sign(signer *asymmetric.PrivateKey) (err error) // Verify checks hash and signature in whole response. func (sh *Response) Verify() (err error) { + _, task := trace.NewTask(context.Background(), "ResponseVerify") + defer task.End() + // verify data hash in header if err = verifyHash(&sh.Payload, &sh.Header.PayloadHash); err != nil { return @@ -97,6 +102,9 @@ func (sh *Response) Verify() (err error) { // Sign the request. 
func (sh *Response) Sign(signer *asymmetric.PrivateKey) (err error) { + _, task := trace.NewTask(context.Background(), "ResponseSign") + defer task.End() + // set rows count sh.Header.RowCount = uint64(len(sh.Payload.Rows)) diff --git a/worker/db.go b/worker/db.go index b708fef0f..200e0a6f8 100644 --- a/worker/db.go +++ b/worker/db.go @@ -35,6 +35,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/storage" "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/CovenantSQL/CovenantSQL/utils/trace" x "github.com/CovenantSQL/CovenantSQL/xenomint" "github.com/pkg/errors" ) @@ -249,11 +250,15 @@ func (db *Database) Query(request *types.Request) (response *types.Response, err switch request.Header.QueryType { case types.ReadQuery: + _, task := trace.NewTask(context.Background(), "ReadQuery") + defer task.End() if tracker, response, err = db.chain.Query(request, false); err != nil { err = errors.Wrap(err, "failed to query read query") return } case types.WriteQuery: + _, task := trace.NewTask(context.Background(), "WriteQuery") + defer task.End() if db.cfg.UseEventualConsistency { // reset context request.SetContext(context.Background()) @@ -382,11 +387,6 @@ func (db *Database) Destroy() (err error) { } func (db *Database) writeQuery(request *types.Request) (tracker *x.QueryTracker, response *types.Response, err error) { - //ctx := context.Background() - //ctx, task := trace.NewTask(ctx, "writeQuery") - //defer task.End() - //defer trace.StartRegion(ctx, "writeQueryRegion").End() - // check database size first, wal/kayak/chain database size is not included if db.cfg.SpaceLimit > 0 { path := filepath.Join(db.cfg.DataDir, StorageFileName) From af4da18c28a276a73438fccba8439184dc336480 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Thu, 24 Jan 2019 16:07:13 +0800 Subject: [PATCH 285/302] Use client pool and connection pool for multiple physical connections --- client/conn.go | 15 ++-- kayak/runtime_test.go | 40 +++++++-- 
kayak/utils.go | 3 +- rpc/client.go | 15 ++-- rpc/clientpool_caller.go | 151 +++++++++++++++++++++++++++++++ rpc/leak_test.go | 4 +- rpc/pool.go | 186 ++++++++++++++++++++------------------- rpc/pool_test.go | 74 ++++++++-------- rpc/rpcutil_test.go | 13 +-- rpc/server.go | 2 +- rpc/server_test.go | 12 +-- 11 files changed, 350 insertions(+), 165 deletions(-) create mode 100644 rpc/clientpool_caller.go diff --git a/client/conn.go b/client/conn.go index 7dc0a3a1b..6457958bf 100644 --- a/client/conn.go +++ b/client/conn.go @@ -54,7 +54,7 @@ type conn struct { type pconn struct { parent *conn ackCh chan *types.Ack - pCaller *rpc.PersistentCaller + pCaller *rpc.ClientPoolCaller } func newConn(cfg *Config) (c *conn, err error) { @@ -86,7 +86,7 @@ func newConn(cfg *Config) (c *conn, err error) { if cfg.UseLeader { c.leader = &pconn{ parent: c, - pCaller: rpc.NewPersistentCaller(peers.Leader), + pCaller: rpc.NewClientPoolCaller(peers.Leader), } } @@ -97,7 +97,7 @@ func newConn(cfg *Config) (c *conn, err error) { if node != peers.Leader { c.follower = &pconn{ parent: c, - pCaller: rpc.NewPersistentCaller(node), + pCaller: rpc.NewClientPoolCaller(node), } break } @@ -138,7 +138,7 @@ func (c *pconn) stopAckWorkers() { func (c *pconn) ackWorker() { var ( oneTime sync.Once - pc *rpc.PersistentCaller + pc *rpc.ClientPoolCaller err error ) @@ -149,7 +149,7 @@ ackWorkerLoop: break ackWorkerLoop } oneTime.Do(func() { - pc = rpc.NewPersistentCaller(c.pCaller.TargetID) + pc = rpc.NewClientPoolCaller(c.pCaller.TargetID) }) if err = ack.Sign(c.parent.privKey, false); err != nil { log.WithField("target", pc.TargetID).WithError(err).Error("failed to sign ack") @@ -165,7 +165,7 @@ ackWorkerLoop: } if pc != nil { - pc.CloseStream() + pc.Close() } log.Debug("ack worker quiting") @@ -174,7 +174,7 @@ ackWorkerLoop: func (c *pconn) close() error { c.stopAckWorkers() if c.pCaller != nil { - c.pCaller.CloseStream() + c.pCaller.Close() } return nil } @@ -407,6 +407,7 @@ func (c *conn) 
sendQuery(ctx context.Context, queryType types.QueryType, queries var response types.Response if err = func() error { + // writeQuery region defer trace.StartRegion(ctx, queryType.String()+"Query").End() return uc.pCaller.Call(route.DBSQuery.String(), req, &response) }(); err != nil { diff --git a/kayak/runtime_test.go b/kayak/runtime_test.go index 6f1d7878a..2291cd49e 100644 --- a/kayak/runtime_test.go +++ b/kayak/runtime_test.go @@ -31,6 +31,7 @@ import ( "time" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/etls" "github.com/CovenantSQL/CovenantSQL/kayak" kt "github.com/CovenantSQL/CovenantSQL/kayak/types" kl "github.com/CovenantSQL/CovenantSQL/kayak/wal" @@ -42,6 +43,7 @@ import ( mock_conn "github.com/jordwest/mock-conn" "github.com/pkg/errors" . "github.com/smartystreets/goconvey/convey" + "github.com/xtaci/smux" ) func init() { @@ -185,20 +187,42 @@ func (s *fakeService) serveConn(c net.Conn) { type fakeCaller struct { m *fakeMux target proto.NodeID + s *smux.Session } -func newFakeCaller(m *fakeMux, nodeID proto.NodeID) *fakeCaller { - return &fakeCaller{ +func newFakeCaller(m *fakeMux, nodeID proto.NodeID) (c *fakeCaller) { + fakeConn := mock_conn.NewConn() + cipher1 := etls.NewCipher([]byte("123")) + cipher2 := etls.NewCipher([]byte("123")) + serverConn := etls.NewConn(fakeConn.Server, cipher1, nil) + clientConn := etls.NewConn(fakeConn.Client, cipher2, nil) + + muxSess, _ := smux.Server(serverConn, smux.DefaultConfig()) + go func() { + for { + s, err := muxSess.AcceptStream() + if err != nil { + break + } + + go c.m.get(c.target).serveConn(s) + } + }() + + muxClientSess, _ := smux.Client(clientConn, smux.DefaultConfig()) + + c = &fakeCaller{ m: m, target: nodeID, + s: muxClientSess, } + + return } func (c *fakeCaller) Call(method string, req interface{}, resp interface{}) (err error) { - fakeConn := mock_conn.NewConn() - - go c.m.get(c.target).serveConn(fakeConn.Server) - client := 
rpc.NewClientWithCodec(utils.GetMsgPackClientCodec(fakeConn.Client)) + s, err := c.s.OpenStream() + client := rpc.NewClientWithCodec(utils.GetMsgPackClientCodec(s)) defer client.Close() return client.Call(method, req, resp) @@ -516,7 +540,7 @@ func TestRuntime(t *testing.T) { func BenchmarkRuntime(b *testing.B) { Convey("runtime test", b, func(c C) { - log.SetLevel(log.DebugLevel) + log.SetLevel(log.FatalLevel) f, err := os.OpenFile("test.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0600) So(err, ShouldBeNil) log.SetOutput(f) @@ -674,7 +698,7 @@ func BenchmarkRuntime(b *testing.B) { So(d1, ShouldHaveLength, 1) So(d1[0], ShouldHaveLength, 1) _ = total - So(fmt.Sprint(d1[0][0]), ShouldEqual, fmt.Sprint(total)) + //So(fmt.Sprint(d1[0][0]), ShouldEqual, fmt.Sprint(total)) //_, _, d2, _ := db2.Query(context.Background(), []storage.Query{ // {Pattern: "SELECT COUNT(1) FROM test"}, diff --git a/kayak/utils.go b/kayak/utils.go index 79382e214..eef955df0 100644 --- a/kayak/utils.go +++ b/kayak/utils.go @@ -18,13 +18,14 @@ package kayak import ( "encoding/binary" + kt "github.com/CovenantSQL/CovenantSQL/kayak/types" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/rpc" ) func (r *Runtime) getCaller(id proto.NodeID) Caller { - var caller Caller = rpc.NewPersistentCaller(id) + var caller Caller = rpc.NewClientPoolCaller(id) rawCaller, _ := r.callerMap.LoadOrStore(id, caller) return rawCaller.(Caller) } diff --git a/rpc/client.go b/rpc/client.go index 2fa769e5c..efdca3bd0 100644 --- a/rpc/client.go +++ b/rpc/client.go @@ -21,15 +21,14 @@ import ( "net" "net/rpc" - "github.com/pkg/errors" - mux "github.com/xtaci/smux" - "github.com/CovenantSQL/CovenantSQL/crypto/etls" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/utils" + "github.com/pkg/errors" + mux "github.com/xtaci/smux" 
) const ( @@ -45,14 +44,14 @@ type Client struct { } var ( - // YamuxConfig holds the default Yamux config - YamuxConfig *mux.Config + // MuxConfig holds the default mux config + MuxConfig *mux.Config // DefaultDialer holds the default dialer of SessionPool DefaultDialer func(nodeID proto.NodeID) (conn net.Conn, err error) ) func init() { - YamuxConfig = mux.DefaultConfig() + MuxConfig = mux.DefaultConfig() DefaultDialer = dialToNode } @@ -111,7 +110,7 @@ func DialToNode(nodeID proto.NodeID, pool *SessionPool, isAnonymous bool) (conn if err != nil { return } - sess, err = mux.Client(ETLSConn, YamuxConfig) + sess, err = mux.Client(ETLSConn, MuxConfig) if err != nil { err = errors.Wrapf(err, "init yamux client to %s failed", nodeID) return @@ -192,7 +191,7 @@ func InitClientConn(conn net.Conn) (client *Client, err error) { muxConn, ok := conn.(*mux.Stream) if !ok { var sess *mux.Session - sess, err = mux.Client(conn, YamuxConfig) + sess, err = mux.Client(conn, MuxConfig) if err != nil { err = errors.Wrap(err, "init mux client failed") return diff --git a/rpc/clientpool_caller.go b/rpc/clientpool_caller.go new file mode 100644 index 000000000..a33822167 --- /dev/null +++ b/rpc/clientpool_caller.go @@ -0,0 +1,151 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package rpc + +import ( + "context" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/route" + "github.com/CovenantSQL/CovenantSQL/utils/trace" + "github.com/pkg/errors" + "io" + "net" + "net/rpc" + "strings" + "sync" +) + +// ClientPoolCaller is a wrapper for session pooling and client pooling. +type ClientPoolCaller struct { + sessPool *SessionPool + clientPool sync.Pool + TargetID proto.NodeID +} + +// NewClientPoolCaller returns a client/session pool caller. +func NewClientPoolCaller(target proto.NodeID) (c *ClientPoolCaller) { + c = &ClientPoolCaller{ + TargetID: target, + sessPool: GetSessionPoolInstance(), + } + + c.clientPool = sync.Pool{ + New: c.initClient, + } + + return +} + +func (c *ClientPoolCaller) initClient() interface{} { + var ( + client *Client + err error + ) + + client, err = c.initClientEx(false) + if err != nil { + return err + } + + return client +} + +func (c *ClientPoolCaller) initClientEx(isAnonymous bool) (client *Client, err error) { + var conn net.Conn + conn, err = DialToNode(c.TargetID, c.sessPool, isAnonymous) + if err != nil { + err = errors.Wrap(err, "dial to node failed") + return + } + client, err = InitClientConn(conn) + if err != nil { + err = errors.Wrap(err, "init RPC client failed") + return + } + return +} + +func (c *ClientPoolCaller) allocClient(isAnonymous bool) (client *Client, err error) { + if isAnonymous { + return c.initClientEx(true) + } + + rawClient := c.clientPool.Get() + + if rawClient == nil { + err = errors.New("no available client") + return + } + + switch v := rawClient.(type) { + case *Client: + client = v + case error: + err = v + } + + return +} + +func (c *ClientPoolCaller) Call(method string, args interface{}, reply interface{}) (err error) { + ctx, task := trace.NewTask(context.Background(), "Call"+method) + defer task.End() + + var ( + isAnonymous = method == route.DHTPing.String() + client *Client + ) + + func() { + defer trace.StartRegion(ctx, 
"allocClient").End() + client, err = c.allocClient(isAnonymous) + }() + if err != nil { + return + } + + func() { + defer trace.StartRegion(ctx, "realCall").End() + err = client.Call(method, args, reply) + }() + + func() { + defer trace.StartRegion(ctx, "cleanup").End() + if !isAnonymous && (err == nil || (err != io.EOF && + err != io.ErrUnexpectedEOF && + err != io.ErrClosedPipe && + err != rpc.ErrShutdown && + !strings.Contains(strings.ToLower(err.Error()), "shut down") && + !strings.Contains(strings.ToLower(err.Error()), "broken pipe"))) { + // put back connection + c.clientPool.Put(client) + } else { + // close + client.Close() + } + }() + + if err != nil { + err = errors.Wrapf(err, "call %s failed", method) + } + + return +} + +func (c *ClientPoolCaller) Close() { + // free pool +} diff --git a/rpc/leak_test.go b/rpc/leak_test.go index 7bca030c1..9e93bc9f1 100644 --- a/rpc/leak_test.go +++ b/rpc/leak_test.go @@ -31,7 +31,7 @@ import ( ) func TestSessionPool_SessionBroken(t *testing.T) { - log.SetLevel(log.DebugLevel) + log.SetLevel(log.FatalLevel) var err error conf.GConf, err = conf.LoadConfig(FJ(testWorkingDir, "./leak/client.yaml")) @@ -109,7 +109,7 @@ func TestSessionPool_SessionBroken(t *testing.T) { } pool := GetSessionPoolInstance() - sess, _ := pool.getSessionFromPool(leaderNodeID) + sess, _ := pool.getSession(leaderNodeID) log.Debugf("session for %s, %#v", leaderNodeID, sess) sess.Close() diff --git a/rpc/pool.go b/rpc/pool.go index 1611c1ab1..761fa5244 100644 --- a/rpc/pool.go +++ b/rpc/pool.go @@ -20,17 +20,16 @@ import ( "net" "sync" + "github.com/CovenantSQL/CovenantSQL/proto" "github.com/pkg/errors" mux "github.com/xtaci/smux" - - "github.com/CovenantSQL/CovenantSQL/proto" ) +const MaxPhysicalConnection = 10 + // SessPool is the session pool interface type SessPool interface { Get(proto.NodeID) (net.Conn, error) - Set(proto.NodeID, net.Conn) bool - Remove(proto.NodeID) Close() Len() int } @@ -43,16 +42,18 @@ type SessionMap 
map[proto.NodeID]*Session // Session is the Session type of SessionPool type Session struct { - ID proto.NodeID - Sess *mux.Session - conn net.Conn + sync.RWMutex + nodeDialer NodeDialer + target proto.NodeID + sess []*mux.Session + offset int } // SessionPool is the struct type of session pool type SessionPool struct { + sync.RWMutex sessions SessionMap nodeDialer NodeDialer - sync.RWMutex } var ( @@ -62,7 +63,72 @@ var ( // Close closes the session func (s *Session) Close() { - s.Sess.Close() + s.Lock() + defer s.Unlock() + for _, s := range s.sess { + _ = s.Close() + } + s.sess = s.sess[:0] +} + +// Get returns new connection from session. +func (s *Session) Get() (conn net.Conn, err error) { + s.Lock() + defer s.Unlock() + s.offset++ + s.offset %= MaxPhysicalConnection + + var ( + sess *mux.Session + stream *mux.Stream + sessions []*mux.Session + ) + + for { + if len(s.sess) <= s.offset { + // open new session + sess, err = s.newSession() + if err != nil { + return + } + s.sess = append(s.sess, sess) + s.offset = len(s.sess) - 1 + } else { + sess = s.sess[s.offset] + } + + // open connection + stream, err = sess.OpenStream() + if err != nil { + // invalidate session + sessions = nil + sessions = append(sessions, s.sess[0:s.offset]...) + sessions = append(sessions, s.sess[s.offset+1:]...) + s.sess = sessions + continue + } + + conn = stream + return + } +} + +// Len returns physical connection count. 
+func (s *Session) Len() int { + s.RLock() + defer s.RUnlock() + return len(s.sess) +} + +func (s *Session) newSession() (sess *mux.Session, err error) { + var conn net.Conn + conn, err = s.nodeDialer(s.target) + if err != nil { + err = errors.Wrap(err, "dialing new session connection failed") + return + } + + return mux.Client(conn, MuxConfig) } // newSessionPool creates a new SessionPool @@ -81,104 +147,42 @@ func GetSessionPoolInstance() *SessionPool { return instance } -// toSession wraps net.Conn to mux.Session -func toSession(id proto.NodeID, conn net.Conn) (sess *Session, err error) { - // create mux session - newSess, err := mux.Client(conn, YamuxConfig) - if err != nil { - //log.Errorf("dial to new node %s failed: %s", id, err) // no log in lock - return - } - // Store it - sess = &Session{ - ID: id, - Sess: newSess, - conn: conn, - } - return -} - -// LoadOrStore returns the existing Session for the node id if present. Otherwise, it stores and -// returns the given Session. The loaded result is true if the Session was loaded, false if stored. 
-func (p *SessionPool) LoadOrStore(id proto.NodeID, newSess *Session) (sess *Session, loaded bool) { +func (p *SessionPool) getSession(id proto.NodeID) (sess *Session, loaded bool) { // NO Blocking operation in this function p.Lock() + defer p.Unlock() sess, exist := p.sessions[id] if exist { - p.Unlock() //log.WithField("node", id).Debug("load session for target node") loaded = true } else { - p.sessions[id] = newSess - p.Unlock() - sess = newSess + // new session + sess = &Session{ + nodeDialer: p.nodeDialer, + target: id, + } + p.sessions[id] = sess } return } -func (p *SessionPool) getSessionFromPool(id proto.NodeID) (sess *Session, ok bool) { - sess, ok = p.sessions[id] - return -} - // Get returns existing session to the node, if not exist try best to create one func (p *SessionPool) Get(id proto.NodeID) (conn net.Conn, err error) { - // first try to get one session from pool - p.Lock() - cachedConn, ok := p.getSessionFromPool(id) - p.Unlock() - if ok { - conn, err = cachedConn.Sess.OpenStream() - if err == nil { - //log.WithField("node", id).Debug("reusing session") - return - } - //log.WithField("target", id).WithError(err).Debug("open session failed") - p.Remove(id) - } - - //log.WithField("target", id).Debug("dialing new session") - // Can't find existing Session, try to dial one - newConn, err := p.nodeDialer(id) - if err != nil { - err = errors.Wrapf(err, "dial new session to %s failed", id) - return - } - newSess, err := toSession(id, newConn) - if err != nil { - newConn.Close() - err = errors.Wrapf(err, "create new session to %s failed", id) - return - } - sess, loaded := p.LoadOrStore(id, newSess) - if loaded { - newSess.Close() - } - return sess.Sess.OpenStream() -} - -// Set tries to set a new connection to the pool, typically from Accept() -// if there is an existing one, just do nothing -func (p *SessionPool) Set(id proto.NodeID, conn net.Conn) (exist bool) { - sess, err := toSession(id, conn) - if err != nil { - return - } - _, exist = 
p.LoadOrStore(id, sess) - return + var sess *Session + sess, _ = p.getSession(id) + return sess.Get() } // Remove the node sessions in the pool func (p *SessionPool) Remove(id proto.NodeID) { p.Lock() - sess, ok := p.getSessionFromPool(id) - if ok { - delete(p.sessions, id) - p.Unlock() + defer p.Unlock() + sess, exist := p.sessions[id] + if exist { sess.Close() - } else { - p.Unlock() + delete(p.sessions, id) } + return } // Close closes all sessions in the pool @@ -192,8 +196,12 @@ func (p *SessionPool) Close() { } // Len returns the session counts in the pool -func (p *SessionPool) Len() int { +func (p *SessionPool) Len() (total int) { p.RLock() defer p.RUnlock() - return len(p.sessions) + + for _, s := range p.sessions { + total += s.Len() + } + return } diff --git a/rpc/pool_test.go b/rpc/pool_test.go index 6cac13b3b..a60da6fab 100644 --- a/rpc/pool_test.go +++ b/rpc/pool_test.go @@ -32,7 +32,7 @@ import ( const ( localAddr = "127.0.0.1:4444" localAddr2 = "127.0.0.1:4445" - concurrency = 4 + concurrency = 15 packetCount = 100 ) @@ -48,35 +48,37 @@ func server(c C, localAddr string, n int) error { // Accept a TCP connection listener, err := net.Listen("tcp", localAddr) go func() { - conn, err := listener.Accept() - c.So(err, ShouldBeNil) - - // Setup server side of mux - log.Println("creating server session") - session, err := mux.Server(conn, nil) - c.So(err, ShouldBeNil) - for i := 0; i < concurrency; i++ { - go func(i int, c2 C) { - // Accept a stream - //c2.So(err, ShouldBeNil) - // Stream implements net.Conn - // Listen for a message - //c2.So(string(buf1), ShouldEqual, "ping") - log.Println("accepting stream") - stream, err := session.AcceptStream() - if err == nil { - buf1 := make([]byte, 4) - for i := 0; i < n; { - n, err := stream.Read(buf1) - if n == 4 && err == nil { - i++ - c2.So(string(buf1), ShouldEqual, "ping") + go func() { + conn, err := listener.Accept() + c.So(err, ShouldBeNil) + + // Setup server side of mux + log.Println("creating server 
session") + session, err := mux.Server(conn, nil) + c.So(err, ShouldBeNil) + + for i := 0; i < concurrency; i++ { + // Accept a stream + //c.So(err, ShouldBeNil) + // Stream implements net.Conn + // Listen for a message + //c.So(string(buf1), ShouldEqual, "ping") + log.Println("accepting stream") + stream, err := session.AcceptStream() + if err == nil { + buf1 := make([]byte, 4) + for i := 0; i < n; { + n, err := stream.Read(buf1) + if n == 4 && err == nil { + i++ + c.So(string(buf1), ShouldEqual, "ping") + } } + log.Debugf("buf#%d read done", i) } - log.Debugf("buf#%d read done", i) } - }(i, c) + }() } }() return err @@ -84,7 +86,7 @@ func server(c C, localAddr string, n int) error { func BenchmarkSessionPool_Get(b *testing.B) { Convey("session pool", b, func(c C) { - log.SetLevel(log.DebugLevel) + log.SetLevel(log.FatalLevel) p := newSessionPool(func(nodeID proto.NodeID) (net.Conn, error) { log.Debugf("creating new connection to %s", nodeID) return net.Dial("tcp", string(nodeID)) @@ -116,7 +118,7 @@ func BenchmarkSessionPool_Get(b *testing.B) { func TestNewSessionPool(t *testing.T) { Convey("session pool", t, func(c C) { - log.SetLevel(log.DebugLevel) + log.SetLevel(log.FatalLevel) p := newSessionPool(func(nodeID proto.NodeID) (net.Conn, error) { log.Debugf("creating new connection to %s", nodeID) return net.Dial("tcp", string(nodeID)) @@ -149,15 +151,12 @@ func TestNewSessionPool(t *testing.T) { } wg.Wait() - So(p.Len(), ShouldEqual, 1) + So(p.Len(), ShouldEqual, MaxPhysicalConnection) server(c, localAddr2, packetCount) - conn, _ := net.Dial("tcp", localAddr2) - exists := p.Set(proto.NodeID(localAddr2), conn) - So(exists, ShouldBeFalse) - exists = p.Set(proto.NodeID(localAddr2), conn) - So(exists, ShouldBeTrue) - So(p.Len(), ShouldEqual, 2) + _, err := p.Get(proto.NodeID(localAddr2)) + So(err, ShouldBeNil) + So(p.Len(), ShouldEqual, MaxPhysicalConnection+1) wg2 := &sync.WaitGroup{} wg2.Add(concurrency) @@ -182,14 +181,13 @@ func TestNewSessionPool(t *testing.T) 
{ } wg2.Wait() - So(p.Len(), ShouldEqual, 2) + So(p.Len(), ShouldEqual, MaxPhysicalConnection*2) p.Remove(proto.NodeID(localAddr2)) - So(p.Len(), ShouldEqual, 1) + So(p.Len(), ShouldEqual, MaxPhysicalConnection) p.Close() So(p.Len(), ShouldEqual, 0) - }) Convey("session pool get instance", t, func(c C) { diff --git a/rpc/rpcutil_test.go b/rpc/rpcutil_test.go index 57d716bdb..ea26405de 100644 --- a/rpc/rpcutil_test.go +++ b/rpc/rpcutil_test.go @@ -34,6 +34,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/CovenantSQL/CovenantSQL/utils/trace" . "github.com/smartystreets/goconvey/convey" ) @@ -43,7 +44,7 @@ const ( ) func TestCaller_CallNode(t *testing.T) { - log.SetLevel(log.DebugLevel) + log.SetLevel(log.FatalLevel) os.Remove(PubKeyStorePath) defer os.Remove(PubKeyStorePath) os.Remove(publicKeyStore) @@ -163,7 +164,7 @@ func TestCaller_CallNode(t *testing.T) { } func TestNewPersistentCaller(t *testing.T) { - log.SetLevel(log.DebugLevel) + log.SetLevel(log.FatalLevel) os.Remove(PubKeyStorePath) defer os.Remove(PubKeyStorePath) os.Remove(publicKeyStore) @@ -275,11 +276,11 @@ func TestNewPersistentCaller(t *testing.T) { client2.CloseStream() wg.Wait() - sess, ok := client2.pool.getSessionFromPool(conf.GConf.BP.NodeID) + sess, ok := client2.pool.getSession(conf.GConf.BP.NodeID) if !ok { t.Fatalf("can not find session for %s", conf.GConf.BP.NodeID) } - sess.conn.Close() + sess.Close() client3 := NewPersistentCaller(conf.GConf.BP.NodeID) err = client3.Call("DHT.FindNeighbor", reqF2, respF2) @@ -332,12 +333,14 @@ func BenchmarkPersistentCaller_CallKayakLog(b *testing.B) { b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { + _, task := trace.NewTask(context.Background(), "callOnce") req := &FakeRequest{} req.Log.Data = []byte(strings.Repeat("1", 500)) err = client.Call("Test.Call", req, nil) if err != nil { b.Error(err) } + task.End() } }) 
b.StopTimer() @@ -362,7 +365,7 @@ type FakeRequest struct { } func (s *fakeService) Call(req *FakeRequest, resp *interface{}) (err error) { - time.Sleep(time.Microsecond * 200) + time.Sleep(time.Microsecond * 600) return } diff --git a/rpc/server.go b/rpc/server.go index 8bb834d3d..409e10d76 100644 --- a/rpc/server.go +++ b/rpc/server.go @@ -135,7 +135,7 @@ func (s *Server) handleConn(conn net.Conn) { remoteNodeID = conn.(*etls.CryptoConn).NodeID } - sess, err := mux.Server(conn, YamuxConfig) + sess, err := mux.Server(conn, MuxConfig) if err != nil { err = errors.Wrap(err, "create mux server failed") return diff --git a/rpc/server_test.go b/rpc/server_test.go index 0c089150e..f4a4b5ea6 100644 --- a/rpc/server_test.go +++ b/rpc/server_test.go @@ -79,7 +79,7 @@ func (s *TestService) IncCounterSimpleArgs(step int, ret *int) error { } func TestIncCounter(t *testing.T) { - log.SetLevel(log.DebugLevel) + log.SetLevel(log.FatalLevel) addr := "127.0.0.1:0" l, err := net.Listen("tcp", addr) if err != nil { @@ -120,7 +120,7 @@ func TestIncCounter(t *testing.T) { } func TestIncCounterSimpleArgs(t *testing.T) { - log.SetLevel(log.DebugLevel) + log.SetLevel(log.FatalLevel) addr := "127.0.0.1:0" l, err := net.Listen("tcp", addr) if err != nil { @@ -149,7 +149,7 @@ func TestIncCounterSimpleArgs(t *testing.T) { func TestEncryptIncCounterSimpleArgs(t *testing.T) { defer os.Remove(PubKeyStorePath) - log.SetLevel(log.DebugLevel) + log.SetLevel(log.FatalLevel) addr := "127.0.0.1:0" masterKey := []byte("abc") server, err := NewServerWithService(ServiceMap{"Test": NewTestService()}) @@ -187,7 +187,7 @@ func TestEncryptIncCounterSimpleArgs(t *testing.T) { func TestETLSBug(t *testing.T) { defer os.Remove(PubKeyStorePath) - log.SetLevel(log.DebugLevel) + log.SetLevel(log.FatalLevel) addr := "127.0.0.1:0" masterKey := []byte("abc") server, err := NewServerWithService(ServiceMap{"Test": NewTestService()}) @@ -234,7 +234,7 @@ func TestETLSBug(t *testing.T) { func TestEncPingFindNeighbor(t 
*testing.T) { os.Remove(PubKeyStorePath) defer os.Remove(PubKeyStorePath) - log.SetLevel(log.DebugLevel) + log.SetLevel(log.FatalLevel) addr := "127.0.0.1:0" masterKey := []byte("abc") dht, err := route.NewDHTService(PubKeyStorePath, new(consistent.KMSStorage), true) @@ -315,7 +315,7 @@ func TestEncPingFindNeighbor(t *testing.T) { } func TestServer_Close(t *testing.T) { - log.SetLevel(log.DebugLevel) + log.SetLevel(log.FatalLevel) addr := "127.0.0.1:0" l, err := net.Listen("tcp", addr) if err != nil { From 20f4f85265a71813960105a8b7bf2a90febde083 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 28 Jan 2019 11:35:45 +0800 Subject: [PATCH 286/302] Drop off nested signature in embedded request/response/ack structure --- client/conn.go | 9 +- cmd/cql-minerd/various_metric_test.go | 6 +- cmd/cql-observer/api.go | 12 +- sqlchain/ackindex.go | 18 +- sqlchain/ackindex_test.go | 14 +- sqlchain/chain.go | 10 +- sqlchain/xxx_test.go | 16 +- types/ack_type.go | 66 +++--- types/ack_type_gen.go | 37 ++-- types/no_ack_report_type.go | 129 ------------ types/no_ack_report_type_gen.go | 224 -------------------- types/no_ack_report_type_gen_test.go | 232 -------------------- types/request_type.go | 18 +- types/response_type.go | 39 ++-- types/response_type_gen.go | 11 +- types/types_test.go | 293 +++----------------------- types/xxx_test.go | 3 +- worker/db_test.go | 14 +- xenomint/state.go | 25 ++- 19 files changed, 173 insertions(+), 1003 deletions(-) delete mode 100644 types/no_ack_report_type.go delete mode 100644 types/no_ack_report_type_gen.go delete mode 100644 types/no_ack_report_type_gen_test.go diff --git a/client/conn.go b/client/conn.go index 6457958bf..37405faf7 100644 --- a/client/conn.go +++ b/client/conn.go @@ -151,7 +151,7 @@ ackWorkerLoop: oneTime.Do(func() { pc = rpc.NewClientPoolCaller(c.pCaller.TargetID) }) - if err = ack.Sign(c.parent.privKey, false); err != nil { + if err = ack.Sign(c.parent.privKey); err != nil { log.WithField("target", 
pc.TargetID).WithError(err).Error("failed to sign ack") continue } @@ -434,9 +434,10 @@ func (c *conn) sendQuery(ctx context.Context, queryType types.QueryType, queries uc.ackCh <- &types.Ack{ Header: types.SignedAckHeader{ AckHeader: types.AckHeader{ - Response: response.Header, - NodeID: c.localNodeID, - Timestamp: getLocalTime(), + Response: response.Header.ResponseHeader, + ResponseHash: response.Header.Hash(), + NodeID: c.localNodeID, + Timestamp: getLocalTime(), }, }, } diff --git a/cmd/cql-minerd/various_metric_test.go b/cmd/cql-minerd/various_metric_test.go index 66f95f865..ca24afd27 100644 --- a/cmd/cql-minerd/various_metric_test.go +++ b/cmd/cql-minerd/various_metric_test.go @@ -178,7 +178,8 @@ func BenchmarkSignSignature(b *testing.B) { rs := &types.Response{ Header: types.SignedResponseHeader{ ResponseHeader: types.ResponseHeader{ - Request: r.Header, + Request: r.Header.RequestHeader, + RequestHash: r.Header.Hash(), NodeID: n.ToRawNodeID().ToNodeID(), Timestamp: time.Now().UTC(), RowCount: 1, @@ -339,7 +340,8 @@ func TestComputeMetrics(t *testing.T) { rs := &types.Response{ Header: types.SignedResponseHeader{ ResponseHeader: types.ResponseHeader{ - Request: r.Header, + Request: r.Header.RequestHeader, + RequestHash: r.Header.Hash(), NodeID: n.ToRawNodeID().ToNodeID(), Timestamp: time.Now().UTC(), RowCount: 1, diff --git a/cmd/cql-observer/api.go b/cmd/cql-observer/api.go index 6738d97b6..b2d8c3fb3 100644 --- a/cmd/cql-observer/api.go +++ b/cmd/cql-observer/api.go @@ -596,8 +596,8 @@ func (a *explorerAPI) formatResponseHeader(resp *types.SignedResponseHeader) map "affected_rows": resp.AffectedRows, }, "request": map[string]interface{}{ - "hash": resp.Request.Hash().String(), - "timestamp": a.formatTime(resp.Request.Timestamp), + "hash": resp.GetRequestHash().String(), + "timestamp": a.formatTime(resp.GetRequestTimestamp()), "node": resp.Request.NodeID, "type": resp.Request.QueryType.String(), "count": resp.Request.BatchCount, @@ -609,15 +609,15 @@ func 
(a *explorerAPI) formatAck(ack *types.SignedAckHeader) map[string]interface return map[string]interface{}{ "ack": map[string]interface{}{ "request": map[string]interface{}{ - "hash": ack.Response.Request.Hash().String(), - "timestamp": a.formatTime(ack.Response.Request.Timestamp), + "hash": ack.GetRequestHash().String(), + "timestamp": a.formatTime(ack.GetRequestTimestamp()), "node": ack.Response.Request.NodeID, "type": ack.Response.Request.QueryType.String(), "count": ack.Response.Request.BatchCount, }, "response": map[string]interface{}{ - "hash": ack.Response.Hash().String(), - "timestamp": a.formatTime(ack.Response.Timestamp), + "hash": ack.GetResponseHash().String(), + "timestamp": a.formatTime(ack.GetResponseTimestamp()), "node": ack.Response.NodeID, "log_id": ack.Response.LogOffset, // savepoint id in eventual consistency mode "last_insert_id": ack.Response.LastInsertID, diff --git a/sqlchain/ackindex.go b/sqlchain/ackindex.go index 748937cc6..ded9e5965 100644 --- a/sqlchain/ackindex.go +++ b/sqlchain/ackindex.go @@ -61,7 +61,7 @@ func (i *multiAckIndex) register(ack *types.SignedAckHeader) (err error) { var ( resp *types.SignedResponseHeader ok bool - key = ack.SignedRequestHeader().GetQueryKey() + key = ack.GetQueryKey() ) log.Debugf("registering key %s <-- ack %s", &key, ack.Hash()) @@ -71,7 +71,7 @@ func (i *multiAckIndex) register(ack *types.SignedAckHeader) (err error) { err = errors.Wrapf(ErrQueryNotFound, "register key %s <-- ack %s", &key, ack.Hash()) return } - if resp.Hash() != ack.ResponseHash() { + if resp.Hash() != ack.GetResponseHash() { err = errors.Wrapf(ErrResponseSeqNotMatch, "register key %s <-- ack %s", &key, ack.Hash()) } delete(i.respIndex, key) @@ -82,7 +82,7 @@ func (i *multiAckIndex) register(ack *types.SignedAckHeader) (err error) { } func (i *multiAckIndex) remove(ack *types.SignedAckHeader) (err error) { - var key = ack.SignedRequestHeader().GetQueryKey() + var key = ack.GetQueryKey() log.Debugf("removing key %s -x- ack %s", 
&key, ack.Hash()) i.Lock() defer i.Unlock() @@ -120,8 +120,8 @@ func (i *multiAckIndex) expire() { // TODO(leventeliu): need further processing. for _, v := range i.respIndex { log.WithFields(log.Fields{ - "request_hash": v.Request.Hash(), - "request_time": v.Request.Timestamp, + "request_hash": v.GetRequestHash(), + "request_time": v.GetRequestTimestamp(), "request_type": v.Request.QueryType, "request_node": v.Request.NodeID, "response_hash": v.Hash(), @@ -131,13 +131,13 @@ func (i *multiAckIndex) expire() { } for _, v := range i.ackIndex { log.WithFields(log.Fields{ - "request_hash": v.Response.Request.Hash(), - "request_time": v.Response.Request.Timestamp, + "request_hash": v.GetRequestHash(), + "request_time": v.GetRequestTimestamp(), "request_type": v.Response.Request.QueryType, "request_node": v.Response.Request.NodeID, - "response_hash": v.Response.Hash(), + "response_hash": v.GetResponseHash(), "response_node": v.Response.NodeID, - "response_time": v.Response.Timestamp, + "response_time": v.GetResponseTimestamp(), "ack_hash": v.Hash(), "ack_node": v.NodeID, "ack_time": v.Timestamp, diff --git a/sqlchain/ackindex_test.go b/sqlchain/ackindex_test.go index 77ccd4d18..53698613d 100644 --- a/sqlchain/ackindex_test.go +++ b/sqlchain/ackindex_test.go @@ -32,19 +32,17 @@ func TestAckIndex(t *testing.T) { ai = newAckIndex() resp = &types.SignedResponseHeader{ ResponseHeader: types.ResponseHeader{ - Request: types.SignedRequestHeader{ - RequestHeader: types.RequestHeader{ - NodeID: proto.NodeID( - "0000000000000000000000000000000000000000000000000000000000000000"), - ConnectionID: 0, - SeqNo: 0, - }, + Request: types.RequestHeader{ + NodeID: proto.NodeID( + "0000000000000000000000000000000000000000000000000000000000000000"), + ConnectionID: 0, + SeqNo: 0, }, }, } ack = &types.SignedAckHeader{ AckHeader: types.AckHeader{ - Response: *resp, + Response: resp.ResponseHeader, }, } ) diff --git a/sqlchain/chain.go b/sqlchain/chain.go index e7130574f..6684aad22 100644 --- 
a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -529,7 +529,7 @@ func (c *Chain) pushBlock(b *types.Block) (err error) { // pushAckedQuery pushes a acknowledged, signed and verified query into the chain. func (c *Chain) pushAckedQuery(ack *types.SignedAckHeader) (err error) { log.WithField("db", c.databaseID).Debugf("push ack %s", ack.Hash().String()) - h := c.rt.getHeightFromTime(ack.SignedResponseHeader().Timestamp) + h := c.rt.getHeightFromTime(ack.GetResponseTimestamp()) k := heightToKey(h) var enc *bytes.Buffer @@ -1104,7 +1104,7 @@ func (c *Chain) CheckAndPushNewBlock(block *types.Block) (err error) { // VerifyAndPushAckedQuery verifies a acknowledged and signed query, and pushed it if valid. func (c *Chain) VerifyAndPushAckedQuery(ack *types.SignedAckHeader) (err error) { // TODO(leventeliu): check ack. - if c.rt.queryTimeIsExpired(ack.SignedResponseHeader().Timestamp) { + if c.rt.queryTimeIsExpired(ack.GetResponseTimestamp()) { err = errors.Wrapf(ErrQueryExpired, "Verify ack query, min valid height %d, ack height %d", c.rt.getMinValidHeight(), c.rt.getHeightFromTime(ack.Timestamp)) return } @@ -1208,15 +1208,15 @@ func (c *Chain) Query( // AddResponse addes a response to the ackIndex, awaiting for acknowledgement. 
func (c *Chain) AddResponse(resp *types.SignedResponseHeader) (err error) { - return c.ai.addResponse(c.rt.getHeightFromTime(resp.Request.Timestamp), resp) + return c.ai.addResponse(c.rt.getHeightFromTime(resp.GetRequestTimestamp()), resp) } func (c *Chain) register(ack *types.SignedAckHeader) (err error) { - return c.ai.register(c.rt.getHeightFromTime(ack.SignedRequestHeader().Timestamp), ack) + return c.ai.register(c.rt.getHeightFromTime(ack.GetRequestTimestamp()), ack) } func (c *Chain) remove(ack *types.SignedAckHeader) (err error) { - return c.ai.remove(c.rt.getHeightFromTime(ack.SignedRequestHeader().Timestamp), ack) + return c.ai.remove(c.rt.getHeightFromTime(ack.GetRequestTimestamp()), ack) } func (c *Chain) pruneBlockCache() { diff --git a/sqlchain/xxx_test.go b/sqlchain/xxx_test.go index 80b5719cb..c5c1383a6 100644 --- a/sqlchain/xxx_test.go +++ b/sqlchain/xxx_test.go @@ -146,9 +146,10 @@ func createRandomQueryResponse(cli, worker *nodeProfile) ( resp := &types.Response{ Header: types.SignedResponseHeader{ ResponseHeader: types.ResponseHeader{ - Request: *req, - NodeID: worker.NodeID, - Timestamp: createRandomTimeAfter(req.Timestamp, 100), + Request: req.RequestHeader, + RequestHash: req.Hash(), + NodeID: worker.NodeID, + Timestamp: createRandomTimeAfter(req.Timestamp, 100), }, }, Payload: types.ResponsePayload{ @@ -180,14 +181,15 @@ func createRandomQueryAckWithResponse(resp *types.SignedResponseHeader, cli *nod ack := &types.Ack{ Header: types.SignedAckHeader{ AckHeader: types.AckHeader{ - Response: *resp, - NodeID: cli.NodeID, - Timestamp: createRandomTimeAfter(resp.Timestamp, 100), + Response: resp.ResponseHeader, + ResponseHash: resp.Hash(), + NodeID: cli.NodeID, + Timestamp: createRandomTimeAfter(resp.Timestamp, 100), }, }, } - if err = ack.Sign(cli.PrivateKey, true); err != nil { + if err = ack.Sign(cli.PrivateKey); err != nil { return } diff --git a/types/ack_type.go b/types/ack_type.go index af24a9da9..267c8ca6a 100644 --- a/types/ack_type.go +++ 
b/types/ack_type.go @@ -29,9 +29,35 @@ import ( // AckHeader defines client ack entity. type AckHeader struct { - Response SignedResponseHeader `json:"r"` - NodeID proto.NodeID `json:"i"` // ack node id - Timestamp time.Time `json:"t"` // time in UTC zone + Response ResponseHeader `json:"r"` + ResponseHash hash.Hash `json:"rh"` + NodeID proto.NodeID `json:"i"` // ack node id + Timestamp time.Time `json:"t"` // time in UTC zone +} + +// GetQueryKey returns the request query key. +func (h *AckHeader) GetQueryKey() QueryKey { + return h.Response.Request.GetQueryKey() +} + +// GetRequestTimestamp returns the request timestamp. +func (h *AckHeader) GetRequestTimestamp() time.Time { + return h.Response.GetRequestTimestamp() +} + +// GetResponseTimestamp returns the response timestamp. +func (h *AckHeader) GetResponseTimestamp() time.Time { + return h.Response.Timestamp +} + +// GetRequestHash returns the request hash. +func (h *AckHeader) GetRequestHash() hash.Hash { + return h.Response.GetRequestHash() +} + +// GetResponseHash returns the response hash. +func (h *AckHeader) GetResponseHash() hash.Hash { + return h.ResponseHash } // SignedAckHeader defines client signed ack entity. @@ -51,24 +77,11 @@ type AckResponse struct{} // Verify checks hash and signature in ack header. func (sh *SignedAckHeader) Verify() (err error) { - // verify response - if err = sh.Response.Verify(); err != nil { - return - } - return sh.DefaultHashSignVerifierImpl.Verify(&sh.AckHeader) } // Sign the request. 
-func (sh *SignedAckHeader) Sign(signer *asymmetric.PrivateKey, verifyReqHeader bool) (err error) { - // Only used by ack worker, and ack.Header is verified before build ack - if verifyReqHeader { - // check original header signature - if err = sh.Response.Verify(); err != nil { - return - } - } - +func (sh *SignedAckHeader) Sign(signer *asymmetric.PrivateKey) (err error) { return sh.DefaultHashSignVerifierImpl.Sign(&sh.AckHeader, signer) } @@ -78,22 +91,7 @@ func (a *Ack) Verify() error { } // Sign the request. -func (a *Ack) Sign(signer *asymmetric.PrivateKey, verifyReqHeader bool) (err error) { +func (a *Ack) Sign(signer *asymmetric.PrivateKey) (err error) { // sign - return a.Header.Sign(signer, verifyReqHeader) -} - -// ResponseHash returns the deep shadowed Response Hash field. -func (sh *SignedAckHeader) ResponseHash() hash.Hash { - return sh.AckHeader.Response.Hash() -} - -// SignedRequestHeader returns the deep shadowed Request reference. -func (sh *SignedAckHeader) SignedRequestHeader() *SignedRequestHeader { - return &sh.AckHeader.Response.Request -} - -// SignedResponseHeader returns the Response reference. 
-func (sh *SignedAckHeader) SignedResponseHeader() *SignedResponseHeader { - return &sh.Response + return a.Header.Sign(signer) } diff --git a/types/ack_type_gen.go b/types/ack_type_gen.go index 3537af696..d623af12b 100644 --- a/types/ack_type_gen.go +++ b/types/ack_type_gen.go @@ -18,19 +18,12 @@ func (z *Ack) MarshalHash() (o []byte, err error) { o = hsp.AppendBytes(o, oTemp) } // map header, size 2 - // map header, size 3 - o = append(o, 0x82, 0x83) - if oTemp, err := z.Header.AckHeader.Response.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - if oTemp, err := z.Header.AckHeader.NodeID.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.Header.AckHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = hsp.AppendTime(o, z.Header.AckHeader.Timestamp) if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { @@ -41,7 +34,7 @@ func (z *Ack) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Ack) Msgsize() (s int) { - s = 1 + 9 + z.Envelope.Msgsize() + 7 + 1 + 10 + 1 + 9 + z.Header.AckHeader.Response.Msgsize() + 7 + z.Header.AckHeader.NodeID.Msgsize() + 10 + hsp.TimeSize + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 9 + z.Envelope.Msgsize() + 7 + 1 + 10 + z.Header.AckHeader.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() return } @@ -49,8 +42,8 @@ func (z *Ack) Msgsize() (s int) { func (z *AckHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) - // map header, size 3 - o = append(o, 0x83) + // map header, size 4 + o = append(o, 0x84) if oTemp, err := z.NodeID.MarshalHash(); err != nil { return nil, err } else { @@ -61,13 +54,18 @@ func (z *AckHeader) MarshalHash() (o []byte, err error) { } else { o = hsp.AppendBytes(o, oTemp) } + if oTemp, err := 
z.ResponseHash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } o = hsp.AppendTime(o, z.Timestamp) return } // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *AckHeader) Msgsize() (s int) { - s = 1 + 7 + z.NodeID.Msgsize() + 9 + z.Response.Msgsize() + 10 + hsp.TimeSize + s = 1 + 7 + z.NodeID.Msgsize() + 9 + z.Response.Msgsize() + 13 + z.ResponseHash.Msgsize() + 10 + hsp.TimeSize return } @@ -91,19 +89,12 @@ func (z *SignedAckHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) // map header, size 2 - // map header, size 3 - o = append(o, 0x82, 0x83) - if oTemp, err := z.AckHeader.Response.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - if oTemp, err := z.AckHeader.NodeID.MarshalHash(); err != nil { + o = append(o, 0x82) + if oTemp, err := z.AckHeader.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } - o = hsp.AppendTime(o, z.AckHeader.Timestamp) if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { return nil, err } else { @@ -114,6 +105,6 @@ func (z *SignedAckHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SignedAckHeader) Msgsize() (s int) { - s = 1 + 10 + 1 + 9 + z.AckHeader.Response.Msgsize() + 7 + z.AckHeader.NodeID.Msgsize() + 10 + hsp.TimeSize + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + s = 1 + 10 + z.AckHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() return } diff --git a/types/no_ack_report_type.go b/types/no_ack_report_type.go deleted file mode 100644 index 29187a451..000000000 --- a/types/no_ack_report_type.go +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package types - -import ( - "time" - - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/verifier" - "github.com/CovenantSQL/CovenantSQL/proto" -) - -//go:generate hsp - -// NoAckReportHeader defines worker issued client no ack report. -type NoAckReportHeader struct { - NodeID proto.NodeID // reporter node id - Timestamp time.Time // time in UTC zone - Response SignedResponseHeader -} - -// SignedNoAckReportHeader defines worker worker issued/signed client no ack report. -type SignedNoAckReportHeader struct { - NoAckReportHeader - verifier.DefaultHashSignVerifierImpl -} - -// NoAckReport defines whole worker no client ack report. -type NoAckReport struct { - proto.Envelope - Header SignedNoAckReportHeader -} - -// AggrNoAckReportHeader defines worker leader aggregated client no ack report. -type AggrNoAckReportHeader struct { - NodeID proto.NodeID // aggregated report node id - Timestamp time.Time // time in UTC zone - Reports []SignedNoAckReportHeader // no-ack reports - Peers *proto.Peers // serving peers during report -} - -// SignedAggrNoAckReportHeader defines worker leader aggregated/signed client no ack report. -type SignedAggrNoAckReportHeader struct { - AggrNoAckReportHeader - verifier.DefaultHashSignVerifierImpl -} - -// AggrNoAckReport defines whole worker leader no client ack report. 
-type AggrNoAckReport struct { - proto.Envelope - Header SignedAggrNoAckReportHeader -} - -// Verify checks hash and signature in signed no ack report header. -func (sh *SignedNoAckReportHeader) Verify() (err error) { - // verify original response - if err = sh.Response.Verify(); err != nil { - return - } - - return sh.DefaultHashSignVerifierImpl.Verify(&sh.NoAckReportHeader) -} - -// Sign the request. -func (sh *SignedNoAckReportHeader) Sign(signer *asymmetric.PrivateKey) (err error) { - // verify original response - if err = sh.Response.Verify(); err != nil { - return - } - - return sh.DefaultHashSignVerifierImpl.Sign(&sh.NoAckReportHeader, signer) -} - -// Verify checks hash and signature in whole no ack report. -func (r *NoAckReport) Verify() error { - return r.Header.Verify() -} - -// Sign the request. -func (r *NoAckReport) Sign(signer *asymmetric.PrivateKey) error { - return r.Header.Sign(signer) -} - -// Verify checks hash and signature in aggregated no ack report. -func (sh *SignedAggrNoAckReportHeader) Verify() (err error) { - // verify original reports - for _, r := range sh.Reports { - if err = r.Verify(); err != nil { - return - } - } - - return sh.DefaultHashSignVerifierImpl.Verify(&sh.AggrNoAckReportHeader) -} - -// Sign the request. -func (sh *SignedAggrNoAckReportHeader) Sign(signer *asymmetric.PrivateKey) (err error) { - for _, r := range sh.Reports { - if err = r.Verify(); err != nil { - return - } - } - - return sh.DefaultHashSignVerifierImpl.Sign(&sh.AggrNoAckReportHeader, signer) -} - -// Verify the whole aggregation no ack report. -func (r *AggrNoAckReport) Verify() (err error) { - return r.Header.Verify() -} - -// Sign the request. 
-func (r *AggrNoAckReport) Sign(signer *asymmetric.PrivateKey) error { - return r.Header.Sign(signer) -} diff --git a/types/no_ack_report_type_gen.go b/types/no_ack_report_type_gen.go deleted file mode 100644 index 2db24c1e7..000000000 --- a/types/no_ack_report_type_gen.go +++ /dev/null @@ -1,224 +0,0 @@ -package types - -// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. - -import ( - hsp "github.com/CovenantSQL/HashStablePack/marshalhash" -) - -// MarshalHash marshals for hash -func (z *AggrNoAckReport) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 2 - o = append(o, 0x82) - if oTemp, err := z.Envelope.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - // map header, size 2 - o = append(o, 0x82) - if oTemp, err := z.Header.AggrNoAckReportHeader.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *AggrNoAckReport) Msgsize() (s int) { - s = 1 + 9 + z.Envelope.Msgsize() + 7 + 1 + 22 + z.Header.AggrNoAckReportHeader.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() - return -} - -// MarshalHash marshals for hash -func (z *AggrNoAckReportHeader) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 4 - o = append(o, 0x84) - if oTemp, err := z.NodeID.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - if z.Peers == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Peers.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = hsp.AppendArrayHeader(o, 
uint32(len(z.Reports))) - for za0001 := range z.Reports { - // map header, size 2 - // map header, size 3 - o = append(o, 0x82, 0x83) - if oTemp, err := z.Reports[za0001].NoAckReportHeader.NodeID.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = hsp.AppendTime(o, z.Reports[za0001].NoAckReportHeader.Timestamp) - if oTemp, err := z.Reports[za0001].NoAckReportHeader.Response.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - if oTemp, err := z.Reports[za0001].DefaultHashSignVerifierImpl.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - o = hsp.AppendTime(o, z.Timestamp) - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *AggrNoAckReportHeader) Msgsize() (s int) { - s = 1 + 7 + z.NodeID.Msgsize() + 6 - if z.Peers == nil { - s += hsp.NilSize - } else { - s += z.Peers.Msgsize() - } - s += 8 + hsp.ArrayHeaderSize - for za0001 := range z.Reports { - s += 1 + 18 + 1 + 7 + z.Reports[za0001].NoAckReportHeader.NodeID.Msgsize() + 10 + hsp.TimeSize + 9 + z.Reports[za0001].NoAckReportHeader.Response.Msgsize() + 28 + z.Reports[za0001].DefaultHashSignVerifierImpl.Msgsize() - } - s += 10 + hsp.TimeSize - return -} - -// MarshalHash marshals for hash -func (z *NoAckReport) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 2 - o = append(o, 0x82) - if oTemp, err := z.Envelope.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - // map header, size 2 - // map header, size 3 - o = append(o, 0x82, 0x83) - if oTemp, err := z.Header.NoAckReportHeader.NodeID.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = hsp.AppendTime(o, z.Header.NoAckReportHeader.Timestamp) - if oTemp, err := z.Header.NoAckReportHeader.Response.MarshalHash(); 
err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *NoAckReport) Msgsize() (s int) { - s = 1 + 9 + z.Envelope.Msgsize() + 7 + 1 + 18 + 1 + 7 + z.Header.NoAckReportHeader.NodeID.Msgsize() + 10 + hsp.TimeSize + 9 + z.Header.NoAckReportHeader.Response.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() - return -} - -// MarshalHash marshals for hash -func (z *NoAckReportHeader) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 3 - o = append(o, 0x83) - if oTemp, err := z.NodeID.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - if oTemp, err := z.Response.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = hsp.AppendTime(o, z.Timestamp) - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *NoAckReportHeader) Msgsize() (s int) { - s = 1 + 7 + z.NodeID.Msgsize() + 9 + z.Response.Msgsize() + 10 + hsp.TimeSize - return -} - -// MarshalHash marshals for hash -func (z *SignedAggrNoAckReportHeader) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 2 - o = append(o, 0x82) - if oTemp, err := z.AggrNoAckReportHeader.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z 
*SignedAggrNoAckReportHeader) Msgsize() (s int) { - s = 1 + 22 + z.AggrNoAckReportHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() - return -} - -// MarshalHash marshals for hash -func (z *SignedNoAckReportHeader) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 2 - o = append(o, 0x82) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - // map header, size 3 - o = append(o, 0x83) - if oTemp, err := z.NoAckReportHeader.NodeID.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = hsp.AppendTime(o, z.NoAckReportHeader.Timestamp) - if oTemp, err := z.NoAckReportHeader.Response.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *SignedNoAckReportHeader) Msgsize() (s int) { - s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 18 + 1 + 7 + z.NoAckReportHeader.NodeID.Msgsize() + 10 + hsp.TimeSize + 9 + z.NoAckReportHeader.Response.Msgsize() - return -} diff --git a/types/no_ack_report_type_gen_test.go b/types/no_ack_report_type_gen_test.go deleted file mode 100644 index 26a9ce408..000000000 --- a/types/no_ack_report_type_gen_test.go +++ /dev/null @@ -1,232 +0,0 @@ -package types - -// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
- -import ( - "bytes" - "crypto/rand" - "encoding/binary" - "testing" -) - -func TestMarshalHashAggrNoAckReport(t *testing.T) { - v := AggrNoAckReport{} - binary.Read(rand.Reader, binary.BigEndian, &v) - bts1, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - bts2, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(bts1, bts2) { - t.Fatal("hash not stable") - } -} - -func BenchmarkMarshalHashAggrNoAckReport(b *testing.B) { - v := AggrNoAckReport{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalHash() - } -} - -func BenchmarkAppendMsgAggrNoAckReport(b *testing.B) { - v := AggrNoAckReport{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalHash() - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalHash() - } -} - -func TestMarshalHashAggrNoAckReportHeader(t *testing.T) { - v := AggrNoAckReportHeader{} - binary.Read(rand.Reader, binary.BigEndian, &v) - bts1, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - bts2, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(bts1, bts2) { - t.Fatal("hash not stable") - } -} - -func BenchmarkMarshalHashAggrNoAckReportHeader(b *testing.B) { - v := AggrNoAckReportHeader{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalHash() - } -} - -func BenchmarkAppendMsgAggrNoAckReportHeader(b *testing.B) { - v := AggrNoAckReportHeader{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalHash() - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalHash() - } -} - -func TestMarshalHashNoAckReport(t *testing.T) { - v := NoAckReport{} - binary.Read(rand.Reader, binary.BigEndian, &v) - bts1, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - bts2, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(bts1, bts2) { - t.Fatal("hash not stable") - 
} -} - -func BenchmarkMarshalHashNoAckReport(b *testing.B) { - v := NoAckReport{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalHash() - } -} - -func BenchmarkAppendMsgNoAckReport(b *testing.B) { - v := NoAckReport{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalHash() - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalHash() - } -} - -func TestMarshalHashNoAckReportHeader(t *testing.T) { - v := NoAckReportHeader{} - binary.Read(rand.Reader, binary.BigEndian, &v) - bts1, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - bts2, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(bts1, bts2) { - t.Fatal("hash not stable") - } -} - -func BenchmarkMarshalHashNoAckReportHeader(b *testing.B) { - v := NoAckReportHeader{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalHash() - } -} - -func BenchmarkAppendMsgNoAckReportHeader(b *testing.B) { - v := NoAckReportHeader{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalHash() - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalHash() - } -} - -func TestMarshalHashSignedAggrNoAckReportHeader(t *testing.T) { - v := SignedAggrNoAckReportHeader{} - binary.Read(rand.Reader, binary.BigEndian, &v) - bts1, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - bts2, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(bts1, bts2) { - t.Fatal("hash not stable") - } -} - -func BenchmarkMarshalHashSignedAggrNoAckReportHeader(b *testing.B) { - v := SignedAggrNoAckReportHeader{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalHash() - } -} - -func BenchmarkAppendMsgSignedAggrNoAckReportHeader(b *testing.B) { - v := SignedAggrNoAckReportHeader{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalHash() - b.SetBytes(int64(len(bts))) - 
b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalHash() - } -} - -func TestMarshalHashSignedNoAckReportHeader(t *testing.T) { - v := SignedNoAckReportHeader{} - binary.Read(rand.Reader, binary.BigEndian, &v) - bts1, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - bts2, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(bts1, bts2) { - t.Fatal("hash not stable") - } -} - -func BenchmarkMarshalHashSignedNoAckReportHeader(b *testing.B) { - v := SignedNoAckReportHeader{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalHash() - } -} - -func BenchmarkAppendMsgSignedNoAckReportHeader(b *testing.B) { - v := SignedNoAckReportHeader{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalHash() - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalHash() - } -} diff --git a/types/request_type.go b/types/request_type.go index bb3eef07d..ea3f05e8a 100644 --- a/types/request_type.go +++ b/types/request_type.go @@ -71,6 +71,15 @@ type RequestHeader struct { QueriesHash hash.Hash `json:"qh"` // hash of query payload } +// GetQueryKey returns a unique query key of this request. +func (h *RequestHeader) GetQueryKey() QueryKey { + return QueryKey{ + NodeID: h.NodeID, + ConnectionID: h.ConnectionID, + SeqNo: h.SeqNo, + } +} + // QueryKey defines an unique query key of a request. type QueryKey struct { NodeID proto.NodeID `json:"id"` @@ -157,12 +166,3 @@ func (r *Request) SetMarshalCache(buf []byte) { func (r *Request) GetMarshalCache() (buf []byte) { return r._marshalCache } - -// GetQueryKey returns a unique query key of this request. 
-func (sh *SignedRequestHeader) GetQueryKey() QueryKey { - return QueryKey{ - NodeID: sh.NodeID, - ConnectionID: sh.ConnectionID, - SeqNo: sh.SeqNo, - } -} diff --git a/types/response_type.go b/types/response_type.go index bf6a244e7..615e013eb 100644 --- a/types/response_type.go +++ b/types/response_type.go @@ -25,7 +25,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/verifier" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/utils/trace" - "github.com/pkg/errors" ) //go:generate hsp @@ -44,14 +43,25 @@ type ResponsePayload struct { // ResponseHeader defines a query response header. type ResponseHeader struct { - Request SignedRequestHeader `json:"r"` - NodeID proto.NodeID `json:"id"` // response node id - Timestamp time.Time `json:"t"` // time in UTC zone - RowCount uint64 `json:"c"` // response row count of payload - LogOffset uint64 `json:"o"` // request log offset - LastInsertID int64 `json:"l"` // insert insert id - AffectedRows int64 `json:"a"` // affected rows - PayloadHash hash.Hash `json:"dh"` // hash of query response payload + Request RequestHeader `json:"r"` + RequestHash hash.Hash `json:"rh"` + NodeID proto.NodeID `json:"id"` // response node id + Timestamp time.Time `json:"t"` // time in UTC zone + RowCount uint64 `json:"c"` // response row count of payload + LogOffset uint64 `json:"o"` // request log offset + LastInsertID int64 `json:"l"` // insert insert id + AffectedRows int64 `json:"a"` // affected rows + PayloadHash hash.Hash `json:"dh"` // hash of query response payload +} + +// GetRequestHash returns the request hash. +func (h *ResponseHeader) GetRequestHash() hash.Hash { + return h.RequestHash +} + +// GetRequestTimestamp returns the request timestamp. +func (h *ResponseHeader) GetRequestTimestamp() time.Time { + return h.Request.Timestamp } // SignedResponseHeader defines a signed query response header. @@ -68,22 +78,11 @@ type Response struct { // Verify checks hash and signature in response header. 
func (sh *SignedResponseHeader) Verify() (err error) { - // verify original request header - if err = sh.Request.Verify(); err != nil { - return - } - return sh.DefaultHashSignVerifierImpl.Verify(&sh.ResponseHeader) } // Sign the request. func (sh *SignedResponseHeader) Sign(signer *asymmetric.PrivateKey) (err error) { - // make sure original header is signed - if err = sh.Request.Verify(); err != nil { - err = errors.Wrapf(err, "SignedResponseHeader %v", sh) - return - } - return sh.DefaultHashSignVerifierImpl.Sign(&sh.ResponseHeader, signer) } diff --git a/types/response_type_gen.go b/types/response_type_gen.go index 0c97de2eb..9efec7c49 100644 --- a/types/response_type_gen.go +++ b/types/response_type_gen.go @@ -41,8 +41,8 @@ func (z *Response) Msgsize() (s int) { func (z *ResponseHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) - // map header, size 8 - o = append(o, 0x88) + // map header, size 9 + o = append(o, 0x89) o = hsp.AppendInt64(o, z.AffectedRows) o = hsp.AppendInt64(o, z.LastInsertID) o = hsp.AppendUint64(o, z.LogOffset) @@ -61,6 +61,11 @@ func (z *ResponseHeader) MarshalHash() (o []byte, err error) { } else { o = hsp.AppendBytes(o, oTemp) } + if oTemp, err := z.RequestHash.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } o = hsp.AppendUint64(o, z.RowCount) o = hsp.AppendTime(o, z.Timestamp) return @@ -68,7 +73,7 @@ func (z *ResponseHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *ResponseHeader) Msgsize() (s int) { - s = 1 + 13 + hsp.Int64Size + 13 + hsp.Int64Size + 10 + hsp.Uint64Size + 7 + z.NodeID.Msgsize() + 12 + z.PayloadHash.Msgsize() + 8 + z.Request.Msgsize() + 9 + hsp.Uint64Size + 10 + hsp.TimeSize + s = 1 + 13 + hsp.Int64Size + 13 + hsp.Int64Size + 10 + hsp.Uint64Size + 7 + z.NodeID.Msgsize() + 12 + z.PayloadHash.Msgsize() + 8 + z.Request.Msgsize() + 
12 + z.RequestHash.Msgsize() + 9 + hsp.Uint64Size + 10 + hsp.TimeSize return } diff --git a/types/types_test.go b/types/types_test.go index 29724c951..7f482bd00 100644 --- a/types/types_test.go +++ b/types/types_test.go @@ -25,10 +25,8 @@ import ( "time" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/verifier" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/utils" - "github.com/pkg/errors" . "github.com/smartystreets/goconvey/convey" "github.com/ugorji/go/codec" ) @@ -161,15 +159,13 @@ func TestResponse_Sign(t *testing.T) { res := &Response{ Header: SignedResponseHeader{ ResponseHeader: ResponseHeader{ - Request: SignedRequestHeader{ - RequestHeader: RequestHeader{ - QueryType: WriteQuery, - NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), - DatabaseID: proto.DatabaseID("db1"), - ConnectionID: uint64(1), - SeqNo: uint64(2), - Timestamp: time.Now().UTC(), - }, + Request: RequestHeader{ + QueryType: WriteQuery, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), + DatabaseID: proto.DatabaseID("db1"), + ConnectionID: uint64(1), + SeqNo: uint64(2), + Timestamp: time.Now().UTC(), }, NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), Timestamp: time.Now().UTC(), @@ -216,19 +212,7 @@ func TestResponse_Sign(t *testing.T) { var err error - // sign directly, embedded original request is not filled - err = res.Sign(privKey) - So(err, ShouldNotBeNil) - So(errors.Cause(err), ShouldBeIn, []error{ - verifier.ErrHashValueNotMatch, - verifier.ErrSignatureNotMatch, - }) - - // sign original request first - err = res.Header.Request.Sign(privKey) - So(err, ShouldBeNil) - - // sign again + // sign err = res.Sign(privKey) So(err, ShouldBeNil) @@ -279,22 +263,18 @@ func TestAck_Sign(t *testing.T) { ack := &Ack{ Header: SignedAckHeader{ AckHeader: AckHeader{ - Response: 
SignedResponseHeader{ - ResponseHeader: ResponseHeader{ - Request: SignedRequestHeader{ - RequestHeader: RequestHeader{ - QueryType: WriteQuery, - NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), - DatabaseID: proto.DatabaseID("db1"), - ConnectionID: uint64(1), - SeqNo: uint64(2), - Timestamp: time.Now().UTC(), - }, - }, - NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), - Timestamp: time.Now().UTC(), - RowCount: uint64(1), + Response: ResponseHeader{ + Request: RequestHeader{ + QueryType: WriteQuery, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), + DatabaseID: proto.DatabaseID("db1"), + ConnectionID: uint64(1), + SeqNo: uint64(2), + Timestamp: time.Now().UTC(), }, + NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), + Timestamp: time.Now().UTC(), + RowCount: uint64(1), }, NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), Timestamp: time.Now().UTC(), @@ -305,30 +285,13 @@ func TestAck_Sign(t *testing.T) { var err error Convey("get query key", func() { - key := ack.Header.SignedRequestHeader().GetQueryKey() - So(key.NodeID, ShouldEqual, ack.Header.SignedRequestHeader().NodeID) - So(key.ConnectionID, ShouldEqual, ack.Header.SignedRequestHeader().ConnectionID) - So(key.SeqNo, ShouldEqual, ack.Header.SignedRequestHeader().SeqNo) - }) - - // sign directly, embedded original response is not filled - err = ack.Sign(privKey, false) - So(err, ShouldBeNil) - err = ack.Sign(privKey, true) - So(err, ShouldNotBeNil) - So(errors.Cause(err), ShouldBeIn, []error{ - verifier.ErrHashValueNotMatch, - verifier.ErrSignatureNotMatch, + key := ack.Header.GetQueryKey() + So(key.NodeID, ShouldEqual, ack.Header.GetQueryKey().NodeID) + So(key.ConnectionID, ShouldEqual, ack.Header.GetQueryKey().ConnectionID) + So(key.SeqNo, ShouldEqual, ack.Header.GetQueryKey().SeqNo) }) - // sign nested 
structure, step by step - // this is not required during runtime - // during runtime, nested structures is signed and provided by peers - err = ack.Header.Response.Request.Sign(privKey) - So(err, ShouldBeNil) - err = ack.Header.Response.Sign(privKey) - So(err, ShouldBeNil) - err = ack.Sign(privKey, true) + err = ack.Sign(privKey) So(err, ShouldBeNil) Convey("verify", func() { @@ -357,214 +320,6 @@ func TestAck_Sign(t *testing.T) { }) } -func TestNoAckReport_Sign(t *testing.T) { - privKey, _ := getCommKeys() - - Convey("sign", t, func() { - noAck := &NoAckReport{ - Header: SignedNoAckReportHeader{ - NoAckReportHeader: NoAckReportHeader{ - NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), - Timestamp: time.Now().UTC(), - Response: SignedResponseHeader{ - ResponseHeader: ResponseHeader{ - Request: SignedRequestHeader{ - RequestHeader: RequestHeader{ - QueryType: WriteQuery, - NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), - DatabaseID: proto.DatabaseID("db1"), - ConnectionID: uint64(1), - SeqNo: uint64(2), - Timestamp: time.Now().UTC(), - }, - }, - NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), - Timestamp: time.Now().UTC(), - RowCount: uint64(1), - }, - }, - }, - }, - } - - var err error - - // sign directly, embedded original response/request is not filled - err = noAck.Sign(privKey) - So(err, ShouldNotBeNil) - So(errors.Cause(err), ShouldBeIn, []error{ - verifier.ErrHashValueNotMatch, - verifier.ErrSignatureNotMatch, - }) - - // sign nested structure - err = noAck.Header.Response.Request.Sign(privKey) - So(err, ShouldBeNil) - err = noAck.Header.Response.Sign(privKey) - So(err, ShouldBeNil) - err = noAck.Sign(privKey) - So(err, ShouldBeNil) - - Convey("verify", func() { - err = noAck.Verify() - So(err, ShouldBeNil) - - Convey("request change", func() { - noAck.Header.Response.Request.QueryType = ReadQuery - - err = noAck.Verify() - So(err, 
ShouldNotBeNil) - }) - - Convey("response change", func() { - noAck.Header.Response.RowCount = 100 - - err = noAck.Verify() - So(err, ShouldNotBeNil) - }) - - Convey("header change", func() { - noAck.Header.Timestamp = noAck.Header.Timestamp.Add(time.Second) - - err = noAck.Verify() - So(err, ShouldNotBeNil) - }) - }) - }) -} - -func TestAggrNoAckReport_Sign(t *testing.T) { - privKey, _ := getCommKeys() - - Convey("sign", t, func() { - aggrNoAck := &AggrNoAckReport{ - Header: SignedAggrNoAckReportHeader{ - AggrNoAckReportHeader: AggrNoAckReportHeader{ - NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), - Timestamp: time.Now().UTC(), - Reports: []SignedNoAckReportHeader{ - { - NoAckReportHeader: NoAckReportHeader{ - NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), - Timestamp: time.Now().UTC(), - Response: SignedResponseHeader{ - ResponseHeader: ResponseHeader{ - Request: SignedRequestHeader{ - RequestHeader: RequestHeader{ - QueryType: WriteQuery, - NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), - DatabaseID: proto.DatabaseID("db1"), - ConnectionID: uint64(1), - SeqNo: uint64(2), - Timestamp: time.Now().UTC(), - }, - }, - NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), - Timestamp: time.Now().UTC(), - RowCount: uint64(1), - }, - }, - }, - }, - { - NoAckReportHeader: NoAckReportHeader{ - NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), - Timestamp: time.Now().UTC(), - Response: SignedResponseHeader{ - ResponseHeader: ResponseHeader{ - Request: SignedRequestHeader{ - RequestHeader: RequestHeader{ - QueryType: WriteQuery, - NodeID: proto.NodeID("0000000000000000000000000000000000000000000000000000000000001111"), - DatabaseID: proto.DatabaseID("db1"), - ConnectionID: uint64(1), - SeqNo: uint64(2), - Timestamp: time.Now().UTC(), - }, - }, - NodeID: 
proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), - Timestamp: time.Now().UTC(), - RowCount: uint64(1), - }, - }, - }, - }, - }, - Peers: &proto.Peers{ - PeersHeader: proto.PeersHeader{ - Term: uint64(1), - Leader: proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), - Servers: []proto.NodeID{ - proto.NodeID("0000000000000000000000000000000000000000000000000000000000003333"), - proto.NodeID("0000000000000000000000000000000000000000000000000000000000002222"), - }, - }, - }, - }, - }, - } - - var err error - - // sign directly, embedded original response/request is not filled - err = aggrNoAck.Sign(privKey) - So(err, ShouldNotBeNil) - So(errors.Cause(err), ShouldBeIn, []error{ - verifier.ErrHashValueNotMatch, - verifier.ErrSignatureNotMatch, - }) - - // sign nested structure - err = aggrNoAck.Header.Reports[0].Response.Request.Sign(privKey) - So(err, ShouldBeNil) - err = aggrNoAck.Header.Reports[1].Response.Request.Sign(privKey) - So(err, ShouldBeNil) - err = aggrNoAck.Header.Reports[0].Response.Sign(privKey) - So(err, ShouldBeNil) - err = aggrNoAck.Header.Reports[1].Response.Sign(privKey) - So(err, ShouldBeNil) - err = aggrNoAck.Header.Reports[0].Sign(privKey) - So(err, ShouldBeNil) - err = aggrNoAck.Header.Reports[1].Sign(privKey) - So(err, ShouldBeNil) - err = aggrNoAck.Sign(privKey) - So(err, ShouldBeNil) - - Convey("verify", func() { - err = aggrNoAck.Verify() - So(err, ShouldBeNil) - - Convey("request change", func() { - aggrNoAck.Header.Reports[0].Response.Request.QueryType = ReadQuery - - err = aggrNoAck.Verify() - So(err, ShouldNotBeNil) - }) - - Convey("response change", func() { - aggrNoAck.Header.Reports[0].Response.RowCount = 1000 - - err = aggrNoAck.Verify() - So(err, ShouldNotBeNil) - }) - - Convey("report change", func() { - aggrNoAck.Header.Reports[0].Timestamp = aggrNoAck.Header.Reports[0].Timestamp.Add(time.Second) - - err = aggrNoAck.Verify() - So(err, ShouldNotBeNil) - }) - - 
Convey("header change", func() { - aggrNoAck.Header.Timestamp = aggrNoAck.Header.Timestamp.Add(time.Second) - - err = aggrNoAck.Verify() - So(err, ShouldNotBeNil) - }) - }) - }) -} - func TestInitServiceResponse_Sign(t *testing.T) { privKey, _ := getCommKeys() diff --git a/types/xxx_test.go b/types/xxx_test.go index cf2454ab6..49dcff7ec 100644 --- a/types/xxx_test.go +++ b/types/xxx_test.go @@ -251,7 +251,8 @@ func buildResponse(header *SignedRequestHeader, cols []string, types []string, r r = &Response{ Header: SignedResponseHeader{ ResponseHeader: ResponseHeader{ - Request: *header, + Request: header.RequestHeader, + RequestHash: header.Hash(), NodeID: id, Timestamp: time.Now().UTC(), RowCount: 0, diff --git a/worker/db_test.go b/worker/db_test.go index acbb89438..c59e33f07 100644 --- a/worker/db_test.go +++ b/worker/db_test.go @@ -31,9 +31,6 @@ import ( "testing" "time" - "github.com/fortytw2/leaktest" - . "github.com/smartystreets/goconvey/convey" - "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/consistent" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" @@ -47,6 +44,8 @@ import ( "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/fortytw2/leaktest" + . 
"github.com/smartystreets/goconvey/convey" ) var rootHash = hash.Hash{} @@ -620,14 +619,15 @@ func buildAck(res *types.Response) (ack *types.Ack, err error) { ack = &types.Ack{ Header: types.SignedAckHeader{ AckHeader: types.AckHeader{ - Response: res.Header, - NodeID: nodeID, - Timestamp: getLocalTime(), + Response: res.Header.ResponseHeader, + ResponseHash: res.Header.Hash(), + NodeID: nodeID, + Timestamp: getLocalTime(), }, }, } - err = ack.Sign(privateKey, true) + err = ack.Sign(privateKey) return } diff --git a/xenomint/state.go b/xenomint/state.go index 83835e27e..bdf84735e 100644 --- a/xenomint/state.go +++ b/xenomint/state.go @@ -228,11 +228,12 @@ func (s *State) readWithContext( resp = &types.Response{ Header: types.SignedResponseHeader{ ResponseHeader: types.ResponseHeader{ - Request: req.Header, - NodeID: s.nodeID, - Timestamp: s.getLocalTime(), - RowCount: uint64(len(data)), - LogOffset: s.getSeq(), + Request: req.Header.RequestHeader, + RequestHash: req.Header.Hash(), + NodeID: s.nodeID, + Timestamp: s.getLocalTime(), + RowCount: uint64(len(data)), + LogOffset: s.getSeq(), }, }, Payload: types.ResponsePayload{ @@ -292,11 +293,12 @@ func (s *State) readTx( resp = &types.Response{ Header: types.SignedResponseHeader{ ResponseHeader: types.ResponseHeader{ - Request: req.Header, - NodeID: s.nodeID, - Timestamp: s.getLocalTime(), - RowCount: uint64(len(data)), - LogOffset: id, + Request: req.Header.RequestHeader, + RequestHash: req.Header.Hash(), + NodeID: s.nodeID, + Timestamp: s.getLocalTime(), + RowCount: uint64(len(data)), + LogOffset: id, }, }, Payload: types.ResponsePayload{ @@ -447,7 +449,8 @@ func (s *State) write( resp = &types.Response{ Header: types.SignedResponseHeader{ ResponseHeader: types.ResponseHeader{ - Request: req.Header, + Request: req.Header.RequestHeader, + RequestHash: req.Header.Hash(), NodeID: s.nodeID, Timestamp: s.getLocalTime(), RowCount: 0, From 26546d205dc7f0b0dcad5f419e05c7f719f06f91 Mon Sep 17 00:00:00 2001 From: Qi Xiao 
Date: Tue, 29 Jan 2019 13:58:02 +0800 Subject: [PATCH 287/302] Remove response signature check --- client/conn.go | 8 ----- cmd/cql-minerd/various_metric_test.go | 7 ++-- types/response_type.go | 49 +++++++++++++++++++++------ worker/db.go | 28 ++++++++++----- worker/db_test.go | 34 +++---------------- worker/dbms_rpc.go | 1 + worker/dbms_test.go | 8 ----- xenomint/state_test.go | 6 ++-- 8 files changed, 69 insertions(+), 72 deletions(-) diff --git a/client/conn.go b/client/conn.go index 37405faf7..56d87313c 100644 --- a/client/conn.go +++ b/client/conn.go @@ -413,14 +413,6 @@ func (c *conn) sendQuery(ctx context.Context, queryType types.QueryType, queries }(); err != nil { return } - - // verify response - if err = func() error { - defer trace.StartRegion(ctx, "verifyResponse").End() - return response.Verify() - }(); err != nil { - return - } rows = newRows(&response) if queryType == types.WriteQuery { diff --git a/cmd/cql-minerd/various_metric_test.go b/cmd/cql-minerd/various_metric_test.go index ca24afd27..79fffda60 100644 --- a/cmd/cql-minerd/various_metric_test.go +++ b/cmd/cql-minerd/various_metric_test.go @@ -20,6 +20,7 @@ package main import ( "bytes" + "database/sql" "encoding/binary" "io/ioutil" "os" @@ -88,10 +89,8 @@ func BenchmarkDBWrite(b *testing.B) { if err == nil { defer strg.Close() } - state, err = x.NewState(n.ToRawNodeID().ToNodeID(), strg) - if err == nil { - defer state.Close(true) - } + state = x.NewState(sql.LevelReadUncommitted, n.ToRawNodeID().ToNodeID(), strg) + defer state.Close(true) b.ResetTimer() b.Run("commit", func(b *testing.B) { diff --git a/types/response_type.go b/types/response_type.go index 615e013eb..b89493657 100644 --- a/types/response_type.go +++ b/types/response_type.go @@ -72,8 +72,9 @@ type SignedResponseHeader struct { // Response defines a complete query response. 
type Response struct { - Header SignedResponseHeader `json:"h"` - Payload ResponsePayload `json:"p"` + Header SignedResponseHeader `json:"h"` + Payload ResponsePayload `json:"p"` + callback func(res *Response) } // Verify checks hash and signature in response header. @@ -87,31 +88,57 @@ func (sh *SignedResponseHeader) Sign(signer *asymmetric.PrivateKey) (err error) } // Verify checks hash and signature in whole response. -func (sh *Response) Verify() (err error) { +func (r *Response) Verify() (err error) { _, task := trace.NewTask(context.Background(), "ResponseVerify") defer task.End() // verify data hash in header - if err = verifyHash(&sh.Payload, &sh.Header.PayloadHash); err != nil { + if err = verifyHash(&r.Payload, &r.Header.PayloadHash); err != nil { return } - return sh.Header.Verify() + return r.Header.Verify() } -// Sign the request. -func (sh *Response) Sign(signer *asymmetric.PrivateKey) (err error) { +// Sign the response. +func (r *Response) Sign(signer *asymmetric.PrivateKey) (err error) { _, task := trace.NewTask(context.Background(), "ResponseSign") defer task.End() + if err = r.BuildHash(); err != nil { + return + } + + return r.SignHash(signer) +} + +// SignHash computes the signature of the response through existing hash. +func (r *Response) SignHash(signer *asymmetric.PrivateKey) (err error) { + return r.Header.SignHash(signer) +} + +// BuildHash computes the hash of the response. +func (r *Response) BuildHash() (err error) { // set rows count - sh.Header.RowCount = uint64(len(sh.Payload.Rows)) + r.Header.RowCount = uint64(len(r.Payload.Rows)) // build hash in header - if err = buildHash(&sh.Payload, &sh.Header.PayloadHash); err != nil { + if err = buildHash(&r.Payload, &r.Header.PayloadHash); err != nil { return } - // sign the request - return sh.Header.Sign(signer) + // compute header hash + return r.Header.SetHash(&r.Header.ResponseHeader) +} + +// SetResponseCallback stores callback function to process after response processed. 
+func (r *Response) SetResponseCallback(cb func(res *Response)) { + r.callback = cb +} + +// TriggerResponseCallback async executes callback. +func (r *Response) TriggerResponseCallback() { + if r.callback != nil { + go r.callback(r) + } } diff --git a/worker/db.go b/worker/db.go index 200e0a6f8..d71323de2 100644 --- a/worker/db.go +++ b/worker/db.go @@ -277,16 +277,28 @@ func (db *Database) Query(request *types.Request) (response *types.Response, err return nil, errors.Wrap(ErrInvalidRequest, "invalid query type") } - // Sign response - if err = response.Sign(db.privateKey); err != nil { - err = errors.Wrap(err, "failed to sign response") + // build hash + if err = response.BuildHash(); err != nil { + err = errors.Wrap(err, "failed to build response hash") return } - if err = db.chain.AddResponse(&response.Header); err != nil { - err = errors.Wrap(err, "failed to add response to index") - return - } - tracker.UpdateResp(response) + + func(privKey *asymmetric.PrivateKey, tracker *x.QueryTracker) { + response.SetResponseCallback(func(res *types.Response) { + var err error + // Sign response + if err = res.SignHash(privKey); err != nil { + log.WithError(err).Debug("failed to sign response") + return + } + if err = db.chain.AddResponse(&res.Header); err != nil { + log.WithError(err).Debug("failed to add response to index") + return + } + tracker.UpdateResp(res) + }) + }(db.privateKey, tracker) + return } diff --git a/worker/db_test.go b/worker/db_test.go index c59e33f07..12e634218 100644 --- a/worker/db_test.go +++ b/worker/db_test.go @@ -110,8 +110,6 @@ func TestSingleDatabase(t *testing.T) { res, err = db.Query(writeQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) // test show tables query var readQuery *types.Request @@ -122,8 +120,6 @@ func TestSingleDatabase(t *testing.T) { res, err = db.Query(readQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) So(res.Header.RowCount, ShouldEqual, uint64(1)) So(res.Payload.Rows, 
ShouldNotBeEmpty) @@ -138,8 +134,6 @@ func TestSingleDatabase(t *testing.T) { res, err = db.Query(readQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) So(res.Header.RowCount, ShouldEqual, uint64(1)) So(res.Payload.Rows, ShouldNotBeEmpty) @@ -154,8 +148,6 @@ func TestSingleDatabase(t *testing.T) { res, err = db.Query(readQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) So(res.Header.RowCount, ShouldEqual, uint64(1)) So(res.Payload.Rows, ShouldNotBeEmpty) @@ -172,8 +164,6 @@ func TestSingleDatabase(t *testing.T) { res, err = db.Query(readQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) So(res.Header.RowCount, ShouldEqual, uint64(2)) So(res.Payload.Rows, ShouldNotBeEmpty) @@ -190,8 +180,6 @@ func TestSingleDatabase(t *testing.T) { res, err = db.Query(readQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) So(res.Header.RowCount, ShouldEqual, uint64(2)) So(res.Payload.Rows, ShouldNotBeEmpty) @@ -208,8 +196,6 @@ func TestSingleDatabase(t *testing.T) { res, err = db.Query(readQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) So(res.Header.RowCount, ShouldEqual, uint64(1)) So(res.Payload.Rows, ShouldNotBeEmpty) @@ -229,8 +215,6 @@ func TestSingleDatabase(t *testing.T) { res, err = db.Query(writeQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) So(res.Header.RowCount, ShouldEqual, 0) // test select query @@ -242,8 +226,6 @@ func TestSingleDatabase(t *testing.T) { res, err = db.Query(readQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) So(res.Header.RowCount, ShouldEqual, uint64(1)) So(res.Payload.Columns, ShouldResemble, []string{"test"}) @@ -268,8 +250,6 @@ func TestSingleDatabase(t *testing.T) { // request once res, err = db.Query(writeQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) So(res.Header.RowCount, ShouldEqual, 0) // request again with same sequence @@ -315,8 +295,6 @@ func 
TestSingleDatabase(t *testing.T) { So(err, ShouldBeNil) res, err = db.Query(readQuery) - err = res.Verify() - So(err, ShouldBeNil) So(res.Header.RowCount, ShouldEqual, uint64(2)) So(res.Payload.Columns, ShouldResemble, []string{"test"}) @@ -358,8 +336,6 @@ func TestSingleDatabase(t *testing.T) { So(err, ShouldBeNil) res, err = db.Query(readQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) So(res.Header.RowCount, ShouldEqual, uint64(0)) So(res.Payload.Columns, ShouldResemble, []string{"test"}) @@ -381,8 +357,10 @@ func TestSingleDatabase(t *testing.T) { So(err, ShouldBeNil) res, err = db.Query(readQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) + + res.TriggerResponseCallback() + // wait for callback to sign signature + time.Sleep(time.Millisecond * 10) So(res.Header.RowCount, ShouldEqual, uint64(1)) So(res.Payload.Columns, ShouldResemble, []string{"test"}) @@ -525,8 +503,6 @@ func TestDatabaseRecycle(t *testing.T) { res, err = db.Query(writeQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) So(res.Header.RowCount, ShouldEqual, 0) // test select query @@ -538,8 +514,6 @@ func TestDatabaseRecycle(t *testing.T) { res, err = db.Query(readQuery) So(err, ShouldBeNil) - err = res.Verify() - So(err, ShouldBeNil) So(res.Header.RowCount, ShouldEqual, uint64(1)) So(res.Payload.Columns, ShouldResemble, []string{"test"}) diff --git a/worker/dbms_rpc.go b/worker/dbms_rpc.go index e3d5dda3b..bbdcc518d 100644 --- a/worker/dbms_rpc.go +++ b/worker/dbms_rpc.go @@ -95,6 +95,7 @@ func (rpc *DBMSRPCService) Query(req *types.Request, res *types.Response) (err e } *res = *r + r.TriggerResponseCallback() dbQuerySuccCounter.Mark(1) return diff --git a/worker/dbms_test.go b/worker/dbms_test.go index 92309ca6b..a71cb7b65 100644 --- a/worker/dbms_test.go +++ b/worker/dbms_test.go @@ -163,8 +163,6 @@ func TestDBMS(t *testing.T) { err = testRequest(route.DBSQuery, writeQuery, &queryRes) So(err, ShouldBeNil) - err = 
queryRes.Verify() - So(err, ShouldBeNil) So(queryRes.Header.RowCount, ShouldEqual, 0) // sending read query @@ -178,8 +176,6 @@ func TestDBMS(t *testing.T) { err = testRequest(route.DBSQuery, readQuery, &queryRes) So(err, ShouldBeNil) - err = queryRes.Verify() - So(err, ShouldBeNil) So(queryRes.Header.RowCount, ShouldEqual, uint64(1)) So(queryRes.Payload.Columns, ShouldResemble, []string{"test"}) So(queryRes.Payload.DeclTypes, ShouldResemble, []string{"int"}) @@ -387,8 +383,6 @@ func TestDBMS(t *testing.T) { err = testRequest(route.DBSQuery, writeQuery, &queryRes) So(err, ShouldBeNil) - err = queryRes.Verify() - So(err, ShouldBeNil) So(queryRes.Header.RowCount, ShouldEqual, 0) // sending read query @@ -402,8 +396,6 @@ func TestDBMS(t *testing.T) { err = testRequest(route.DBSQuery, readQuery, &queryRes) So(err, ShouldBeNil) - err = queryRes.Verify() - So(err, ShouldBeNil) So(queryRes.Header.RowCount, ShouldEqual, uint64(1)) So(queryRes.Payload.Columns, ShouldResemble, []string{"test"}) So(queryRes.Payload.DeclTypes, ShouldResemble, []string{"int"}) diff --git a/xenomint/state_test.go b/xenomint/state_test.go index 58bb000f3..48965d2e4 100644 --- a/xenomint/state_test.go +++ b/xenomint/state_test.go @@ -698,7 +698,7 @@ func TestSerializableState(t *testing.T) { }) resp *types.Response ) - _, resp, err = state.Query(req) + _, resp, err = state.Query(req, true) So(err, ShouldBeNil) So(resp, ShouldNotBeNil) Convey("The state should not see uncommitted changes", func(c C) { @@ -728,7 +728,7 @@ func TestSerializableState(t *testing.T) { go func() { defer wg.Done() for { - var _, resp, err = state.Query(req) + var _, resp, err = state.Query(req, true) c.So(err, ShouldBeNil) c.So(resp.Header.RowCount, ShouldEqual, 0) select { @@ -742,7 +742,7 @@ func TestSerializableState(t *testing.T) { for i := 0; i < count; i++ { _, resp, err = state.Query(buildRequest(types.ReadQuery, []types.Query{ buildQuery(`SELECT COUNT(1) AS cnt FROM t1`), - })) + }), true) So(resp.Payload, 
ShouldResemble, types.ResponsePayload{ Columns: []string{"cnt"}, DeclTypes: []string{""}, From cb3a864106867b8c6e0532b6f20a0419ca349b00 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 29 Jan 2019 14:47:41 +0800 Subject: [PATCH 288/302] Remove useless traces --- client/conn.go | 13 ++--------- rpc/clientpool_caller.go | 50 +++++++++++++++------------------------- rpc/rpcutil_test.go | 3 --- types/request_type.go | 8 ------- types/response_type.go | 8 ------- worker/db.go | 5 ---- 6 files changed, 20 insertions(+), 67 deletions(-) diff --git a/client/conn.go b/client/conn.go index 56d87313c..b90370226 100644 --- a/client/conn.go +++ b/client/conn.go @@ -353,8 +353,6 @@ func (c *conn) addQuery(ctx context.Context, queryType types.QueryType, query *t } func (c *conn) sendQuery(ctx context.Context, queryType types.QueryType, queries []types.Query) (affectedRows int64, lastInsertID int64, rows driver.Rows, err error) { - ctx, task := trace.NewTask(ctx, "sendQuery") - defer task.End() var uc *pconn // peer connection used to execute the queries uc = c.leader @@ -398,19 +396,12 @@ func (c *conn) sendQuery(ctx context.Context, queryType types.QueryType, queries }, } - if err = func() error { - defer trace.StartRegion(ctx, "signRequest").End() - return req.Sign(c.privKey) - }(); err != nil { + if err = req.Sign(c.privKey); err != nil { return } var response types.Response - if err = func() error { - // writeQuery region - defer trace.StartRegion(ctx, queryType.String()+"Query").End() - return uc.pCaller.Call(route.DBSQuery.String(), req, &response) - }(); err != nil { + if err = uc.pCaller.Call(route.DBSQuery.String(), req, &response); err != nil { return } rows = newRows(&response) diff --git a/rpc/clientpool_caller.go b/rpc/clientpool_caller.go index a33822167..aa546e25c 100644 --- a/rpc/clientpool_caller.go +++ b/rpc/clientpool_caller.go @@ -17,16 +17,15 @@ package rpc import ( - "context" - "github.com/CovenantSQL/CovenantSQL/proto" - 
"github.com/CovenantSQL/CovenantSQL/route" - "github.com/CovenantSQL/CovenantSQL/utils/trace" - "github.com/pkg/errors" "io" "net" "net/rpc" "strings" "sync" + + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/route" + "github.com/pkg/errors" ) // ClientPoolCaller is a wrapper for session pooling and client pooling. @@ -102,42 +101,29 @@ func (c *ClientPoolCaller) allocClient(isAnonymous bool) (client *Client, err er } func (c *ClientPoolCaller) Call(method string, args interface{}, reply interface{}) (err error) { - ctx, task := trace.NewTask(context.Background(), "Call"+method) - defer task.End() - var ( isAnonymous = method == route.DHTPing.String() client *Client ) - func() { - defer trace.StartRegion(ctx, "allocClient").End() - client, err = c.allocClient(isAnonymous) - }() + client, err = c.allocClient(isAnonymous) if err != nil { return } - func() { - defer trace.StartRegion(ctx, "realCall").End() - err = client.Call(method, args, reply) - }() - - func() { - defer trace.StartRegion(ctx, "cleanup").End() - if !isAnonymous && (err == nil || (err != io.EOF && - err != io.ErrUnexpectedEOF && - err != io.ErrClosedPipe && - err != rpc.ErrShutdown && - !strings.Contains(strings.ToLower(err.Error()), "shut down") && - !strings.Contains(strings.ToLower(err.Error()), "broken pipe"))) { - // put back connection - c.clientPool.Put(client) - } else { - // close - client.Close() - } - }() + err = client.Call(method, args, reply) + if !isAnonymous && (err == nil || (err != io.EOF && + err != io.ErrUnexpectedEOF && + err != io.ErrClosedPipe && + err != rpc.ErrShutdown && + !strings.Contains(strings.ToLower(err.Error()), "shut down") && + !strings.Contains(strings.ToLower(err.Error()), "broken pipe"))) { + // put back connection + c.clientPool.Put(client) + } else { + // close + client.Close() + } if err != nil { err = errors.Wrapf(err, "call %s failed", method) diff --git a/rpc/rpcutil_test.go b/rpc/rpcutil_test.go index ea26405de..088b14dca 
100644 --- a/rpc/rpcutil_test.go +++ b/rpc/rpcutil_test.go @@ -34,7 +34,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" - "github.com/CovenantSQL/CovenantSQL/utils/trace" . "github.com/smartystreets/goconvey/convey" ) @@ -333,14 +332,12 @@ func BenchmarkPersistentCaller_CallKayakLog(b *testing.B) { b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { - _, task := trace.NewTask(context.Background(), "callOnce") req := &FakeRequest{} req.Log.Data = []byte(strings.Repeat("1", 500)) err = client.Call("Test.Call", req, nil) if err != nil { b.Error(err) } - task.End() } }) b.StopTimer() diff --git a/types/request_type.go b/types/request_type.go index ea3f05e8a..12f121277 100644 --- a/types/request_type.go +++ b/types/request_type.go @@ -17,7 +17,6 @@ package types import ( - "context" "fmt" "time" @@ -25,7 +24,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/verifier" "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/utils/trace" ) //go:generate hsp @@ -130,9 +128,6 @@ func (sh *SignedRequestHeader) Sign(signer *asymmetric.PrivateKey) (err error) { // Verify checks hash and signature in whole request. func (r *Request) Verify() (err error) { - _, task := trace.NewTask(context.Background(), "RequestVerify") - defer task.End() - // verify payload hash in signed header if err = verifyHash(&r.Payload, &r.Header.QueriesHash); err != nil { return @@ -143,9 +138,6 @@ func (r *Request) Verify() (err error) { // Sign the request. 
func (r *Request) Sign(signer *asymmetric.PrivateKey) (err error) { - _, task := trace.NewTask(context.Background(), "RequestSign") - defer task.End() - // set query count r.Header.BatchCount = uint64(len(r.Payload.Queries)) diff --git a/types/response_type.go b/types/response_type.go index b89493657..33da3ff24 100644 --- a/types/response_type.go +++ b/types/response_type.go @@ -17,14 +17,12 @@ package types import ( - "context" "time" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/verifier" "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/utils/trace" ) //go:generate hsp @@ -89,9 +87,6 @@ func (sh *SignedResponseHeader) Sign(signer *asymmetric.PrivateKey) (err error) // Verify checks hash and signature in whole response. func (r *Response) Verify() (err error) { - _, task := trace.NewTask(context.Background(), "ResponseVerify") - defer task.End() - // verify data hash in header if err = verifyHash(&r.Payload, &r.Header.PayloadHash); err != nil { return @@ -102,9 +97,6 @@ func (r *Response) Verify() (err error) { // Sign the response. 
func (r *Response) Sign(signer *asymmetric.PrivateKey) (err error) { - _, task := trace.NewTask(context.Background(), "ResponseSign") - defer task.End() - if err = r.BuildHash(); err != nil { return } diff --git a/worker/db.go b/worker/db.go index d71323de2..70ae53986 100644 --- a/worker/db.go +++ b/worker/db.go @@ -35,7 +35,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/storage" "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils/log" - "github.com/CovenantSQL/CovenantSQL/utils/trace" x "github.com/CovenantSQL/CovenantSQL/xenomint" "github.com/pkg/errors" ) @@ -250,15 +249,11 @@ func (db *Database) Query(request *types.Request) (response *types.Response, err switch request.Header.QueryType { case types.ReadQuery: - _, task := trace.NewTask(context.Background(), "ReadQuery") - defer task.End() if tracker, response, err = db.chain.Query(request, false); err != nil { err = errors.Wrap(err, "failed to query read query") return } case types.WriteQuery: - _, task := trace.NewTask(context.Background(), "WriteQuery") - defer task.End() if db.cfg.UseEventualConsistency { // reset context request.SetContext(context.Background()) From e7153ca9f31b1f2e7b5cc5b75e9aa5fc1325ba4f Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 29 Jan 2019 15:00:21 +0800 Subject: [PATCH 289/302] Disable trace on pprof --- cmd/cql-minerd/integration_test.go | 6 +++--- cmd/cql-minerd/pprof.sh | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index 22ec57b00..6ec7a37e5 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -264,7 +264,7 @@ func startNodesProfile(bypassSign bool) { FJ(baseDir, "./bin/cql-minerd"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_0/config.yaml"), "-cpu-profile", FJ(baseDir, "./cmd/cql-minerd/miner0.profile"), - "-trace-file", FJ(baseDir, "./cmd/cql-minerd/miner0.trace"), + //"-trace-file", 
FJ(baseDir, "./cmd/cql-minerd/miner0.trace"), "-metric-graphite-server", "192.168.2.100:2003", "-profile-server", "0.0.0.0:8080", "-metric-log", @@ -282,7 +282,7 @@ func startNodesProfile(bypassSign bool) { FJ(baseDir, "./bin/cql-minerd"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_1/config.yaml"), "-cpu-profile", FJ(baseDir, "./cmd/cql-minerd/miner1.profile"), - "-trace-file", FJ(baseDir, "./cmd/cql-minerd/miner1.trace"), + //"-trace-file", FJ(baseDir, "./cmd/cql-minerd/miner1.trace"), "-metric-graphite-server", "192.168.2.100:2003", "-profile-server", "0.0.0.0:8081", "-metric-log", @@ -300,7 +300,7 @@ func startNodesProfile(bypassSign bool) { FJ(baseDir, "./bin/cql-minerd"), []string{"-config", FJ(testWorkingDir, "./integration/node_miner_2/config.yaml"), "-cpu-profile", FJ(baseDir, "./cmd/cql-minerd/miner2.profile"), - "-trace-file", FJ(baseDir, "./cmd/cql-minerd/miner2.trace"), + //"-trace-file", FJ(baseDir, "./cmd/cql-minerd/miner2.trace"), "-metric-graphite-server", "192.168.2.100:2003", "-profile-server", "0.0.0.0:8082", "-metric-log", diff --git a/cmd/cql-minerd/pprof.sh b/cmd/cql-minerd/pprof.sh index c47e218b2..4eb3555b5 100755 --- a/cmd/cql-minerd/pprof.sh +++ b/cmd/cql-minerd/pprof.sh @@ -4,7 +4,7 @@ make -C ../../ clean make -C ../../ use_all_cores -go test -bench=^BenchmarkMinerTwo$ -benchtime=15s -run ^$ -trace client.trace +go test -bench=^BenchmarkMinerTwo$ -benchtime=15s -run ^$ go tool pprof -text miner1.profile > pprof.txt go tool pprof -svg miner1.profile > tree.svg go-torch -t 180 --width=2400 miner1.profile From 9c90ce6236eff6e24c881a19fa379c021925ba97 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 29 Jan 2019 16:44:09 +0800 Subject: [PATCH 290/302] Golint issues --- rpc/clientpool_caller.go | 2 ++ rpc/pool.go | 1 + 2 files changed, 3 insertions(+) diff --git a/rpc/clientpool_caller.go b/rpc/clientpool_caller.go index aa546e25c..53da8e827 100644 --- a/rpc/clientpool_caller.go +++ b/rpc/clientpool_caller.go @@ -100,6 
+100,7 @@ func (c *ClientPoolCaller) allocClient(isAnonymous bool) (client *Client, err er return } +// Call issues rpc request to target node. func (c *ClientPoolCaller) Call(method string, args interface{}, reply interface{}) (err error) { var ( isAnonymous = method == route.DHTPing.String() @@ -132,6 +133,7 @@ func (c *ClientPoolCaller) Call(method string, args interface{}, reply interface return } +// Close does not do anything for client pool caller. func (c *ClientPoolCaller) Close() { // free pool } diff --git a/rpc/pool.go b/rpc/pool.go index 761fa5244..963587412 100644 --- a/rpc/pool.go +++ b/rpc/pool.go @@ -25,6 +25,7 @@ import ( mux "github.com/xtaci/smux" ) +// MaxPhysicalConnection defines max underlying physical connection for one node pair. const MaxPhysicalConnection = 10 // SessPool is the session pool interface From 2748d31e79e932774a2d373828eda30af167bf9a Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 29 Jan 2019 16:51:14 +0800 Subject: [PATCH 291/302] Move magic number to conf/limit --- conf/limits.go | 2 ++ rpc/pool.go | 6 ++---- rpc/pool_test.go | 11 ++++++----- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/conf/limits.go b/conf/limits.go index 8c12c08cb..1c7499c7e 100644 --- a/conf/limits.go +++ b/conf/limits.go @@ -24,4 +24,6 @@ const ( MaxPendingTxsPerAccount = 1000 // MaxTransactionsPerBlock defines the limit of transactions per block. MaxTransactionsPerBlock = 10000 + // MaxRPCPoolPhysicalConnection defines max underlying physical connection for one node pair. + MaxRPCPoolPhysicalConnection = 10 ) diff --git a/rpc/pool.go b/rpc/pool.go index 963587412..fa19853a0 100644 --- a/rpc/pool.go +++ b/rpc/pool.go @@ -20,14 +20,12 @@ import ( "net" "sync" + "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/pkg/errors" mux "github.com/xtaci/smux" ) -// MaxPhysicalConnection defines max underlying physical connection for one node pair. 
-const MaxPhysicalConnection = 10 - // SessPool is the session pool interface type SessPool interface { Get(proto.NodeID) (net.Conn, error) @@ -77,7 +75,7 @@ func (s *Session) Get() (conn net.Conn, err error) { s.Lock() defer s.Unlock() s.offset++ - s.offset %= MaxPhysicalConnection + s.offset %= conf.MaxRPCPoolPhysicalConnection var ( sess *mux.Session diff --git a/rpc/pool_test.go b/rpc/pool_test.go index a60da6fab..7a0e5b124 100644 --- a/rpc/pool_test.go +++ b/rpc/pool_test.go @@ -22,6 +22,7 @@ import ( "sync" "testing" + "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" @@ -32,7 +33,7 @@ import ( const ( localAddr = "127.0.0.1:4444" localAddr2 = "127.0.0.1:4445" - concurrency = 15 + concurrency = conf.MaxRPCPoolPhysicalConnection + 1 packetCount = 100 ) @@ -151,12 +152,12 @@ func TestNewSessionPool(t *testing.T) { } wg.Wait() - So(p.Len(), ShouldEqual, MaxPhysicalConnection) + So(p.Len(), ShouldEqual, conf.MaxRPCPoolPhysicalConnection) server(c, localAddr2, packetCount) _, err := p.Get(proto.NodeID(localAddr2)) So(err, ShouldBeNil) - So(p.Len(), ShouldEqual, MaxPhysicalConnection+1) + So(p.Len(), ShouldEqual, conf.MaxRPCPoolPhysicalConnection+1) wg2 := &sync.WaitGroup{} wg2.Add(concurrency) @@ -181,10 +182,10 @@ func TestNewSessionPool(t *testing.T) { } wg2.Wait() - So(p.Len(), ShouldEqual, MaxPhysicalConnection*2) + So(p.Len(), ShouldEqual, conf.MaxRPCPoolPhysicalConnection*2) p.Remove(proto.NodeID(localAddr2)) - So(p.Len(), ShouldEqual, MaxPhysicalConnection) + So(p.Len(), ShouldEqual, conf.MaxRPCPoolPhysicalConnection) p.Close() So(p.Len(), ShouldEqual, 0) From 17ab35304509e072e094f5f568c67c42f6204c80 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 29 Jan 2019 17:07:03 +0800 Subject: [PATCH 292/302] Remove response verify test case --- worker/dbms_test.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/worker/dbms_test.go 
b/worker/dbms_test.go index a71cb7b65..5984bb6c9 100644 --- a/worker/dbms_test.go +++ b/worker/dbms_test.go @@ -274,8 +274,6 @@ func TestDBMS(t *testing.T) { err = testRequest(route.DBSQuery, writeQuery, &queryRes) So(err, ShouldBeNil) - err = queryRes.Verify() - So(err, ShouldBeNil) So(queryRes.Header.RowCount, ShouldEqual, 0) }) }) @@ -445,8 +443,6 @@ func TestDBMS(t *testing.T) { err = testRequest(route.DBSQuery, writeQuery, &queryRes) So(err, ShouldBeNil) - err = queryRes.Verify() - So(err, ShouldBeNil) So(queryRes.Header.RowCount, ShouldEqual, 0) // sending allowed read query @@ -460,8 +456,6 @@ func TestDBMS(t *testing.T) { err = testRequest(route.DBSQuery, readQuery, &queryRes) So(err, ShouldBeNil) - err = queryRes.Verify() - So(err, ShouldBeNil) So(queryRes.Header.RowCount, ShouldEqual, uint64(1)) So(queryRes.Payload.Rows, ShouldHaveLength, 1) So(queryRes.Payload.Rows[0].Values, ShouldHaveLength, 1) From a00c0e7ea8d9034300b193011db68ef5980f21a1 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 29 Jan 2019 17:08:04 +0800 Subject: [PATCH 293/302] Parallel ci test --- alltest.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/alltest.sh b/alltest.sh index 27a419cdf..fd59391dd 100755 --- a/alltest.sh +++ b/alltest.sh @@ -23,9 +23,11 @@ main() { # test package by package for package in $(go list ./... 
| grep -v "/vendor/"); do - test::package "${package}" + test::package "${package}" & done + wait + set -x gocovmerge *.cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f *.cover.out bash <(curl -s https://codecov.io/bash) From 134bf7917f26ee6ac588f5c1d9393f4aee28073d Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 29 Jan 2019 17:11:10 +0800 Subject: [PATCH 294/302] Disable coverpkg all --- alltest.sh | 22 ++-------------------- 1 file changed, 2 insertions(+), 20 deletions(-) diff --git a/alltest.sh b/alltest.sh index fd59391dd..e49a8d54b 100755 --- a/alltest.sh +++ b/alltest.sh @@ -4,32 +4,14 @@ set -o errexit set -o pipefail set -o nounset -test::package() { - local package="${1:-notset}" - - if [[ "${package}" == "notset" ]]; then - &>2 echo "empty package name" - exit 1 - fi - - local coverage_file="${package//\//.}.cover.out" - echo "[TEST] package=${package}, coverage=${coverage_file}" - go test -tags "$UNITTESTTAGS" -race -failfast -parallel 16 -cpu 16 -coverpkg="github.com/CovenantSQL/CovenantSQL/..." -coverprofile "${coverage_file}" "${package}" -} - main() { make clean make -j6 bp miner observer - # test package by package - for package in $(go list ./... | grep -v "/vendor/"); do - test::package "${package}" & - done - - wait + go test -tags "$UNITTESTTAGS" -race -failfast -parallel 16 -cpu 16 -coverprofile main.cover.out ./... 
set -x - gocovmerge *.cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f *.cover.out + gocovmerge main.cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f *.cover.out bash <(curl -s https://codecov.io/bash) # some benchmarks From aadc90d781aac5091014f532f963a2486b0d0c30 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 29 Jan 2019 17:24:23 +0800 Subject: [PATCH 295/302] Cover special packages for api module --- alltest.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/alltest.sh b/alltest.sh index e49a8d54b..b762c25d6 100755 --- a/alltest.sh +++ b/alltest.sh @@ -8,10 +8,11 @@ main() { make clean make -j6 bp miner observer - go test -tags "$UNITTESTTAGS" -race -failfast -parallel 16 -cpu 16 -coverprofile main.cover.out ./... + go test -tags "$UNITTESTTAGS" -race -failfast -parallel 16 -cpu 16 -coverprofile main.cover.out $(go list ./... | grep -v CovenantSQL/api) + go test -tags "$UNITTESTTAGS" -race -failfast -parallel 16 -cpu 16 -coverpkg ./api/...,./rpc/jsonrpc -coverprofile api.cover.out ./api/... set -x - gocovmerge main.cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f *.cover.out + gocovmerge main.cover.out api.cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f *.cover.out bash <(curl -s https://codecov.io/bash) # some benchmarks From 149ce725c184a5e809b2b28b919b2616c899c517 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 29 Jan 2019 19:03:50 +0800 Subject: [PATCH 296/302] Update per client pool physical connection limits --- conf/limits.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/conf/limits.go b/conf/limits.go index 1c7499c7e..36359a024 100644 --- a/conf/limits.go +++ b/conf/limits.go @@ -25,5 +25,5 @@ const ( // MaxTransactionsPerBlock defines the limit of transactions per block. 
MaxTransactionsPerBlock = 10000 // MaxRPCPoolPhysicalConnection defines max underlying physical connection for one node pair. - MaxRPCPoolPhysicalConnection = 10 + MaxRPCPoolPhysicalConnection = 2 ) From bef3bf3ec603a8c169d830c3c6c933ba9b86f5a5 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Wed, 30 Jan 2019 11:42:13 +0800 Subject: [PATCH 297/302] Separate read/write stream iv in etls cryptoConn --- crypto/etls/conn.go | 3 --- crypto/etls/encrypt.go | 12 +++--------- 2 files changed, 3 insertions(+), 12 deletions(-) diff --git a/crypto/etls/conn.go b/crypto/etls/conn.go index eca5c6ebf..f73a3fba8 100644 --- a/crypto/etls/conn.go +++ b/crypto/etls/conn.go @@ -76,9 +76,6 @@ func (c *CryptoConn) Read(b []byte) (n int, err error) { if err = c.initDecrypt(iv); err != nil { return } - if len(c.iv) == 0 { - c.iv = iv - } c.decrypt(header, header) if header[0] != ETLSMagicBytes[0] || header[1] != ETLSMagicBytes[1] { err = errors.New("bad stream ETLS header") diff --git a/crypto/etls/encrypt.go b/crypto/etls/encrypt.go index c50fd9249..34e0eaf39 100644 --- a/crypto/etls/encrypt.go +++ b/crypto/etls/encrypt.go @@ -86,7 +86,6 @@ type Cipher struct { decStream cipher.Stream key []byte info *cipherInfo - iv []byte } // NewCipher creates a cipher that can be used in Dial(), Listen() etc. @@ -109,14 +108,9 @@ func NewCipher(rawKey []byte) (c *Cipher) { // initEncrypt Initializes the block cipher with CFB mode, returns IV. 
func (c *Cipher) initEncrypt() (iv []byte, err error) { - if c.iv == nil { - iv = make([]byte, c.info.ivLen) - if _, err := io.ReadFull(rand.Reader, iv); err != nil { - return nil, err - } - c.iv = iv - } else { - iv = c.iv + iv = make([]byte, c.info.ivLen) + if _, err := io.ReadFull(rand.Reader, iv); err != nil { + return nil, err } c.encStream, err = c.info.newEncStream(c.key, iv) return From 8a4e350573534b234b703f8c3004c992b9d1adf2 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Wed, 30 Jan 2019 16:27:59 +0800 Subject: [PATCH 298/302] Rollback rpc client pool --- client/conn.go | 10 +++++----- kayak/utils.go | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/client/conn.go b/client/conn.go index b90370226..5715cad09 100644 --- a/client/conn.go +++ b/client/conn.go @@ -54,7 +54,7 @@ type conn struct { type pconn struct { parent *conn ackCh chan *types.Ack - pCaller *rpc.ClientPoolCaller + pCaller *rpc.PersistentCaller } func newConn(cfg *Config) (c *conn, err error) { @@ -86,7 +86,7 @@ func newConn(cfg *Config) (c *conn, err error) { if cfg.UseLeader { c.leader = &pconn{ parent: c, - pCaller: rpc.NewClientPoolCaller(peers.Leader), + pCaller: rpc.NewPersistentCaller(peers.Leader), } } @@ -97,7 +97,7 @@ func newConn(cfg *Config) (c *conn, err error) { if node != peers.Leader { c.follower = &pconn{ parent: c, - pCaller: rpc.NewClientPoolCaller(node), + pCaller: rpc.NewPersistentCaller(node), } break } @@ -138,7 +138,7 @@ func (c *pconn) stopAckWorkers() { func (c *pconn) ackWorker() { var ( oneTime sync.Once - pc *rpc.ClientPoolCaller + pc *rpc.PersistentCaller err error ) @@ -149,7 +149,7 @@ ackWorkerLoop: break ackWorkerLoop } oneTime.Do(func() { - pc = rpc.NewClientPoolCaller(c.pCaller.TargetID) + pc = rpc.NewPersistentCaller(c.pCaller.TargetID) }) if err = ack.Sign(c.parent.privKey); err != nil { log.WithField("target", pc.TargetID).WithError(err).Error("failed to sign ack") diff --git a/kayak/utils.go b/kayak/utils.go index eef955df0..71ffa6d18 
100644 --- a/kayak/utils.go +++ b/kayak/utils.go @@ -25,7 +25,7 @@ import ( ) func (r *Runtime) getCaller(id proto.NodeID) Caller { - var caller Caller = rpc.NewClientPoolCaller(id) + var caller Caller = rpc.NewPersistentCaller(id) rawCaller, _ := r.callerMap.LoadOrStore(id, caller) return rawCaller.(Caller) } From aa562babf41911177835492d93806a2de23ed9a5 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Wed, 30 Jan 2019 16:48:35 +0800 Subject: [PATCH 299/302] Remove client pool caller --- rpc/clientpool_caller.go | 139 --------------------------------------- 1 file changed, 139 deletions(-) delete mode 100644 rpc/clientpool_caller.go diff --git a/rpc/clientpool_caller.go b/rpc/clientpool_caller.go deleted file mode 100644 index 53da8e827..000000000 --- a/rpc/clientpool_caller.go +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright 2019 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package rpc - -import ( - "io" - "net" - "net/rpc" - "strings" - "sync" - - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/route" - "github.com/pkg/errors" -) - -// ClientPoolCaller is a wrapper for session pooling and client pooling. -type ClientPoolCaller struct { - sessPool *SessionPool - clientPool sync.Pool - TargetID proto.NodeID -} - -// NewClientPoolCaller returns a client/session pool caller. 
-func NewClientPoolCaller(target proto.NodeID) (c *ClientPoolCaller) { - c = &ClientPoolCaller{ - TargetID: target, - sessPool: GetSessionPoolInstance(), - } - - c.clientPool = sync.Pool{ - New: c.initClient, - } - - return -} - -func (c *ClientPoolCaller) initClient() interface{} { - var ( - client *Client - err error - ) - - client, err = c.initClientEx(false) - if err != nil { - return err - } - - return client -} - -func (c *ClientPoolCaller) initClientEx(isAnonymous bool) (client *Client, err error) { - var conn net.Conn - conn, err = DialToNode(c.TargetID, c.sessPool, isAnonymous) - if err != nil { - err = errors.Wrap(err, "dial to node failed") - return - } - client, err = InitClientConn(conn) - if err != nil { - err = errors.Wrap(err, "init RPC client failed") - return - } - return -} - -func (c *ClientPoolCaller) allocClient(isAnonymous bool) (client *Client, err error) { - if isAnonymous { - return c.initClientEx(true) - } - - rawClient := c.clientPool.Get() - - if rawClient == nil { - err = errors.New("no available client") - return - } - - switch v := rawClient.(type) { - case *Client: - client = v - case error: - err = v - } - - return -} - -// Call issues rpc request to target node. 
-func (c *ClientPoolCaller) Call(method string, args interface{}, reply interface{}) (err error) { - var ( - isAnonymous = method == route.DHTPing.String() - client *Client - ) - - client, err = c.allocClient(isAnonymous) - if err != nil { - return - } - - err = client.Call(method, args, reply) - if !isAnonymous && (err == nil || (err != io.EOF && - err != io.ErrUnexpectedEOF && - err != io.ErrClosedPipe && - err != rpc.ErrShutdown && - !strings.Contains(strings.ToLower(err.Error()), "shut down") && - !strings.Contains(strings.ToLower(err.Error()), "broken pipe"))) { - // put back connection - c.clientPool.Put(client) - } else { - // close - client.Close() - } - - if err != nil { - err = errors.Wrapf(err, "call %s failed", method) - } - - return -} - -// Close does not do anything for client pool caller. -func (c *ClientPoolCaller) Close() { - // free pool -} From 5270893023ad8a8bd7373f90c2ba8c922406de41 Mon Sep 17 00:00:00 2001 From: auxten Date: Wed, 30 Jan 2019 17:49:20 +0800 Subject: [PATCH 300/302] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index be0318fef..b322ab190 100644 --- a/README.md +++ b/README.md @@ -93,7 +93,7 @@ that inspired us: #### Connector -CovenantSQL is still under construction and Testnet is already released, [have a try](https://testnet.covenantsql.io/). +CovenantSQL is still under construction and Testnet is already released, [have a try](https://developers.covenantsql.io/docs/quickstart). 
- [Golang](client/) From c71a2c1298bfd44bfdc27f4002851ce298861f46 Mon Sep 17 00:00:00 2001 From: auxten Date: Wed, 30 Jan 2019 17:49:49 +0800 Subject: [PATCH 301/302] Update README-zh.md --- README-zh.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README-zh.md b/README-zh.md index ca6412924..e2e7ea681 100644 --- a/README-zh.md +++ b/README-zh.md @@ -110,7 +110,7 @@ sql.Open("CovenantSQL", dbURI) #### 接口 -CovenantSQL仍在建设中,测试网已经发布,[尝试一下](https://testnet.covenantsql.io/). +CovenantSQL仍在建设中,测试网已经发布,[尝试一下](https://developers.covenantsql.io/docs/quickstart). - [Golang](client/) From 19eb4fe272b1d90c0116f4091436bc3f76287a43 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Wed, 30 Jan 2019 18:34:25 +0800 Subject: [PATCH 302/302] Update response structure, replace signature/key with simple hash --- cmd/cql-minerd/various_metric_test.go | 25 ++++--- cmd/cql-observer/service.go | 3 +- sqlchain/chain.go | 5 +- sqlchain/xxx_test.go | 2 +- types/response_type.go | 94 ++++++++++++--------------- types/response_type_gen.go | 19 ++++-- types/types_test.go | 14 ++-- types/xxx_test.go | 2 +- worker/db.go | 30 ++++----- worker/db_test.go | 1 - worker/dbms_rpc.go | 1 - xenomint/chain.go | 2 +- 12 files changed, 93 insertions(+), 105 deletions(-) diff --git a/cmd/cql-minerd/various_metric_test.go b/cmd/cql-minerd/various_metric_test.go index 79fffda60..aabe5ced6 100644 --- a/cmd/cql-minerd/various_metric_test.go +++ b/cmd/cql-minerd/various_metric_test.go @@ -27,6 +27,7 @@ import ( "testing" "time" + "github.com/CovenantSQL/CovenantSQL/crypto" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" kt "github.com/CovenantSQL/CovenantSQL/kayak/types" kw "github.com/CovenantSQL/CovenantSQL/kayak/wal" @@ -194,14 +195,14 @@ func BenchmarkSignSignature(b *testing.B) { b.ResetTimer() b.Run("sign nested", func(b *testing.B) { for i := 0; i != b.N; i++ { - err = rs.Sign(priv) + err = rs.BuildHash() } }) b.ResetTimer() b.Run("verify nested", func(b *testing.B) { for i := 0; i != 
b.N; i++ { - err = rs.Verify() + err = rs.VerifyHash() } }) @@ -336,17 +337,21 @@ func TestComputeMetrics(t *testing.T) { t.Logf("PrepareLogSize: %v", len(buf2.Bytes())) + respNodeAddr, err := crypto.PubKeyHash(priv.PubKey()) + So(err, ShouldBeNil) + rs := &types.Response{ Header: types.SignedResponseHeader{ ResponseHeader: types.ResponseHeader{ - Request: r.Header.RequestHeader, - RequestHash: r.Header.Hash(), - NodeID: n.ToRawNodeID().ToNodeID(), - Timestamp: time.Now().UTC(), - RowCount: 1, - LogOffset: 1, - LastInsertID: 1, - AffectedRows: 1, + Request: r.Header.RequestHeader, + RequestHash: r.Header.Hash(), + NodeID: n.ToRawNodeID().ToNodeID(), + ResponseAccount: respNodeAddr, + Timestamp: time.Now().UTC(), + RowCount: 1, + LogOffset: 1, + LastInsertID: 1, + AffectedRows: 1, }, }, } diff --git a/cmd/cql-observer/service.go b/cmd/cql-observer/service.go index 153e3c339..a2465a368 100644 --- a/cmd/cql-observer/service.go +++ b/cmd/cql-observer/service.go @@ -35,7 +35,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/CovenantSQL/CovenantSQL/worker" - bolt "github.com/coreos/bbolt" ) @@ -342,7 +341,7 @@ func (s *Service) addQueryTracker(dbID proto.DatabaseID, height int32, offset in if err = qt.Request.Verify(); err != nil { return } - if err = qt.Response.Verify(); err != nil { + if err = qt.Response.VerifyHash(); err != nil { return } diff --git a/sqlchain/chain.go b/sqlchain/chain.go index 6684aad22..2ee56f765 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -1276,10 +1276,7 @@ func (c *Chain) billing(node *blockNode) (ub *types.UpdateBilling, err error) { } } for _, tx := range block.QueryTxs { - if minerAddr, err = crypto.PubKeyHash(tx.Response.Signee); err != nil { - log.WithError(err).WithField("db", c.databaseID).Warning("billing fail: miner addr") - return - } + minerAddr = tx.Response.ResponseAccount if userAddr, err = crypto.PubKeyHash(tx.Request.Header.Signee); err != nil { 
log.WithError(err).WithField("db", c.databaseID).Warning("billing fail: miner addr") return diff --git a/sqlchain/xxx_test.go b/sqlchain/xxx_test.go index c5c1383a6..ba2c184af 100644 --- a/sqlchain/xxx_test.go +++ b/sqlchain/xxx_test.go @@ -167,7 +167,7 @@ func createRandomQueryResponse(cli, worker *nodeProfile) ( } } - if err = resp.Sign(worker.PrivateKey); err != nil { + if err = resp.BuildHash(); err != nil { return } diff --git a/types/response_type.go b/types/response_type.go index 33da3ff24..43b086c7e 100644 --- a/types/response_type.go +++ b/types/response_type.go @@ -19,10 +19,9 @@ package types import ( "time" - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/crypto/verifier" "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/pkg/errors" ) //go:generate hsp @@ -41,15 +40,16 @@ type ResponsePayload struct { // ResponseHeader defines a query response header. type ResponseHeader struct { - Request RequestHeader `json:"r"` - RequestHash hash.Hash `json:"rh"` - NodeID proto.NodeID `json:"id"` // response node id - Timestamp time.Time `json:"t"` // time in UTC zone - RowCount uint64 `json:"c"` // response row count of payload - LogOffset uint64 `json:"o"` // request log offset - LastInsertID int64 `json:"l"` // insert insert id - AffectedRows int64 `json:"a"` // affected rows - PayloadHash hash.Hash `json:"dh"` // hash of query response payload + Request RequestHeader `json:"r"` + RequestHash hash.Hash `json:"rh"` + NodeID proto.NodeID `json:"id"` // response node id + Timestamp time.Time `json:"t"` // time in UTC zone + RowCount uint64 `json:"c"` // response row count of payload + LogOffset uint64 `json:"o"` // request log offset + LastInsertID int64 `json:"l"` // insert insert id + AffectedRows int64 `json:"a"` // affected rows + PayloadHash hash.Hash `json:"dh"` // hash of query response payload + ResponseAccount proto.AccountAddress `json:"aa"` // response 
account } // GetRequestHash returns the request hash. @@ -65,48 +65,30 @@ func (h *ResponseHeader) GetRequestTimestamp() time.Time { // SignedResponseHeader defines a signed query response header. type SignedResponseHeader struct { ResponseHeader - verifier.DefaultHashSignVerifierImpl + ResponseHash hash.Hash } -// Response defines a complete query response. -type Response struct { - Header SignedResponseHeader `json:"h"` - Payload ResponsePayload `json:"p"` - callback func(res *Response) -} - -// Verify checks hash and signature in response header. -func (sh *SignedResponseHeader) Verify() (err error) { - return sh.DefaultHashSignVerifierImpl.Verify(&sh.ResponseHeader) -} - -// Sign the request. -func (sh *SignedResponseHeader) Sign(signer *asymmetric.PrivateKey) (err error) { - return sh.DefaultHashSignVerifierImpl.Sign(&sh.ResponseHeader, signer) +// Hash returns the response header hash. +func (sh *SignedResponseHeader) Hash() hash.Hash { + return sh.ResponseHash } -// Verify checks hash and signature in whole response. -func (r *Response) Verify() (err error) { - // verify data hash in header - if err = verifyHash(&r.Payload, &r.Header.PayloadHash); err != nil { - return - } - - return r.Header.Verify() +// VerifyHash verify the hash of the response. +func (sh *SignedResponseHeader) VerifyHash() (err error) { + return errors.Wrap(verifyHash(&sh.ResponseHeader, &sh.ResponseHash), + "verify response header hash failed") } -// Sign the response. -func (r *Response) Sign(signer *asymmetric.PrivateKey) (err error) { - if err = r.BuildHash(); err != nil { - return - } - - return r.SignHash(signer) +// BuildHash computes the hash of the response header. +func (sh *SignedResponseHeader) BuildHash() (err error) { + return errors.Wrap(buildHash(&sh.ResponseHeader, &sh.ResponseHash), + "compute response header hash failed") } -// SignHash computes the signature of the response through existing hash. 
-func (r *Response) SignHash(signer *asymmetric.PrivateKey) (err error) { - return r.Header.SignHash(signer) +// Response defines a complete query response. +type Response struct { + Header SignedResponseHeader `json:"h"` + Payload ResponsePayload `json:"p"` } // BuildHash computes the hash of the response. @@ -116,21 +98,25 @@ func (r *Response) BuildHash() (err error) { // build hash in header if err = buildHash(&r.Payload, &r.Header.PayloadHash); err != nil { + err = errors.Wrap(err, "compute response payload hash failed") return } // compute header hash - return r.Header.SetHash(&r.Header.ResponseHeader) + return r.Header.BuildHash() } -// SetResponseCallback stores callback function to process after response processed. -func (r *Response) SetResponseCallback(cb func(res *Response)) { - r.callback = cb +// VerifyHash verify the hash of the response. +func (r *Response) VerifyHash() (err error) { + if err = verifyHash(&r.Payload, &r.Header.PayloadHash); err != nil { + err = errors.Wrap(err, "verify response payload hash failed") + return + } + + return r.Header.VerifyHash() } -// TriggerResponseCallback async executes callback. -func (r *Response) TriggerResponseCallback() { - if r.callback != nil { - go r.callback(r) - } +// Hash returns the response header hash. 
+func (r *Response) Hash() hash.Hash { + return r.Header.Hash() } diff --git a/types/response_type_gen.go b/types/response_type_gen.go index 9efec7c49..59fe39ae1 100644 --- a/types/response_type_gen.go +++ b/types/response_type_gen.go @@ -18,7 +18,7 @@ func (z *Response) MarshalHash() (o []byte, err error) { } else { o = hsp.AppendBytes(o, oTemp) } - if oTemp, err := z.Header.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.Header.ResponseHash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -33,7 +33,7 @@ func (z *Response) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *Response) Msgsize() (s int) { - s = 1 + 7 + 1 + 15 + z.Header.ResponseHeader.Msgsize() + 28 + z.Header.DefaultHashSignVerifierImpl.Msgsize() + 8 + z.Payload.Msgsize() + s = 1 + 7 + 1 + 15 + z.Header.ResponseHeader.Msgsize() + 13 + z.Header.ResponseHash.Msgsize() + 8 + z.Payload.Msgsize() return } @@ -41,8 +41,8 @@ func (z *Response) Msgsize() (s int) { func (z *ResponseHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) - // map header, size 9 - o = append(o, 0x89) + // map header, size 10 + o = append(o, 0x8a) o = hsp.AppendInt64(o, z.AffectedRows) o = hsp.AppendInt64(o, z.LastInsertID) o = hsp.AppendUint64(o, z.LogOffset) @@ -66,6 +66,11 @@ func (z *ResponseHeader) MarshalHash() (o []byte, err error) { } else { o = hsp.AppendBytes(o, oTemp) } + if oTemp, err := z.ResponseAccount.MarshalHash(); err != nil { + return nil, err + } else { + o = hsp.AppendBytes(o, oTemp) + } o = hsp.AppendUint64(o, z.RowCount) o = hsp.AppendTime(o, z.Timestamp) return @@ -73,7 +78,7 @@ func (z *ResponseHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *ResponseHeader) Msgsize() (s int) { - s = 1 + 13 + 
hsp.Int64Size + 13 + hsp.Int64Size + 10 + hsp.Uint64Size + 7 + z.NodeID.Msgsize() + 12 + z.PayloadHash.Msgsize() + 8 + z.Request.Msgsize() + 12 + z.RequestHash.Msgsize() + 9 + hsp.Uint64Size + 10 + hsp.TimeSize + s = 1 + 13 + hsp.Int64Size + 13 + hsp.Int64Size + 10 + hsp.Uint64Size + 7 + z.NodeID.Msgsize() + 12 + z.PayloadHash.Msgsize() + 8 + z.Request.Msgsize() + 12 + z.RequestHash.Msgsize() + 16 + z.ResponseAccount.Msgsize() + 9 + hsp.Uint64Size + 10 + hsp.TimeSize return } @@ -157,7 +162,7 @@ func (z *SignedResponseHeader) MarshalHash() (o []byte, err error) { o = hsp.Require(b, z.Msgsize()) // map header, size 2 o = append(o, 0x82) - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { + if oTemp, err := z.ResponseHash.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) @@ -172,6 +177,6 @@ func (z *SignedResponseHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *SignedResponseHeader) Msgsize() (s int) { - s = 1 + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 15 + z.ResponseHeader.Msgsize() + s = 1 + 13 + z.ResponseHash.Msgsize() + 15 + z.ResponseHeader.Msgsize() return } diff --git a/types/types_test.go b/types/types_test.go index 7f482bd00..2d243e2ee 100644 --- a/types/types_test.go +++ b/types/types_test.go @@ -153,8 +153,6 @@ func TestRequest_Sign(t *testing.T) { } func TestResponse_Sign(t *testing.T) { - privKey, _ := getCommKeys() - Convey("sign", t, func() { res := &Response{ Header: SignedResponseHeader{ @@ -213,7 +211,7 @@ func TestResponse_Sign(t *testing.T) { var err error // sign - err = res.Sign(privKey) + err = res.BuildHash() So(err, ShouldBeNil) // test hash @@ -222,7 +220,7 @@ func TestResponse_Sign(t *testing.T) { // verify Convey("verify", func() { - err = res.Verify() + err = res.BuildHash() So(err, ShouldBeNil) Convey("encode/decode verify", func() { @@ -231,25 +229,25 @@ func 
TestResponse_Sign(t *testing.T) { var r *Response err = utils.DecodeMsgPack(buf.Bytes(), &r) So(err, ShouldBeNil) - err = r.Verify() + err = r.VerifyHash() So(err, ShouldBeNil) }) Convey("request change", func() { res.Header.Request.BatchCount = 200 - err = res.Verify() + err = res.VerifyHash() So(err, ShouldNotBeNil) }) Convey("payload change", func() { res.Payload.DeclTypes[0] = "INT" - err = res.Verify() + err = res.VerifyHash() So(err, ShouldNotBeNil) }) Convey("header change", func() { res.Header.Timestamp = res.Header.Timestamp.Add(time.Second) - err = res.Verify() + err = res.VerifyHash() So(err, ShouldNotBeNil) }) }) diff --git a/types/xxx_test.go b/types/xxx_test.go index 49dcff7ec..7da8dffa6 100644 --- a/types/xxx_test.go +++ b/types/xxx_test.go @@ -267,7 +267,7 @@ func buildResponse(header *SignedRequestHeader, cols []string, types []string, r Rows: rows, }, } - if err = r.Sign(testingPrivateKey); err != nil { + if err = r.BuildHash(); err != nil { panic(err) } return diff --git a/worker/db.go b/worker/db.go index 70ae53986..fddf3775c 100644 --- a/worker/db.go +++ b/worker/db.go @@ -25,6 +25,7 @@ import ( "time" "github.com/CovenantSQL/CovenantSQL/conf" + "github.com/CovenantSQL/CovenantSQL/crypto" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/kayak" @@ -84,6 +85,7 @@ type Database struct { nodeID proto.NodeID mux *DBKayakMuxService privateKey *asymmetric.PrivateKey + accountAddr proto.AccountAddress } // NewDatabase create a single database instance using config. 
@@ -105,6 +107,11 @@ func NewDatabase(cfg *DBConfig, peers *proto.Peers, return } + var accountAddr proto.AccountAddress + if accountAddr, err = crypto.PubKeyHash(privateKey.PubKey()); err != nil { + return + } + // init database db = &Database{ cfg: cfg, @@ -112,6 +119,7 @@ func NewDatabase(cfg *DBConfig, peers *proto.Peers, mux: cfg.KayakMux, connSeqEvictCh: make(chan uint64, 1), privateKey: privateKey, + accountAddr: accountAddr, } defer func() { @@ -272,27 +280,19 @@ func (db *Database) Query(request *types.Request) (response *types.Response, err return nil, errors.Wrap(ErrInvalidRequest, "invalid query type") } + response.Header.ResponseAccount = db.accountAddr + // build hash if err = response.BuildHash(); err != nil { err = errors.Wrap(err, "failed to build response hash") return } - func(privKey *asymmetric.PrivateKey, tracker *x.QueryTracker) { - response.SetResponseCallback(func(res *types.Response) { - var err error - // Sign response - if err = res.SignHash(privKey); err != nil { - log.WithError(err).Debug("failed to sign response") - return - } - if err = db.chain.AddResponse(&res.Header); err != nil { - log.WithError(err).Debug("failed to add response to index") - return - } - tracker.UpdateResp(res) - }) - }(db.privateKey, tracker) + if err = db.chain.AddResponse(&response.Header); err != nil { + log.WithError(err).Debug("failed to add response to index") + return + } + tracker.UpdateResp(response) return } diff --git a/worker/db_test.go b/worker/db_test.go index 12e634218..e8e83d58c 100644 --- a/worker/db_test.go +++ b/worker/db_test.go @@ -358,7 +358,6 @@ func TestSingleDatabase(t *testing.T) { res, err = db.Query(readQuery) So(err, ShouldBeNil) - res.TriggerResponseCallback() // wait for callback to sign signature time.Sleep(time.Millisecond * 10) diff --git a/worker/dbms_rpc.go b/worker/dbms_rpc.go index bbdcc518d..e3d5dda3b 100644 --- a/worker/dbms_rpc.go +++ b/worker/dbms_rpc.go @@ -95,7 +95,6 @@ func (rpc *DBMSRPCService) Query(req 
*types.Request, res *types.Response) (err e } *res = *r - r.TriggerResponseCallback() dbQuerySuccCounter.Mark(1) return diff --git a/xenomint/chain.go b/xenomint/chain.go index 8cecdab00..7f4e9fac3 100644 --- a/xenomint/chain.go +++ b/xenomint/chain.go @@ -84,7 +84,7 @@ func (c *Chain) Query(req *types.Request) (resp *types.Response, err error) { return } queried = time.Since(start) - if err = resp.Sign(c.priv); err != nil { + if err = resp.BuildHash(); err != nil { return } signed = time.Since(start)