From 3afba8012c839bd63551cd5341eaf1d69250d56a Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 26 Feb 2019 17:36:19 +0800 Subject: [PATCH 001/244] Add v0.4.0 changelog --- CHANGELOG.md | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 73544d80c..5795be024 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,30 @@ # Changelog +## [v0.4.0](https://github.com/CovenantSQL/CovenantSQL/tree/v0.4.0) (2019-02-25) + +[Full Changelog](https://github.com/CovenantSQL/CovenantSQL/compare/v0.3.0...v0.4.0) + +**Merged pull requests:** + +- Use minimum assertion instead of exact version [\#265](https://github.com/CovenantSQL/CovenantSQL/pull/265) ([leventeliu](https://github.com/leventeliu)) +- Promote beta branch [\#264](https://github.com/CovenantSQL/CovenantSQL/pull/264) ([leventeliu](https://github.com/leventeliu)) +- Add imports format script and format all codes [\#263](https://github.com/CovenantSQL/CovenantSQL/pull/263) ([leventeliu](https://github.com/leventeliu)) +- Add listen flag for adapter [\#262](https://github.com/CovenantSQL/CovenantSQL/pull/262) ([auxten](https://github.com/auxten)) +- Use rpc broadcast for dht node info synchronization instead of kayak [\#261](https://github.com/CovenantSQL/CovenantSQL/pull/261) ([xq262144](https://github.com/xq262144)) +- Merge observer and explorer into cql [\#260](https://github.com/CovenantSQL/CovenantSQL/pull/260) ([auxten](https://github.com/auxten)) +- Add transaction hash as return value in create/drop [\#259](https://github.com/CovenantSQL/CovenantSQL/pull/259) ([leventeliu](https://github.com/leventeliu)) +- Combine createRandomBlock functions into one. 
[\#258](https://github.com/CovenantSQL/CovenantSQL/pull/258) ([laodouya](https://github.com/laodouya)) +- Improve unit test cover ratio [\#257](https://github.com/CovenantSQL/CovenantSQL/pull/257) ([laodouya](https://github.com/laodouya)) +- Refactor observer synchronization to pull mode [\#256](https://github.com/CovenantSQL/CovenantSQL/pull/256) ([xq262144](https://github.com/xq262144)) +- Refactor observer synchronization to pull mode [\#255](https://github.com/CovenantSQL/CovenantSQL/pull/255) ([xq262144](https://github.com/xq262144)) +- Fix SQLChain forks [\#254](https://github.com/CovenantSQL/CovenantSQL/pull/254) ([xq262144](https://github.com/xq262144)) +- Update beta with develop updates [\#253](https://github.com/CovenantSQL/CovenantSQL/pull/253) ([xq262144](https://github.com/xq262144)) +- Improve unit test cover ratio. [\#251](https://github.com/CovenantSQL/CovenantSQL/pull/251) ([laodouya](https://github.com/laodouya)) +- Add blocks cache limit for block producers [\#249](https://github.com/CovenantSQL/CovenantSQL/pull/249) ([leventeliu](https://github.com/leventeliu)) +- Support pprof http handler in BlockProducer [\#248](https://github.com/CovenantSQL/CovenantSQL/pull/248) ([xq262144](https://github.com/xq262144)) +- Unit all docker client config to same private.key [\#247](https://github.com/CovenantSQL/CovenantSQL/pull/247) ([laodouya](https://github.com/laodouya)) +- Make observer API writeTimeout = 10 \* readTimeout [\#246](https://github.com/CovenantSQL/CovenantSQL/pull/246) ([auxten](https://github.com/auxten)) + ## [v0.3.0](https://github.com/CovenantSQL/CovenantSQL/tree/v0.3.0) (2019-01-30) [Full Changelog](https://github.com/CovenantSQL/CovenantSQL/compare/v0.2.0...v0.3.0) From 5eb3fcd0a7ebd7140dec2fcc395b1d58b928e8cd Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 26 Feb 2019 21:02:30 +0800 Subject: [PATCH 002/244] Use WaitForExit for signal waiting logic, and make cql -web wait for signal on non-tty mode --- cmd/cql-adapter/main.go | 8 
+------- cmd/cql-explorer/main.go | 12 +----------- cmd/cql-faucet/main.go | 8 +------- cmd/cql-fuse/main.go | 12 +++--------- cmd/cql-minerd/main.go | 12 +----------- cmd/cql-mysql-adapter/main.go | 8 +------- cmd/cql-observer/main.go | 12 +----------- cmd/cql-utils/idminer.go | 13 ++----------- cmd/cql/main.go | 8 ++++++++ cmd/cqld/bootstrap.go | 16 +++------------- utils/signal.go | 35 +++++++++++++++++++++++++++++++++++ 11 files changed, 57 insertions(+), 87 deletions(-) create mode 100644 utils/signal.go diff --git a/cmd/cql-adapter/main.go b/cmd/cql-adapter/main.go index aee2cfdde..f45382daf 100644 --- a/cmd/cql-adapter/main.go +++ b/cmd/cql-adapter/main.go @@ -21,12 +21,9 @@ import ( "flag" "fmt" "os" - "os/signal" "runtime" "time" - "golang.org/x/sys/unix" - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" @@ -71,16 +68,13 @@ func main() { return } - stop := make(chan os.Signal, 1) - signal.Notify(stop, os.Interrupt, unix.SIGTERM) - log.Info("start adapter") if err = server.Serve(); err != nil { log.WithError(err).Fatal("start adapter failed") return } - <-stop + <-utils.WaitForExit() ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() diff --git a/cmd/cql-explorer/main.go b/cmd/cql-explorer/main.go index b1fbca26f..f2f1550ba 100644 --- a/cmd/cql-explorer/main.go +++ b/cmd/cql-explorer/main.go @@ -22,9 +22,7 @@ import ( "math/rand" "net/http" "os" - "os/signal" "runtime" - "syscall" "time" "github.com/CovenantSQL/CovenantSQL/client" @@ -99,15 +97,7 @@ func main() { return } - signalCh := make(chan os.Signal, 1) - signal.Notify( - signalCh, - syscall.SIGINT, - syscall.SIGTERM, - ) - signal.Ignore(syscall.SIGHUP, syscall.SIGTTIN, syscall.SIGTTOU) - - <-signalCh + <-utils.WaitForExit() // stop explorer api if err = stopAPI(httpServer); err != nil { diff --git a/cmd/cql-faucet/main.go b/cmd/cql-faucet/main.go index 
333bbe77d..6debac8fd 100644 --- a/cmd/cql-faucet/main.go +++ b/cmd/cql-faucet/main.go @@ -22,12 +22,9 @@ import ( "fmt" "net/http" "os" - "os/signal" "runtime" "time" - "golang.org/x/sys/unix" - "github.com/CovenantSQL/CovenantSQL/client" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/utils" @@ -106,10 +103,7 @@ func main() { log.Info("started faucet") - stop := make(chan os.Signal, 1) - signal.Notify(stop, os.Interrupt, unix.SIGTERM) - - <-stop + <-utils.WaitForExit() // stop verifier v.stop() diff --git a/cmd/cql-fuse/main.go b/cmd/cql-fuse/main.go index c66401960..5eae842aa 100644 --- a/cmd/cql-fuse/main.go +++ b/cmd/cql-fuse/main.go @@ -70,7 +70,6 @@ import ( "flag" "fmt" "os" - "os/signal" "bazil.org/fuse" "bazil.org/fuse/fs" @@ -152,14 +151,9 @@ func main() { log.Infof("DB: %s mount on %s succeed", dsn, mountPoint) go func() { - sig := make(chan os.Signal, 1) - signal.Notify(sig, os.Interrupt) - for range sig { - if err := fuse.Unmount(mountPoint); err != nil { - log.Printf("Signal received, but could not unmount: %s", err) - } else { - break - } + <-utils.WaitForExit() + if err := fuse.Unmount(mountPoint); err != nil { + log.Printf("Signal received, but could not unmount: %s", err) } }() diff --git a/cmd/cql-minerd/main.go b/cmd/cql-minerd/main.go index b1170252c..129f16599 100644 --- a/cmd/cql-minerd/main.go +++ b/cmd/cql-minerd/main.go @@ -24,9 +24,7 @@ import ( "net/http" _ "net/http/pprof" "os" - "os/signal" "runtime" - "syscall" "time" graphite "github.com/cyberdelia/go-metrics-graphite" @@ -231,14 +229,6 @@ func main() { server.Stop() }() - signalCh := make(chan os.Signal, 1) - signal.Notify( - signalCh, - syscall.SIGINT, - syscall.SIGTERM, - ) - signal.Ignore(syscall.SIGHUP, syscall.SIGTTIN, syscall.SIGTTOU) - if metricLog { go metrics.Log(metrics.DefaultRegistry, 5*time.Second, log.StandardLogger()) } @@ -270,7 +260,7 @@ func main() { defer trace.Stop() } - <-signalCh + <-utils.WaitForExit() 
utils.StopProfile() log.Info("miner stopped") diff --git a/cmd/cql-mysql-adapter/main.go b/cmd/cql-mysql-adapter/main.go index f34419fde..81a851203 100644 --- a/cmd/cql-mysql-adapter/main.go +++ b/cmd/cql-mysql-adapter/main.go @@ -20,11 +20,8 @@ import ( "flag" "fmt" "os" - "os/signal" "runtime" - "golang.org/x/sys/unix" - "github.com/CovenantSQL/CovenantSQL/client" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/utils" @@ -79,9 +76,6 @@ func main() { return } - stop := make(chan os.Signal, 1) - signal.Notify(stop, os.Interrupt, unix.SIGTERM) - server, err := NewServer(listenAddr, mysqlUser, mysqlPassword) if err != nil { log.WithError(err).Fatal("init server failed") @@ -92,7 +86,7 @@ func main() { log.Info("start mysql adapter") - <-stop + <-utils.WaitForExit() server.Shutdown() diff --git a/cmd/cql-observer/main.go b/cmd/cql-observer/main.go index 841d7a991..bbf7f9801 100644 --- a/cmd/cql-observer/main.go +++ b/cmd/cql-observer/main.go @@ -21,9 +21,7 @@ import ( "fmt" "math/rand" "os" - "os/signal" "runtime" - "syscall" "time" "github.com/CovenantSQL/CovenantSQL/conf" @@ -66,14 +64,6 @@ func main() { os.Exit(0) } - signalCh := make(chan os.Signal, 1) - signal.Notify( - signalCh, - syscall.SIGINT, - syscall.SIGTERM, - ) - signal.Ignore(syscall.SIGHUP, syscall.SIGTTIN, syscall.SIGTTOU) - configFile = utils.HomeDirExpand(configFile) flag.Visit(func(f *flag.Flag) { @@ -92,7 +82,7 @@ func main() { log.WithError(err).Fatal("start observer failed") } - <-signalCh + <-utils.WaitForExit() _ = observer.StopObserver(service, httpServer) log.Info("observer stopped") diff --git a/cmd/cql-utils/idminer.go b/cmd/cql-utils/idminer.go index b99da343d..caa526434 100644 --- a/cmd/cql-utils/idminer.go +++ b/cmd/cql-utils/idminer.go @@ -23,15 +23,14 @@ import ( "math" "math/rand" "os" - "os/signal" "runtime" - "syscall" "time" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" mine 
"github.com/CovenantSQL/CovenantSQL/pow/cpuminer" "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" ) @@ -69,14 +68,6 @@ func runMiner() { publicKey = privateKey.PubKey() } - signalCh := make(chan os.Signal, 1) - signal.Notify( - signalCh, - syscall.SIGINT, - syscall.SIGTERM, - ) - signal.Ignore(syscall.SIGHUP, syscall.SIGTTIN, syscall.SIGTTOU) - cpuCount := runtime.NumCPU() log.Infof("cpu: %#v\n", cpuCount) nonceChs := make([]chan mine.NonceInfo, cpuCount) @@ -102,7 +93,7 @@ func runMiner() { }(i) } - sig := <-signalCh + sig := <-utils.WaitForExit() log.Infof("received signal %#v\n", sig) for i := 0; i < cpuCount; i++ { close(stopChs[i]) diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 807e6f4ff..527d68011 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -33,6 +33,7 @@ import ( "runtime" "strconv" "strings" + "syscall" "time" sqlite3 "github.com/CovenantSQL/go-sqlite3-encrypt" @@ -44,6 +45,7 @@ import ( "github.com/xo/usql/handler" "github.com/xo/usql/rline" "github.com/xo/usql/text" + "golang.org/x/crypto/ssh/terminal" pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/client" @@ -321,6 +323,12 @@ func main() { } } + // if stdin is not tty and web flag is enabled + if !terminal.IsTerminal(syscall.Stdin) && explorerAddr != "" { + <-utils.WaitForExit() + return + } + // TODO(leventeliu): discover more specific confirmation duration from config. We don't have // enough informations from config to do that currently, so just use a fixed and long enough // duration. 
diff --git a/cmd/cqld/bootstrap.go b/cmd/cqld/bootstrap.go index f77e51a82..7092445b3 100644 --- a/cmd/cqld/bootstrap.go +++ b/cmd/cqld/bootstrap.go @@ -18,8 +18,6 @@ package main import ( "fmt" - "os" - "os/signal" "syscall" "time" @@ -34,12 +32,12 @@ import ( "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/types" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" ) const ( - dhtGossipServiceName = "DHTG" - dhtGossipTimeout = time.Second * 20 + dhtGossipTimeout = time.Second * 20 ) func runNode(nodeID proto.NodeID, listenAddr string) (err error) { @@ -177,15 +175,7 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { }() } - signalCh := make(chan os.Signal, 1) - signal.Notify( - signalCh, - syscall.SIGINT, - syscall.SIGTERM, - ) - signal.Ignore(syscall.SIGHUP, syscall.SIGTTIN, syscall.SIGTTOU) - - <-signalCh + <-utils.WaitForExit() return } diff --git a/utils/signal.go b/utils/signal.go new file mode 100644 index 000000000..1dfd31d1a --- /dev/null +++ b/utils/signal.go @@ -0,0 +1,35 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package utils + +import ( + "os" + "os/signal" + "syscall" +) + +// WaitForExit waits for user cancellation signals: SIGINT/SIGTERM/SIGHUP/SIGTTIN/SIGTTOU. 
+func WaitForExit() <-chan os.Signal { + signalCh := make(chan os.Signal, 1) + signal.Notify( + signalCh, + syscall.SIGINT, + syscall.SIGTERM, + ) + signal.Ignore(syscall.SIGHUP, syscall.SIGTTIN, syscall.SIGTTOU) + return signalCh +} From 2ac373334f9a0a83fee57a8a195f3c0741b50190 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 26 Feb 2019 21:05:50 +0800 Subject: [PATCH 003/244] Remove trailing dot in cgo include statement --- metric/loadavg_unix.go | 2 +- metric/meminfo_darwin.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/metric/loadavg_unix.go b/metric/loadavg_unix.go index 16bda06db..ea3b76836 100644 --- a/metric/loadavg_unix.go +++ b/metric/loadavg_unix.go @@ -20,7 +20,7 @@ import ( "errors" ) -// #include . +// #include import "C" func getLoad() ([]float64, error) { diff --git a/metric/meminfo_darwin.go b/metric/meminfo_darwin.go index 2e85a61f0..121479495 100644 --- a/metric/meminfo_darwin.go +++ b/metric/meminfo_darwin.go @@ -2,7 +2,7 @@ package metric -// #include . 
+// #include import "C" import ( From c7470b72acf35fb71d2a25a905e9b5312df66b21 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 26 Feb 2019 21:08:36 +0800 Subject: [PATCH 004/244] Add CORS to observer api --- sqlchain/observer/api.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/sqlchain/observer/api.go b/sqlchain/observer/api.go index 2e7cf426f..cc20e7eb8 100644 --- a/sqlchain/observer/api.go +++ b/sqlchain/observer/api.go @@ -26,6 +26,7 @@ import ( "strconv" "time" + "github.com/gorilla/handlers" "github.com/gorilla/mux" "github.com/rakyll/statik/fs" @@ -668,15 +669,15 @@ func startAPI(service *Service, listenAddr string, version string) (server *http } router := mux.NewRouter() - fs := http.FileServer(statikFS) - router.Handle("/", fs) - router.Handle("/static/{type}/{file}", fs) + fsh := http.FileServer(statikFS) + router.Handle("/", fsh) + router.Handle("/static/{type}/{file}", fsh) router.PathPrefix("/dbs").HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { r2 := new(http.Request) *r2 = *request r2.URL = new(url.URL) r2.URL.Path = "/" - fs.ServeHTTP(writer, r2) + fsh.ServeHTTP(writer, r2) }) router.HandleFunc("/version", func(rw http.ResponseWriter, r *http.Request) { sendResponse(http.StatusOK, true, nil, map[string]interface{}{ @@ -714,7 +715,7 @@ func startAPI(service *Service, listenAddr string, version string) (server *http WriteTimeout: apiTimeout * 10, ReadTimeout: apiTimeout, IdleTimeout: apiTimeout, - Handler: router, + Handler: handlers.CORS()(router), } go func() { From 595b43bc80a81402edd895e92a3fe6b91eefbc3c Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 26 Feb 2019 21:19:04 +0800 Subject: [PATCH 005/244] Wait for signal if web mode is enabled --- cmd/cql/main.go | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 527d68011..07206115c 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -33,7 +33,6 @@ import ( "runtime" 
"strconv" "strings" - "syscall" "time" sqlite3 "github.com/CovenantSQL/go-sqlite3-encrypt" @@ -45,7 +44,6 @@ import ( "github.com/xo/usql/handler" "github.com/xo/usql/rline" "github.com/xo/usql/text" - "golang.org/x/crypto/ssh/terminal" pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/client" @@ -323,12 +321,6 @@ func main() { } } - // if stdin is not tty and web flag is enabled - if !terminal.IsTerminal(syscall.Stdin) && explorerAddr != "" { - <-utils.WaitForExit() - return - } - // TODO(leventeliu): discover more specific confirmation duration from config. We don't have // enough informations from config to do that currently, so just use a fixed and long enough // duration. @@ -592,6 +584,12 @@ func main() { os.Exit(-1) return } + + // if web flag is enabled + if explorerAddr != "" { + <-utils.WaitForExit() + return + } } func wait(txHash hash.Hash) (err error) { From 6ee542e4482c6a164bd10d9c9451a21b62e4ac86 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 26 Feb 2019 21:27:14 +0800 Subject: [PATCH 006/244] Update comment in WaitForExit --- utils/signal.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/signal.go b/utils/signal.go index 1dfd31d1a..c348d8ea1 100644 --- a/utils/signal.go +++ b/utils/signal.go @@ -22,7 +22,7 @@ import ( "syscall" ) -// WaitForExit waits for user cancellation signals: SIGINT/SIGTERM/SIGHUP/SIGTTIN/SIGTTOU. +// WaitForExit waits for user cancellation signals: SIGINT/SIGTERM and ignore SIGHUP/SIGTTIN/SIGTTOU. 
func WaitForExit() <-chan os.Signal { signalCh := make(chan os.Signal, 1) signal.Notify( From 9ab6bc07cc6721438789a1f39d3a6e2009a18d2f Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 26 Feb 2019 21:31:26 +0800 Subject: [PATCH 007/244] Add missing error check in addBlock --- sqlchain/observer/service.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sqlchain/observer/service.go b/sqlchain/observer/service.go index 6713000b6..d8a4b2e8d 100644 --- a/sqlchain/observer/service.go +++ b/sqlchain/observer/service.go @@ -306,6 +306,10 @@ func (s *Service) addQueryTracker(dbID proto.DatabaseID, height int32, offset in func (s *Service) addBlock(dbID proto.DatabaseID, count int32, b *types.Block) (err error) { instance, err := s.getUpstream(dbID) + if err != nil { + return + } + h := int32(b.Timestamp().Sub(instance.GenesisBlock.Timestamp()) / conf.GConf.SQLChainPeriod) key := utils.ConcatAll(int32ToBytes(h), b.BlockHash().AsBytes(), int32ToBytes(count)) // It's actually `countToBytes` From 6bf90a58426e1bbf129702f09465cc2381d33bd6 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 26 Feb 2019 21:54:30 +0800 Subject: [PATCH 008/244] Fix bin/cql -web and -dsn flag interference --- cmd/cql-observer/main.go | 4 ++++ .../observer => cmd/cql-observer}/node.go | 4 ++-- cmd/cql/main.go | 20 +++++++------------ sqlchain/observer/observer.go | 5 ----- sqlchain/observer/service.go | 3 +++ 5 files changed, 16 insertions(+), 20 deletions(-) rename {sqlchain/observer => cmd/cql-observer}/node.go (95%) diff --git a/cmd/cql-observer/main.go b/cmd/cql-observer/main.go index bbf7f9801..747ae6583 100644 --- a/cmd/cql-observer/main.go +++ b/cmd/cql-observer/main.go @@ -77,6 +77,10 @@ func main() { } kms.InitBP() + if err = initNode(); err != nil { + return + } + service, httpServer, err := observer.StartObserver(listenAddr, version) if err != nil { log.WithError(err).Fatal("start observer failed") diff --git a/sqlchain/observer/node.go b/cmd/cql-observer/node.go similarity index 95% 
rename from sqlchain/observer/node.go rename to cmd/cql-observer/node.go index 369d4fb77..c49738de7 100644 --- a/sqlchain/observer/node.go +++ b/cmd/cql-observer/node.go @@ -1,5 +1,5 @@ /* - * Copyright 2018 The CovenantSQL Authors. + * Copyright 2019 The CovenantSQL Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ * limitations under the License. */ -package observer +package main import ( "fmt" diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 07206115c..9d9cd68f2 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -292,13 +292,14 @@ func main() { configFile = utils.HomeDirExpand(configFile) - if explorerAddr != "" { - var err error - conf.GConf, err = conf.LoadConfig(configFile) - if err != nil { - log.WithField("config", configFile).WithError(err).Fatal("load config failed") - } + // init covenantsql driver + if err = client.Init(configFile, []byte(password)); err != nil { + cLog.WithError(err).Error("init covenantsql client failed") + os.Exit(-1) + return + } + if explorerAddr != "" { service, httpServer, err = observer.StartObserver(explorerAddr, version) if err != nil { log.WithError(err).Fatal("start explorer failed") @@ -312,13 +313,6 @@ func main() { _ = observer.StopObserver(service, httpServer) log.Info("explorer stopped") }() - } else { - // init covenantsql driver - if err = client.Init(configFile, []byte(password)); err != nil { - cLog.WithError(err).Error("init covenantsql client failed") - os.Exit(-1) - return - } } // TODO(leventeliu): discover more specific confirmation duration from config. We don't have diff --git a/sqlchain/observer/observer.go b/sqlchain/observer/observer.go index bfde4ad9e..79de69f61 100644 --- a/sqlchain/observer/observer.go +++ b/sqlchain/observer/observer.go @@ -63,11 +63,6 @@ func stopService(service *Service) (err error) { // StartObserver starts the observer service and http API server. 
func StartObserver(listenAddr string, version string) (service *Service, httpServer *http.Server, err error) { - // init node - if err = initNode(); err != nil { - log.WithError(err).Fatal("init node failed") - } - // start service if service, err = startService(); err != nil { log.WithError(err).Fatal("start observation failed") diff --git a/sqlchain/observer/service.go b/sqlchain/observer/service.go index d8a4b2e8d..7bf244360 100644 --- a/sqlchain/observer/service.go +++ b/sqlchain/observer/service.go @@ -309,6 +309,9 @@ func (s *Service) addBlock(dbID proto.DatabaseID, count int32, b *types.Block) ( if err != nil { return } + if b == nil { + return + } h := int32(b.Timestamp().Sub(instance.GenesisBlock.Timestamp()) / conf.GConf.SQLChainPeriod) key := utils.ConcatAll(int32ToBytes(h), b.BlockHash().AsBytes(), int32ToBytes(count)) From 1be7a59ddfd4e4883984b9174eb32bb8f1386d65 Mon Sep 17 00:00:00 2001 From: auxten Date: Wed, 27 Feb 2019 00:41:14 +0800 Subject: [PATCH 009/244] Ctrl + C to stop explorer --- cmd/cql/main.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 9d9cd68f2..8d2edd90e 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -581,6 +581,7 @@ func main() { // if web flag is enabled if explorerAddr != "" { + fmt.Printf("Ctrl + C to stop explorer listen on %s", explorerAddr) <-utils.WaitForExit() return } From a1dda7ce5397218fde234c77d3aecb3ff364daa3 Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 26 Feb 2019 17:12:21 +0800 Subject: [PATCH 010/244] Remove cql-observer bin file and all related code. 
--- Makefile | 51 ++++---------- alltest.sh | 2 +- bin/docker-entry.sh | 4 +- cmd/cql-observer/main.go | 95 --------------------------- cmd/cql-observer/main_test.go | 26 -------- docker-compose.yml | 3 +- docker/observer.Dockerfile | 28 -------- sqlchain/observer/observation_test.go | 25 +++---- 8 files changed, 29 insertions(+), 205 deletions(-) delete mode 100644 cmd/cql-observer/main.go delete mode 100644 cmd/cql-observer/main_test.go delete mode 100644 docker/observer.Dockerfile diff --git a/Makefile b/Makefile index e1bf36bce..46b69caf7 100644 --- a/Makefile +++ b/Makefile @@ -17,7 +17,6 @@ use_all_cores: BUILDER := covenantsql/covenantsql-builder IMAGE := covenantsql/covenantsql -OB_IMAGE := covenantsql/covenantsql-observer GIT_COMMIT ?= $(shell git rev-parse --short HEAD) GIT_DIRTY ?= $(shell test -n "`git status --porcelain`" && echo "+CHANGES" || true) @@ -50,24 +49,13 @@ runner: builder -f docker/Dockerfile \ . -observer_docker: builder - docker build \ - --tag $(OB_IMAGE):$(VERSION) \ - --tag $(OB_IMAGE):latest \ - --build-arg COMMIT=$(COMMIT) \ - --build-arg VERSION=$(VERSION) \ - -f docker/observer.Dockerfile \ - . 
- -docker: runner observer_docker +docker: runner docker_clean: status docker rmi -f $(BUILDER):latest docker rmi -f $(IMAGE):latest - docker rmi -f $(OB_IMAGE):latest docker rmi -f $(BUILDER):$(VERSION) docker rmi -f $(IMAGE):$(VERSION) - docker rmi -f $(OB_IMAGE):$(VERSION) save: status @@ -89,27 +77,19 @@ logs: docker-compose logs -f --tail=10 push_testnet: - docker tag $(OB_IMAGE):$(VERSION) $(OB_IMAGE):testnet - docker push $(OB_IMAGE):testnet docker tag $(IMAGE):$(VERSION) $(IMAGE):testnet docker push $(IMAGE):testnet push_bench: - docker tag $(OB_IMAGE):$(VERSION) $(OB_IMAGE):bench - docker push $(OB_IMAGE):bench docker tag $(IMAGE):$(VERSION) $(IMAGE):bench docker push $(IMAGE):bench push_staging: - docker tag $(OB_IMAGE):$(VERSION) $(OB_IMAGE):staging - docker push $(OB_IMAGE):staging docker tag $(IMAGE):$(VERSION) $(IMAGE):staging docker push $(IMAGE):staging push: - docker push $(OB_IMAGE):$(VERSION) - docker push $(OB_IMAGE):latest docker push $(IMAGE):$(VERSION) docker push $(IMAGE):latest @@ -162,18 +142,6 @@ bin/cql-minerd: -o bin/cql-minerd \ github.com/CovenantSQL/CovenantSQL/cmd/cql-minerd -bin/cql-observer.test: - $(GOTEST) \ - -ldflags "$(ldflags_role_client)" \ - -o bin/cql-observer.test \ - github.com/CovenantSQL/CovenantSQL/cmd/cql-observer - -bin/cql-observer: - $(GOBUILD) \ - -ldflags "$(ldflags_role_client)" \ - -o bin/cql-observer \ - github.com/CovenantSQL/CovenantSQL/cmd/cql-observer - bin/cql-utils: $(GOBUILD) \ -ldflags "$(ldflags_role_client_simple_log)" \ @@ -186,6 +154,13 @@ bin/cql: -o bin/cql \ github.com/CovenantSQL/CovenantSQL/cmd/cql +bin/cql.test: + $(GOTEST) \ + -ldflags "$(ldflags_role_client)" \ + -o bin/cql.test \ + github.com/CovenantSQL/CovenantSQL/cmd/cql + + bin/cql-fuse: $(GOBUILD) \ -ldflags "$(ldflags_role_client_simple_log)" \ @@ -220,11 +195,9 @@ bp: bin/cqld.test bin/cqld miner: bin/cql-minerd.test bin/cql-minerd -observer: bin/cql-observer.test bin/cql-observer - -client: bin/cql-utils bin/cql bin/cql-fuse 
bin/cql-adapter bin/cql-mysql-adapter bin/cql-faucet bin/cql-explorer +client: bin/cql-utils bin/cql bin/cql.test bin/cql-fuse bin/cql-adapter bin/cql-mysql-adapter bin/cql-faucet bin/cql-explorer -all: bp miner observer client +all: bp miner client clean: rm -rf bin/cql* @@ -232,5 +205,5 @@ clean: rm -f coverage.txt .PHONY: status start stop logs push push_testnet clean \ - bin/cqld.test bin/cqld bin/cql-minerd.test bin/cql-minerd bin/cql-utils bin/cql-observer bin/cql-observer.test \ - bin/cql bin/cql-fuse bin/cql-adapter bin/cql-mysql-adapter bin/cql-faucet bin/cql-explorer + bin/cqld.test bin/cqld bin/cql-minerd.test bin/cql-minerd bin/cql-utils \ + bin/cql bin/cql.test bin/cql-fuse bin/cql-adapter bin/cql-mysql-adapter bin/cql-faucet bin/cql-explorer diff --git a/alltest.sh b/alltest.sh index b762c25d6..60e656091 100755 --- a/alltest.sh +++ b/alltest.sh @@ -6,7 +6,7 @@ set -o nounset main() { make clean - make -j6 bp miner observer + make -j5 bp miner bin/cql.test go test -tags "$UNITTESTTAGS" -race -failfast -parallel 16 -cpu 16 -coverprofile main.cover.out $(go list ./... | grep -v CovenantSQL/api) go test -tags "$UNITTESTTAGS" -race -failfast -parallel 16 -cpu 16 -coverpkg ./api/...,./rpc/jsonrpc -coverprofile api.cover.out ./api/... diff --git a/bin/docker-entry.sh b/bin/docker-entry.sh index cb7619ed8..bd32503ad 100755 --- a/bin/docker-entry.sh +++ b/bin/docker-entry.sh @@ -12,9 +12,7 @@ blockproducer) exec /app/cqld -config "${COVENANT_CONF}" -metric-web "${METRIC_WEB_ADDR}" "${@}" ;; observer) - MAGIC_DOLLAR='$' envsubst < /etc/nginx/conf.d/servers/explorer.conf.template > /etc/nginx/conf.d/default.conf - nginx -g 'daemon off;' Date: Tue, 26 Feb 2019 17:30:00 +0800 Subject: [PATCH 011/244] Remove cmd/explorer binary. 
--- Makefile | 10 +- bin/docker-entry.sh | 3 - cmd/cql-explorer/README.md | 182 -------------- cmd/cql-explorer/api.go | 328 ------------------------- cmd/cql-explorer/errors.go | 32 --- cmd/cql-explorer/main.go | 115 --------- cmd/cql-explorer/service.go | 460 ------------------------------------ 7 files changed, 2 insertions(+), 1128 deletions(-) delete mode 100644 cmd/cql-explorer/README.md delete mode 100644 cmd/cql-explorer/api.go delete mode 100644 cmd/cql-explorer/errors.go delete mode 100644 cmd/cql-explorer/main.go delete mode 100644 cmd/cql-explorer/service.go diff --git a/Makefile b/Makefile index 46b69caf7..0276036c6 100644 --- a/Makefile +++ b/Makefile @@ -185,17 +185,11 @@ bin/cql-faucet: -o bin/cql-faucet \ github.com/CovenantSQL/CovenantSQL/cmd/cql-faucet -bin/cql-explorer: - $(GOBUILD) \ - -ldflags "$(ldflags_role_client)" \ - -o bin/cql-explorer \ - github.com/CovenantSQL/CovenantSQL/cmd/cql-explorer - bp: bin/cqld.test bin/cqld miner: bin/cql-minerd.test bin/cql-minerd -client: bin/cql-utils bin/cql bin/cql.test bin/cql-fuse bin/cql-adapter bin/cql-mysql-adapter bin/cql-faucet bin/cql-explorer +client: bin/cql-utils bin/cql bin/cql.test bin/cql-fuse bin/cql-adapter bin/cql-mysql-adapter bin/cql-faucet all: bp miner client @@ -206,4 +200,4 @@ clean: .PHONY: status start stop logs push push_testnet clean \ bin/cqld.test bin/cqld bin/cql-minerd.test bin/cql-minerd bin/cql-utils \ - bin/cql bin/cql.test bin/cql-fuse bin/cql-adapter bin/cql-mysql-adapter bin/cql-faucet bin/cql-explorer + bin/cql bin/cql.test bin/cql-fuse bin/cql-adapter bin/cql-mysql-adapter bin/cql-faucet diff --git a/bin/docker-entry.sh b/bin/docker-entry.sh index bd32503ad..41d618b60 100755 --- a/bin/docker-entry.sh +++ b/bin/docker-entry.sh @@ -26,8 +26,5 @@ cli) faucet) exec /app/cql-faucet -config ${COVENANT_CONF} "${@}" ;; -explorer) - exec /app/cql-explorer -config ${COVENANT_CONF} "${@}" - ;; esac diff --git a/cmd/cql-explorer/README.md b/cmd/cql-explorer/README.md deleted 
file mode 100644 index 3062e854d..000000000 --- a/cmd/cql-explorer/README.md +++ /dev/null @@ -1,182 +0,0 @@ -This doc introduce the usage of CovenantSQL block producer chain explorer server. - -## Prerequisites - -Make sure the ```$GOPATH/bin``` is in your ```$PATH```, download/build the explorer binary. - -```shell -$ go get github.com/CovenantSQL/CovenantSQL/cmd/cql-explorer -``` - -Adapter requires a CovenantSQL ```config.yaml``` which can by generated by configuration generator. - -### Generating Default Config File - -Generate the main configuration file. Same as [Generating Default Config File in Golang Client Doc](https://github.com/CovenantSQL/CovenantSQL/tree/develop/client#generating-default-config-file). An existing configuration file can also be used. - -## Explorer Usage - -### Start - -Start the explorer by following commands: - -```shell -$ cql-explorer -``` - -The available options are: - -```shell -$ cql-explorer --help -Usage of cql-explorer: - -config string - Config file path (default "~/.cql/config.yaml") - -interval duration - New block check interval for explorer (default 2s) - -listen string - Listen address for http explorer api (default "127.0.0.1:4665") - -password string - Master key password for covenantsql -``` - -### API - -#### Query Synced Head Block - -**GET** /v1/head - -##### Request - -##### Response - -```json -{ - "success" : true, - "status" : "ok", - "data" : { - "block" : { - "txs" : [], - "hash" : "20ba21af54da3e17252fb4b6b7331fb6f36aca1b9b793597af6ef46faad34dea", - "timestamp" : 1540278575120.34, - "version" : 1, - "count" : 561, - "producer" : "8d7604acfdb391891a4c795f0939425b6d58bd50a81e579d15f06ecd381ad549", - "height" : 3040488, - "parent" : "f9c4f9c7a1dcbcf14a13eb91a007b33672f1f8a261738f5a25fee27c3ccaa584" - } - } -} -``` - -#### Query Block by _COUNT_ - -**GET** /v1/count/{count} - -##### Request - -__count__: count of specified block, 0 for genesis block - -##### Response - -```json -{ - "success" : true, - 
"status" : "ok", - "data" : { - "block" : { - "version" : 1, - "height" : 0, - "count" : 0, - "timestamp" : 1534197599120, - "hash" : "f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154", - "txs" : [], - "parent" : "0000000000000000000000000000000000000000000000000000000000000001", - "producer" : "0000000000000000000000000000000000000000000000000000000000000001" - } - } -} -``` - -#### Query Block by _HASH_ - -**GET** /v1/block/{hash} - -##### Request - -__hash__: hash of specified block - -##### Response - -```json -{ - "success" : true, - "status" : "ok", - "data" : { - "block" : { - "version" : 1, - "height" : 0, - "count" : 0, - "timestamp" : 1534197599120, - "hash" : "f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154", - "txs" : [], - "parent" : "0000000000000000000000000000000000000000000000000000000000000001", - "producer" : "0000000000000000000000000000000000000000000000000000000000000001" - } - } -} -``` - -#### Query Block by _HEIGHT_ - -**GET** /v1/height/{height} - -##### Request - -__height__: height of specified block, 0 for genesis block (height is related to block produce time and interval) - -##### Response - -```json -{ - "success" : true, - "status" : "ok", - "data" : { - "block" : { - "version" : 1, - "height" : 0, - "count" : 0, - "timestamp" : 1534197599120, - "hash" : "f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154", - "txs" : [], - "parent" : "0000000000000000000000000000000000000000000000000000000000000001", - "producer" : "0000000000000000000000000000000000000000000000000000000000000001" - } - } -} -``` - -#### Query Transaction by _HASH_ - -**GET** /v1/tx/{hash} - -##### Request - -__hash__: hash of specified tx - -##### Response - -```json -{ - "success": true, - "status": "ok", - "data": { - "tx": { - "nonce": 11616, - "amount": 1225, - "sender": "00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9", - "receiver": 
"676b12fef8732ac78a97ea5dba0977bbbabc48f64eee66f09be89a589297e567", - "type": "Transfer" - } - } -} -``` diff --git a/cmd/cql-explorer/api.go b/cmd/cql-explorer/api.go deleted file mode 100644 index dcbca8218..000000000 --- a/cmd/cql-explorer/api.go +++ /dev/null @@ -1,328 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package main - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "strconv" - "time" - - "github.com/gorilla/mux" - - pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/proto" - pt "github.com/CovenantSQL/CovenantSQL/types" - "github.com/CovenantSQL/CovenantSQL/utils/log" -) - -var ( - apiTimeout = time.Second * 10 -) - -func sendResponse(code int, success bool, msg interface{}, data interface{}, rw http.ResponseWriter) { - msgStr := "ok" - if msg != nil { - msgStr = fmt.Sprint(msg) - } - rw.WriteHeader(code) - json.NewEncoder(rw).Encode(map[string]interface{}{ - "status": msgStr, - "success": success, - "data": data, - }) -} - -func sendError(err error, rw http.ResponseWriter) { - if err == ErrNotFound { - sendResponse(404, false, err, nil, rw) - } else if err == ErrBadRequest { - sendResponse(400, false, err, nil, rw) - } else if err != nil { - sendResponse(500, false, err, nil, rw) - } else { - sendResponse(200, true, nil, nil, rw) - } -} - -func 
getUintFromVars(field string, r *http.Request) (value uint32, err error) { - vars := mux.Vars(r) - valueStr := vars[field] - if valueStr == "" { - err = ErrBadRequest - return - } - - valueUint, err := strconv.ParseUint(valueStr, 10, 32) - if err != nil { - return - } - - value = uint32(valueUint) - - return -} - -type explorerAPI struct { - service *Service -} - -func (a *explorerAPI) GetHighestBlock(rw http.ResponseWriter, r *http.Request) { - count, err := a.service.getHighestCount() - if err != nil { - sendError(err, rw) - return - } - - block, _, height, err := a.service.getBlockByCount(count) - if err != nil { - sendError(err, rw) - return - } - - sendResponse(200, true, nil, a.formatBlock(count, height, block), rw) -} - -func (a *explorerAPI) GetBlockByCount(rw http.ResponseWriter, r *http.Request) { - count, err := getUintFromVars("count", r) - if err != nil { - sendError(err, rw) - return - } - - block, _, height, err := a.service.getBlockByCount(count) - if err != nil { - sendError(err, rw) - return - } - - sendResponse(200, true, nil, a.formatBlock(count, height, block), rw) -} - -func (a *explorerAPI) GetBlockByHeight(rw http.ResponseWriter, r *http.Request) { - height, err := getUintFromVars("height", r) - if err != nil { - sendError(err, rw) - return - } - - block, count, _, err := a.service.getBlockByHeight(height) - if err != nil { - sendError(err, rw) - return - } - - sendResponse(200, true, nil, a.formatBlock(count, height, block), rw) -} - -func (a *explorerAPI) GetBlockByHash(rw http.ResponseWriter, r *http.Request) { - h, err := a.getHash(r) - if err != nil { - sendError(err, rw) - return - } - - block, count, height, err := a.service.getBlockByHash(h) - if err != nil { - sendError(err, rw) - return - } - - sendResponse(200, true, nil, a.formatBlock(count, height, block), rw) -} - -func (a *explorerAPI) GetTxByHash(rw http.ResponseWriter, r *http.Request) { - h, err := a.getHash(r) - if err != nil { - sendError(err, rw) - return - } - - tx, 
count, height, err := a.service.getTxByHash(h) - if err != nil { - sendError(err, rw) - return - } - - sendResponse(200, true, nil, a.formatTx(count, height, tx), rw) -} - -func (a *explorerAPI) formatTime(t time.Time) float64 { - return float64(t.UnixNano()) / 1e6 -} - -func (a *explorerAPI) formatBlock(count uint32, height uint32, b *pt.BPBlock) map[string]interface{} { - txs := make([]map[string]interface{}, 0, len(b.Transactions)) - - for _, tx := range b.Transactions { - txs = append(txs, a.formatRawTx(tx)) - } - - return map[string]interface{}{ - "block": map[string]interface{}{ - "height": height, - "count": count, - "hash": b.BlockHash().String(), - "parent": b.ParentHash().String(), - "timestamp": a.formatTime(b.Timestamp()), - "version": b.SignedHeader.Version, - "producer": b.SignedHeader.Producer.String(), - "txs": txs, - }, - } -} - -func (a *explorerAPI) formatRawTx(t pi.Transaction) (res map[string]interface{}) { - if t == nil { - return nil - } - - switch tx := t.(type) { - case *pt.Transfer: - res = map[string]interface{}{ - "nonce": tx.Nonce, - "sender": tx.Sender.String(), - "receiver": tx.Receiver.String(), - "amount": tx.Amount, - } - case *pt.Billing: - res = a.formatTxBilling(tx) - case *pt.BaseAccount: - res = map[string]interface{}{ - "next_nonce": tx.NextNonce, - "address": tx.Address, - "stable_balance": tx.TokenBalance[pt.Particle], - "covenant_balance": tx.TokenBalance[pt.Wave], - "rating": tx.Rating, - } - case *pi.TransactionWrapper: - res = a.formatRawTx(tx.Unwrap()) - return - default: - // for unknown transactions - if txBytes, err := json.Marshal(tx); err != nil { - res = map[string]interface{}{ - "error": err.Error(), - } - } else if err = json.Unmarshal(txBytes, &res); err != nil { - res = map[string]interface{}{ - "error": err.Error(), - } - } - } - - res["type"] = t.GetTransactionType().String() - - return -} - -func (a *explorerAPI) formatTxBilling(tx *pt.Billing) (res map[string]interface{}) { - if tx == nil { - return - } - 
- return map[string]interface{}{ - "nonce": tx.Nonce, - "producer": tx.Producer.String(), - "billing_request": func(br pt.BillingRequest) map[string]interface{} { - return map[string]interface{}{ - "database_id": br.Header.DatabaseID, - "low_block": br.Header.LowBlock.String(), - "low_height": br.Header.LowHeight, - "high_block": br.Header.HighBlock.String(), - "high_height": br.Header.HighHeight, - "gas_amounts": func(gasAmounts []*proto.AddrAndGas) (d []map[string]interface{}) { - for _, g := range gasAmounts { - d = append(d, map[string]interface{}{ - "address": g.AccountAddress.String(), - "node": g.RawNodeID.String(), - "amount": g.GasAmount, - }) - } - return - }(br.Header.GasAmounts), - } - }(tx.BillingRequest), - "receivers": func(receivers []*proto.AccountAddress) (s []string) { - for _, r := range receivers { - s = append(s, r.String()) - } - return - }(tx.Receivers), - "fees": tx.Fees, - "rewards": tx.Rewards, - } -} - -func (a *explorerAPI) formatTx(count uint32, height uint32, tx pi.Transaction) map[string]interface{} { - var res map[string]interface{} - - if res = a.formatRawTx(tx); res != nil { - res["height"] = height - res["count"] = count - } - - return map[string]interface{}{ - "tx": res, - } -} - -func (a *explorerAPI) getHash(r *http.Request) (h *hash.Hash, err error) { - vars := mux.Vars(r) - hStr := vars["hash"] - return hash.NewHashFromStr(hStr) -} - -func startAPI(service *Service, listenAddr string) (server *http.Server, err error) { - router := mux.NewRouter() - router.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) { - sendResponse(http.StatusOK, true, nil, nil, rw) - }).Methods("GET") - - api := &explorerAPI{ - service: service, - } - v1Router := router.PathPrefix("/v1").Subrouter() - v1Router.HandleFunc("/tx/{hash}", api.GetTxByHash).Methods("GET") - v1Router.HandleFunc("/height/{height:[0-9]+}", api.GetBlockByHeight).Methods("GET") - v1Router.HandleFunc("/block/{hash}", api.GetBlockByHash).Methods("GET") - 
v1Router.HandleFunc("/count/{count:[0-9]+}", api.GetBlockByCount).Methods("GET") - v1Router.HandleFunc("/head", api.GetHighestBlock).Methods("GET") - - server = &http.Server{ - Addr: listenAddr, - WriteTimeout: apiTimeout, - ReadTimeout: apiTimeout, - IdleTimeout: apiTimeout, - Handler: router, - } - - go func() { - if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed { - log.WithError(err).Fatal("start api server failed") - } - }() - - return server, err -} - -func stopAPI(server *http.Server) (err error) { - return server.Shutdown(context.Background()) -} diff --git a/cmd/cql-explorer/errors.go b/cmd/cql-explorer/errors.go deleted file mode 100644 index 22937dac8..000000000 --- a/cmd/cql-explorer/errors.go +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package main - -import "github.com/pkg/errors" - -var ( - // ErrNilBlock represents nil block received. - ErrNilBlock = errors.New("nil block received") - // ErrNilTransaction represents nil transaction received. - ErrNilTransaction = errors.New("nil transaction received") - // ErrStopped defines error on explorer service has already stopped - ErrStopped = errors.New("explorer service has stopped") - // ErrNotFound defines error on failed to found specified resource - ErrNotFound = errors.New("resource not found") - // ErrBadRequest defines errors on error input field. 
- ErrBadRequest = errors.New("request field not fulfilled") -) diff --git a/cmd/cql-explorer/main.go b/cmd/cql-explorer/main.go deleted file mode 100644 index f2f1550ba..000000000 --- a/cmd/cql-explorer/main.go +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package main - -import ( - "flag" - "fmt" - "math/rand" - "net/http" - "os" - "runtime" - "time" - - "github.com/CovenantSQL/CovenantSQL/client" - "github.com/CovenantSQL/CovenantSQL/utils" - "github.com/CovenantSQL/CovenantSQL/utils/log" -) - -const name = "cql-explorer" - -var ( - version = "unknown" -) - -var ( - // config - configFile string - password string - listenAddr string - checkInterval time.Duration - showVersion bool -) - -func init() { - flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file path") - flag.StringVar(&listenAddr, "listen", "127.0.0.1:4665", "Listen address for http explorer api") - flag.DurationVar(&checkInterval, "interval", time.Second*2, "New block check interval for explorer") - flag.StringVar(&password, "password", "", "Master key password for covenantsql") - flag.BoolVar(&showVersion, "version", false, "Show version information and exit") -} - -func main() { - // set random - rand.Seed(time.Now().UnixNano()) - log.SetLevel(log.DebugLevel) - flag.Parse() - if showVersion { - fmt.Printf("%v %v %v %v %v\n", - name, version, runtime.GOOS, runtime.GOARCH, 
runtime.Version()) - os.Exit(0) - } - - configFile = utils.HomeDirExpand(configFile) - - flag.Visit(func(f *flag.Flag) { - log.Infof("args %#v : %s", f.Name, f.Value) - }) - - // init client - var err error - if err = client.Init(configFile, []byte(password)); err != nil { - log.WithError(err).Fatal("init node failed") - return - } - - // start service - var service *Service - if service, err = NewService(checkInterval); err != nil { - log.WithError(err).Fatal("init service failed") - return - } - - // start api - var httpServer *http.Server - if httpServer, err = startAPI(service, listenAddr); err != nil { - log.WithError(err).Fatal("start explorer api failed") - return - } - - // start subscription - if err = service.start(); err != nil { - log.WithError(err).Fatal("start service failed") - return - } - - <-utils.WaitForExit() - - // stop explorer api - if err = stopAPI(httpServer); err != nil { - log.WithError(err).Fatal("stop explorer api failed") - return - } - - // stop subscription - if err = service.stop(); err != nil { - log.WithError(err).Fatal("stop service failed") - return - } - - log.Info("explorer stopped") -} diff --git a/cmd/cql-explorer/service.go b/cmd/cql-explorer/service.go deleted file mode 100644 index 55233c66f..000000000 --- a/cmd/cql-explorer/service.go +++ /dev/null @@ -1,460 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package main - -import ( - "bytes" - "encoding/binary" - "path/filepath" - "sync" - "sync/atomic" - "time" - - "github.com/syndtr/goleveldb/leveldb" - "github.com/syndtr/goleveldb/leveldb/util" - - pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" - "github.com/CovenantSQL/CovenantSQL/conf" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/route" - "github.com/CovenantSQL/CovenantSQL/rpc" - pt "github.com/CovenantSQL/CovenantSQL/types" - "github.com/CovenantSQL/CovenantSQL/utils" - "github.com/CovenantSQL/CovenantSQL/utils/log" -) - -const ( - dbFileName = "explorer.db" -) - -var ( - // storage keys - blockKeyPrefix = []byte("BLOCK_") - blockHashPrefix = []byte("HASH_") - blockHeightPrefix = []byte("HEIGHT_") - txKeyPrefix = []byte("TX_") -) - -// Service defines the main chain explorer service structure. -type Service struct { - db *leveldb.DB - - caller *rpc.Caller - stopped int32 - stopCh chan struct{} - triggerCh chan struct{} - wg sync.WaitGroup - - // new block check interval - checkInterval time.Duration - - // next block to fetch - nextBlockToFetch uint32 -} - -// NewService creates new explorer service handler. 
-func NewService(checkInterval time.Duration) (service *Service, err error) { - // open explorer database - dbFile := filepath.Join(conf.GConf.WorkingRoot, dbFileName) - db, err := leveldb.OpenFile(dbFile, nil) - if err != nil { - return - } - - defer func() { - if err != nil { - db.Close() - } - }() - - // init service - service = &Service{ - db: db, - caller: rpc.NewCaller(), - stopCh: make(chan struct{}), - triggerCh: make(chan struct{}, 1), - checkInterval: checkInterval, - } - - return -} - -func (s *Service) start() (err error) { - if atomic.LoadInt32(&s.stopped) == 1 { - // stopped - return ErrStopped - } - - if err = s.getSubscriptionCheckpoint(); err != nil { - return - } - - // start subscription worker - s.wg.Add(1) - go s.subscriptionWorker() - - return -} - -func (s *Service) getBlockByCount(c uint32) (b *pt.BPBlock, count uint32, height uint32, err error) { - var bKey []byte - bKey = append(bKey, blockKeyPrefix...) - bKey = append(bKey, uint32ToBytes(c)...) - - it := s.db.NewIterator(util.BytesPrefix(bKey), nil) - if it.First() { - // decode - bKeyLen := len(bKey) - hBytes := it.Key()[bKeyLen : bKeyLen+4] - height = bytesToUint32(hBytes) - count = c - err = utils.DecodeMsgPack(it.Value(), &b) - } else { - // not found - err = ErrNotFound - } - it.Release() - - if err != nil { - // ignore iterator error - it.Error() - return - } - - err = it.Error() - - return -} - -func (s *Service) getBlockByHash(h *hash.Hash) (b *pt.BPBlock, count uint32, height uint32, err error) { - if h == nil { - err = ErrNotFound - return - } - - var bKey []byte - bKey = append(bKey, blockHashPrefix...) - bKey = append(bKey, h[:]...) 
- - var bCountData []byte - if bCountData, err = s.db.Get(bKey, nil); err != nil { - if err == leveldb.ErrNotFound { - err = ErrNotFound - } - return - } - - count = bytesToUint32(bCountData) - return s.getBlockByCount(count) -} - -func (s *Service) getBlockByHeight(h uint32) (b *pt.BPBlock, count uint32, height uint32, err error) { - var bKey []byte - bKey = append(bKey, blockHeightPrefix...) - bKey = append(bKey, uint32ToBytes(h)...) - - var bCountData []byte - if bCountData, err = s.db.Get(bKey, nil); err != nil { - if err == leveldb.ErrNotFound { - err = ErrNotFound - } - return - } - - count = bytesToUint32(bCountData) - return s.getBlockByCount(count) -} - -func (s *Service) getTxByHash(h *hash.Hash) (tx pi.Transaction, c uint32, height uint32, err error) { - if h == nil { - err = ErrNotFound - return - } - - var txKey []byte - txKey = append(txKey, txKeyPrefix...) - txKey = append(txKey, h[:]...) - - var bCountData []byte - if bCountData, err = s.db.Get(txKeyPrefix, nil); err != nil { - if err == leveldb.ErrNotFound { - err = ErrNotFound - } - return - } - - c = bytesToUint32(bCountData) - - var b *pt.BPBlock - if b, _, height, err = s.getBlockByCount(c); err != nil { - return - } - - if b == nil || b.Transactions == nil { - err = ErrNotFound - return - } - - for _, curTx := range b.Transactions { - if curTx == nil { - continue - } - - if curH := curTx.Hash(); h.IsEqual(&curH) { - tx = curTx - break - } - } - - if tx == nil { - err = ErrNotFound - return - } - - return -} - -func (s *Service) getHighestCount() (c uint32, err error) { - // load previous committed counts - it := s.db.NewIterator(util.BytesPrefix(blockKeyPrefix), nil) - if it.Last() { - // decode block count from key - blockKey := it.Key() - prefixLen := len(blockKeyPrefix) - c = bytesToUint32(blockKey[prefixLen : prefixLen+4]) - } else { - err = ErrNotFound - } - it.Release() - - if err != nil { - it.Error() - return - } - - err = it.Error() - - return -} - -func (s *Service) 
getSubscriptionCheckpoint() (err error) { - var lastBlockCount uint32 - if lastBlockCount, err = s.getHighestCount(); err != nil { - log.WithError(err).Warning("get last block failed") - - if err == ErrNotFound { - // not found, set last block count to 0 - log.Info("set current block count fetch head to 0") - err = nil - atomic.StoreUint32(&s.nextBlockToFetch, 0) - } - - return - } - - log.WithFields(log.Fields{ - "count": lastBlockCount, - }).Info("fetched last block count") - - atomic.StoreUint32(&s.nextBlockToFetch, lastBlockCount+1) - - return -} - -func (s *Service) subscriptionWorker() { - defer s.wg.Done() - - log.Info("started subscription worker") - for { - select { - case <-s.stopCh: - log.Info("exited subscription worker") - return - case <-s.triggerCh: - case <-time.After(s.checkInterval): - } - - // request block producer for next block - s.requestBlock() - } -} - -func (s *Service) requestBlock() { - if atomic.LoadInt32(&s.stopped) == 1 { - return - } - - blockCount := atomic.LoadUint32(&s.nextBlockToFetch) - log.WithFields(log.Fields{"count": blockCount}).Info("try fetch next block") - - req := &pt.FetchBlockByCountReq{Count: blockCount} - resp := &pt.FetchBlockResp{} - - if err := s.requestBP(route.MCCFetchBlockByCount.String(), req, resp); err != nil { - // fetch block failed - log.WithError(err).Warning("fetch block failed,wait for next round") - return - } - - // process block - if err := s.processBlock(blockCount, resp.Height, resp.Block); err != nil { - log.WithError(err).Warning("process block failed, try fetch/process again") - return - } - - atomic.AddUint32(&s.nextBlockToFetch, 1) - - // last fetch success, trigger next fetch for fast sync - select { - case s.triggerCh <- struct{}{}: - default: - } -} - -func (s *Service) processBlock(c uint32, h uint32, b *pt.BPBlock) (err error) { - if b == nil { - log.WithField("count", c).Warning("processed nil block") - return ErrNilBlock - } - - log.WithFields(log.Fields{ - "hash": b.BlockHash(), - 
"parent": b.ParentHash(), - "height": h, - "count": c, - }).Info("process new block") - - if err = s.saveTransactions(c, b.Transactions); err != nil { - return - } - - err = s.saveBlock(c, h, b) - - return -} - -func (s *Service) saveTransactions(c uint32, txs []pi.Transaction) (err error) { - if txs == nil || len(txs) == 0 { - return - } - - for _, t := range txs { - if err = s.saveTransaction(c, t); err != nil { - return - } - } - - return -} - -func (s *Service) saveTransaction(c uint32, tx pi.Transaction) (err error) { - if tx == nil { - return ErrNilTransaction - } - - txHash := tx.Hash() - - var txKey []byte - - txKey = append(txKey, txKeyPrefix...) - txKey = append(txKey, txHash[:]...) - txData := uint32ToBytes(c) - - err = s.db.Put(txKey, txData, nil) - - return -} - -func (s *Service) saveBlock(c uint32, h uint32, b *pt.BPBlock) (err error) { - if b == nil { - return ErrNilBlock - } - - bHash := b.BlockHash() - - var buf *bytes.Buffer - - if buf, err = utils.EncodeMsgPack(b); err != nil { - return - } - - cBytes := uint32ToBytes(c) - hBytes := uint32ToBytes(h) - - var bKey, bHashKey, bHeightKey []byte - - bKey = append(bKey, blockKeyPrefix...) - bKey = append(bKey, cBytes...) - bKey = append(bKey, hBytes...) - - bHashKey = append(bHashKey, blockHashPrefix...) - bHashKey = append(bHashKey, bHash[:]...) - - bHeightKey = append(bHeightKey, blockHeightPrefix...) - bHeightKey = append(bHeightKey, hBytes...) 
- - if err = s.db.Put(bKey, buf.Bytes(), nil); err != nil { - return - } - - if err = s.db.Put(bHashKey, cBytes, nil); err != nil { - return - } - - err = s.db.Put(bHeightKey, cBytes, nil) - - return -} - -func (s *Service) requestBP(method string, request interface{}, response interface{}) (err error) { - var bpNodeID proto.NodeID - if bpNodeID, err = rpc.GetCurrentBP(); err != nil { - return - } - return s.caller.CallNode(bpNodeID, method, request, response) -} - -func (s *Service) stop() (err error) { - if !atomic.CompareAndSwapInt32(&s.stopped, 0, 1) { - // stopped - return ErrStopped - } - - log.Info("stop subscription") - - select { - case <-s.stopCh: - default: - close(s.stopCh) - } - - s.wg.Wait() - s.db.Close() - - return -} - -func uint32ToBytes(h uint32) (data []byte) { - data = make([]byte, 4) - binary.BigEndian.PutUint32(data, h) - return -} - -func bytesToUint32(data []byte) uint32 { - return binary.BigEndian.Uint32(data) -} From 0a12c8580c84ca3ae409026709f4b0553cb2fe0f Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 26 Feb 2019 17:36:18 +0800 Subject: [PATCH 012/244] Add cql test file for building test binary. --- cmd/cql/main_test.go | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 cmd/cql/main_test.go diff --git a/cmd/cql/main_test.go b/cmd/cql/main_test.go new file mode 100644 index 000000000..03a7b6494 --- /dev/null +++ b/cmd/cql/main_test.go @@ -0,0 +1,26 @@ +// +build testbinary + +/* + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package main + +import "testing" + +func TestMain(m *testing.M) { + defer m.Run() + main() +} From 9b75215033edfeab96b461ce27523392948b6120 Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 26 Feb 2019 18:24:02 +0800 Subject: [PATCH 013/244] Change cql.test test case listen dir. --- sqlchain/observer/observation_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sqlchain/observer/observation_test.go b/sqlchain/observer/observation_test.go index adfb5f3ce..f9a95ab31 100644 --- a/sqlchain/observer/observation_test.go +++ b/sqlchain/observer/observation_test.go @@ -491,7 +491,7 @@ func TestFullProcess(t *testing.T) { []string{"-config", FJ(testWorkingDir, "./observation/node_observer/config.yaml"), "-bg-log-level", "debug", "-test.coverprofile", FJ(baseDir, "./cmd/cql/observer.cover.out"), - "-web", "127.0.0.1:4123", + "-web", "127.0.0.1:4663", }, "observer", testWorkingDir, logDir, false, ) @@ -722,7 +722,7 @@ func TestFullProcess(t *testing.T) { []string{"-config", FJ(testWorkingDir, "./observation/node_observer/config.yaml"), "-bg-log-level", "debug", "-test.coverprofile", FJ(baseDir, "./cmd/cql/observer.cover.out"), - "-web", "127.0.0.1:4123", + "-web", "127.0.0.1:4663", }, "observer", testWorkingDir, logDir, false, ) From 4e32f5d0afb8032bc2e6a0eeba394c8bef65da44 Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 27 Feb 2019 11:02:25 +0800 Subject: [PATCH 014/244] Remove make runner target. --- Makefile | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 0276036c6..a1519d68f 100644 --- a/Makefile +++ b/Makefile @@ -40,7 +40,7 @@ builder: status -f docker/builder.Dockerfile \ . -runner: builder +docker: builder docker build \ --tag $(IMAGE):$(VERSION) \ --tag $(IMAGE):latest \ @@ -49,8 +49,6 @@ runner: builder -f docker/Dockerfile \ . 
-docker: runner - docker_clean: status docker rmi -f $(BUILDER):latest docker rmi -f $(IMAGE):latest From 3a1cf7be3cc302f7a195f450ac205d1a804beff9 Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 27 Feb 2019 14:51:57 +0800 Subject: [PATCH 015/244] Upgrade travis go version to 1.11.x --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 23f022dff..23da32731 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,7 +7,7 @@ env: - REVIEWDOG_VERSION=0.9.11 language: go go: - - '1.10.x' + - '1.11.x' os: - linux From 2c7b406ecb14c947aa554ca0ee62aa68751e77b9 Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 25 Feb 2019 22:48:42 +0800 Subject: [PATCH 016/244] Add param for test old client. --- test/testnet_client/run.sh | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/test/testnet_client/run.sh b/test/testnet_client/run.sh index a17511d0c..7145c4cf6 100755 --- a/test/testnet_client/run.sh +++ b/test/testnet_client/run.sh @@ -5,6 +5,10 @@ set -e TEST_WD=$(cd $(dirname $0)/; pwd) PROJECT_DIR=$(cd ${TEST_WD}/../../; pwd) +CURRENTBIN=${PROJECT_DIR}/bin +OLDBIN="/CovenantSQL_bins/v0.3.0" +BIN=${CURRENTBIN} + echo ${PROJECT_DIR} # Build @@ -12,27 +16,32 @@ echo ${PROJECT_DIR} # cd ${PROJECT_DIR} && make use_all_cores cd ${TEST_WD} -echo -ne "y\n" | ${PROJECT_DIR}/bin/cql-utils -tool confgen -skip-master-key -${PROJECT_DIR}/bin/cql-utils -tool addrgen -skip-master-key | tee wallet.txt + +if [ "old" == "$param" ]; then + BIN=${OLDBIN} +fi + +echo -ne "y\n" | ${BIN}/cql-utils -tool confgen -skip-master-key +${BIN}/cql-utils -tool addrgen -skip-master-key | tee wallet.txt #get wallet addr wallet=$(awk '{print $3}' wallet.txt) #transfer some coin to above address -${PROJECT_DIR}/bin/cql -config ${PROJECT_DIR}/conf/testnet/config.yaml -transfer \ +${BIN}/cql -config ${PROJECT_DIR}/conf/testnet/config.yaml -transfer \ '{"addr":"'${wallet}'", "amount":"100000000 Particle"}' -wait-tx-confirm 
-${PROJECT_DIR}/bin/cql -get-balance +${BIN}/cql -get-balance -${PROJECT_DIR}/bin/cql -create 2 -wait-tx-confirm | tee dsn.txt +${BIN}/cql -create 2 -wait-tx-confirm | tee dsn.txt #get dsn dsn=$(cat dsn.txt) -${PROJECT_DIR}/bin/cql -dsn ${dsn} \ +${BIN}/cql -dsn ${dsn} \ -command 'create table test_for_new_account(column1 int);' -${PROJECT_DIR}/bin/cql -dsn ${dsn} \ +${BIN}/cql -dsn ${dsn} \ -command 'show tables;' | tee result.log grep "1 row" result.log From bb59b5fbb1b6c101ece9bd1338a315ee1f2b5ad9 Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 25 Feb 2019 23:23:16 +0800 Subject: [PATCH 017/244] Add start_bp_miner for makefile, Add test old clinet task. --- .gitlab-ci.yml | 9 +++++++++ Makefile | 5 +++++ test/testnet_client/run.sh | 35 +++++++++++++++++++++++++---------- 3 files changed, 39 insertions(+), 10 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index cec6a3234..04c353de6 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -41,3 +41,12 @@ compatibility-testnet: - go test -tags "$UNITTESTTAGS" -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ - set -x - ./test/testnet_client/run.sh + +compatibility-old-client: + stage: test + script: + - set -o errexit + - set -o pipefail + - set -x + - ./test/testnet_client/run.sh old + diff --git a/Makefile b/Makefile index a1519d68f..ec34db071 100644 --- a/Makefile +++ b/Makefile @@ -68,6 +68,11 @@ start: docker-compose up --no-start docker-compose start +start_bp_miner: + docker-compose down + docker-compose up --no-start covenantsql_bp_0 covenantsql_bp_1 covenantsql_bp_2 covenantsql_miner_0 covenantsql_miner_1 covenantsql_miner_2 + docker-compose start covenantsql_bp_0 covenantsql_bp_1 covenantsql_bp_2 covenantsql_miner_0 covenantsql_miner_1 covenantsql_miner_2 + stop: docker-compose down diff --git a/test/testnet_client/run.sh b/test/testnet_client/run.sh index 7145c4cf6..6d7578f72 100755 --- a/test/testnet_client/run.sh +++ b/test/testnet_client/run.sh @@ -19,18 +19,27 @@ cd 
${TEST_WD} if [ "old" == "$param" ]; then BIN=${OLDBIN} + rm -rf ~/.cql + mkdir -p ~/.cql + cp ${PROJECT_DIR}/test/service/node_c/config.yaml ~/.cql/ + cp ${PROJECT_DIR}/test/service/node_c/private.key ~/.cql/ + cd ${PROJECT_DIR} + make runner + make start_bp_miner + cd ${TEST_WD} +else + BIN=${CURRENTBIN} + echo -ne "y\n" | ${BIN}/cql-utils -tool confgen -skip-master-key + + #get wallet addr + ${BIN}/cql-utils -tool addrgen -skip-master-key | tee wallet.txt + wallet=$(awk '{print $3}' wallet.txt) + + #transfer some coin to above address + ${BIN}/cql -config ${PROJECT_DIR}/conf/testnet/config.yaml -transfer \ + '{"addr":"'${wallet}'", "amount":"100000000 Particle"}' -wait-tx-confirm fi -echo -ne "y\n" | ${BIN}/cql-utils -tool confgen -skip-master-key -${BIN}/cql-utils -tool addrgen -skip-master-key | tee wallet.txt - -#get wallet addr -wallet=$(awk '{print $3}' wallet.txt) - -#transfer some coin to above address -${BIN}/cql -config ${PROJECT_DIR}/conf/testnet/config.yaml -transfer \ - '{"addr":"'${wallet}'", "amount":"100000000 Particle"}' -wait-tx-confirm - ${BIN}/cql -get-balance ${BIN}/cql -create 2 -wait-tx-confirm | tee dsn.txt @@ -44,4 +53,10 @@ ${BIN}/cql -dsn ${dsn} \ ${BIN}/cql -dsn ${dsn} \ -command 'show tables;' | tee result.log +# clean docker +if [ "old" == "$param" ]; then + make stop + make docker_clean +fi + grep "1 row" result.log From 572160f67dee89af20e85aefe0752e25592ad373 Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 25 Feb 2019 23:39:32 +0800 Subject: [PATCH 018/244] Fix test old client case missing param set bug. 
--- test/testnet_client/run.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/testnet_client/run.sh b/test/testnet_client/run.sh index 6d7578f72..77cb2fdc9 100755 --- a/test/testnet_client/run.sh +++ b/test/testnet_client/run.sh @@ -1,5 +1,5 @@ #!/bin/bash -x - +param=$1 set -e TEST_WD=$(cd $(dirname $0)/; pwd) From a5e6baa3b33f8294edfb88a2a4df49819eed8c1d Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 26 Feb 2019 15:18:27 +0800 Subject: [PATCH 019/244] Try using build docker with docker command. --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 04c353de6..3261afde7 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,4 +1,4 @@ -image: covenantsql/build +image: laodouya/covenantsqlbuild variables: REVIEWDOG_VERSION: 0.9.11 From 5061e5cc8027468b7abef7b39d455fcf727ba376 Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 27 Feb 2019 16:35:47 +0800 Subject: [PATCH 020/244] Revert compatibility test docker build. 
--- .gitlab-ci.yml | 2 +- Makefile | 5 ----- test/testnet_client/run.sh | 10 ---------- 3 files changed, 1 insertion(+), 16 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 3261afde7..04c353de6 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,4 +1,4 @@ -image: laodouya/covenantsqlbuild +image: covenantsql/build variables: REVIEWDOG_VERSION: 0.9.11 diff --git a/Makefile b/Makefile index ec34db071..a1519d68f 100644 --- a/Makefile +++ b/Makefile @@ -68,11 +68,6 @@ start: docker-compose up --no-start docker-compose start -start_bp_miner: - docker-compose down - docker-compose up --no-start covenantsql_bp_0 covenantsql_bp_1 covenantsql_bp_2 covenantsql_miner_0 covenantsql_miner_1 covenantsql_miner_2 - docker-compose start covenantsql_bp_0 covenantsql_bp_1 covenantsql_bp_2 covenantsql_miner_0 covenantsql_miner_1 covenantsql_miner_2 - stop: docker-compose down diff --git a/test/testnet_client/run.sh b/test/testnet_client/run.sh index 77cb2fdc9..a837237d6 100755 --- a/test/testnet_client/run.sh +++ b/test/testnet_client/run.sh @@ -23,10 +23,6 @@ if [ "old" == "$param" ]; then mkdir -p ~/.cql cp ${PROJECT_DIR}/test/service/node_c/config.yaml ~/.cql/ cp ${PROJECT_DIR}/test/service/node_c/private.key ~/.cql/ - cd ${PROJECT_DIR} - make runner - make start_bp_miner - cd ${TEST_WD} else BIN=${CURRENTBIN} echo -ne "y\n" | ${BIN}/cql-utils -tool confgen -skip-master-key @@ -53,10 +49,4 @@ ${BIN}/cql -dsn ${dsn} \ ${BIN}/cql -dsn ${dsn} \ -command 'show tables;' | tee result.log -# clean docker -if [ "old" == "$param" ]; then - make stop - make docker_clean -fi - grep "1 row" result.log From b767b94f67fc616c9fa305a1dcc66dbe7552112f Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 27 Feb 2019 17:22:25 +0800 Subject: [PATCH 021/244] Let testnet_client only run testnet. 
--- test/testnet_client/run.sh | 33 ++++++++++----------------------- 1 file changed, 10 insertions(+), 23 deletions(-) diff --git a/test/testnet_client/run.sh b/test/testnet_client/run.sh index a837237d6..5e53cd7d2 100755 --- a/test/testnet_client/run.sh +++ b/test/testnet_client/run.sh @@ -1,13 +1,9 @@ #!/bin/bash -x -param=$1 set -e TEST_WD=$(cd $(dirname $0)/; pwd) PROJECT_DIR=$(cd ${TEST_WD}/../../; pwd) - -CURRENTBIN=${PROJECT_DIR}/bin -OLDBIN="/CovenantSQL_bins/v0.3.0" -BIN=${CURRENTBIN} +BIN=${PROJECT_DIR}/bin echo ${PROJECT_DIR} @@ -17,24 +13,15 @@ echo ${PROJECT_DIR} cd ${TEST_WD} -if [ "old" == "$param" ]; then - BIN=${OLDBIN} - rm -rf ~/.cql - mkdir -p ~/.cql - cp ${PROJECT_DIR}/test/service/node_c/config.yaml ~/.cql/ - cp ${PROJECT_DIR}/test/service/node_c/private.key ~/.cql/ -else - BIN=${CURRENTBIN} - echo -ne "y\n" | ${BIN}/cql-utils -tool confgen -skip-master-key - - #get wallet addr - ${BIN}/cql-utils -tool addrgen -skip-master-key | tee wallet.txt - wallet=$(awk '{print $3}' wallet.txt) - - #transfer some coin to above address - ${BIN}/cql -config ${PROJECT_DIR}/conf/testnet/config.yaml -transfer \ - '{"addr":"'${wallet}'", "amount":"100000000 Particle"}' -wait-tx-confirm -fi +echo -ne "y\n" | ${BIN}/cql-utils -tool confgen -skip-master-key + +#get wallet addr +${BIN}/cql-utils -tool addrgen -skip-master-key | tee wallet.txt +wallet=$(awk '{print $3}' wallet.txt) + +#transfer some coin to above address +${BIN}/cql -config ${PROJECT_DIR}/conf/testnet/config.yaml -transfer \ + '{"addr":"'${wallet}'", "amount":"100000000 Particle"}' -wait-tx-confirm ${BIN}/cql -get-balance From 459bd5364cf99c7ff122abef1d64dc16d4681fad Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 27 Feb 2019 19:55:34 +0800 Subject: [PATCH 022/244] Add test old clinet to current bp miner test. 
--- .gitlab-ci.yml | 2 +- test/compatibility/old_client_test.sh | 46 +++++++++++++++++++++++++++ 2 files changed, 47 insertions(+), 1 deletion(-) create mode 100755 test/compatibility/old_client_test.sh diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 04c353de6..6000d0df8 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -48,5 +48,5 @@ compatibility-old-client: - set -o errexit - set -o pipefail - set -x - - ./test/testnet_client/run.sh old + - ./test/compatibility/old_client_test.sh /CovenantSQL_bins/v0.3.0 diff --git a/test/compatibility/old_client_test.sh b/test/compatibility/old_client_test.sh new file mode 100755 index 000000000..0afe0afde --- /dev/null +++ b/test/compatibility/old_client_test.sh @@ -0,0 +1,46 @@ +#!/bin/bash -x + +OUTSIDE_BIN_DIR=$1 + +TEST_WD=$(cd $(dirname $0)/; pwd) +PROJECT_DIR=$(cd ${TEST_WD}/../../; pwd) + +if [ -n "$OUTSIDE_BIN_DIR" ]; then + BIN=${OUTSIDE_BIN_DIR} +else + BIN=${PROJECT_DIR}/bin +fi + +cp ${PROJECT_DIR}/test/integration/node_c/config.yaml ~/.cql/ +cp ${PROJECT_DIR}/test/integration/node_c/private.key ~/.cql/ + +# start current version bp +${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_0/config.yaml +${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_1/config.yaml +${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_2/config.yaml + +# wait bp start +sleep 10 + +# start current version miner +${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml +${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml +${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml + +cd ${TEST_WD} + +${BIN}/cql -get-balance + +${BIN}/cql -create 2 -wait-tx-confirm | tee dsn.txt + +#get dsn +dsn=$(cat dsn.txt) + +${BIN}/cql -dsn ${dsn} \ + -command 'create table test_for_new_account(column1 int);' + +${BIN}/cql -dsn ${dsn} \ + -command 'show tables;' | tee result.log + 
+grep "1 row" result.log + From b0972cc11e37e16171910f7670a31af23532a6b2 Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 27 Feb 2019 20:14:44 +0800 Subject: [PATCH 023/244] Make bp miner bin before compatibility test. --- .gitlab-ci.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6000d0df8..f947e739e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -48,5 +48,7 @@ compatibility-old-client: - set -o errexit - set -o pipefail - set -x + - make clean + - make bin/cqld bin/cql-minerd - ./test/compatibility/old_client_test.sh /CovenantSQL_bins/v0.3.0 From 5435557b9cd8bc0a0f88d7bd424090522a957b61 Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 27 Feb 2019 20:28:54 +0800 Subject: [PATCH 024/244] Make compatibility test bp and miner running background --- test/compatibility/old_client_test.sh | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/test/compatibility/old_client_test.sh b/test/compatibility/old_client_test.sh index 0afe0afde..2cee9ed91 100755 --- a/test/compatibility/old_client_test.sh +++ b/test/compatibility/old_client_test.sh @@ -4,6 +4,7 @@ OUTSIDE_BIN_DIR=$1 TEST_WD=$(cd $(dirname $0)/; pwd) PROJECT_DIR=$(cd ${TEST_WD}/../../; pwd) +cd ${TEST_WD} if [ -n "$OUTSIDE_BIN_DIR" ]; then BIN=${OUTSIDE_BIN_DIR} @@ -15,19 +16,17 @@ cp ${PROJECT_DIR}/test/integration/node_c/config.yaml ~/.cql/ cp ${PROJECT_DIR}/test/integration/node_c/private.key ~/.cql/ # start current version bp -${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_0/config.yaml -${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_1/config.yaml -${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_2/config.yaml +nohup ${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_0/config.yaml 2>&1 > ${OUTSIDE_BIN_DIR}/bp0.log +nohup ${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_1/config.yaml 2>&1 > ${OUTSIDE_BIN_DIR}/bp1.log +nohup 
${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_2/config.yaml 2>&1 > ${OUTSIDE_BIN_DIR}/bp2.log # wait bp start sleep 10 # start current version miner -${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml -${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml -${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml - -cd ${TEST_WD} +nohup ${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml 2>&1 > ${OUTSIDE_BIN_DIR}/miner0.log +nohup ${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml 2>&1 > ${OUTSIDE_BIN_DIR}/miner1.log +nohup ${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml 2>&1 > ${OUTSIDE_BIN_DIR}/miner2.log ${BIN}/cql -get-balance From b5232b8fb46da4bbffe4fec1c3c1c36dff2b0c6b Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 27 Feb 2019 20:38:35 +0800 Subject: [PATCH 025/244] Fix bp miner not running background. 
--- test/compatibility/old_client_test.sh | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/test/compatibility/old_client_test.sh b/test/compatibility/old_client_test.sh index 2cee9ed91..83620314b 100755 --- a/test/compatibility/old_client_test.sh +++ b/test/compatibility/old_client_test.sh @@ -12,33 +12,32 @@ else BIN=${PROJECT_DIR}/bin fi -cp ${PROJECT_DIR}/test/integration/node_c/config.yaml ~/.cql/ -cp ${PROJECT_DIR}/test/integration/node_c/private.key ~/.cql/ - +# TODO get pid for kill # start current version bp -nohup ${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_0/config.yaml 2>&1 > ${OUTSIDE_BIN_DIR}/bp0.log -nohup ${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_1/config.yaml 2>&1 > ${OUTSIDE_BIN_DIR}/bp1.log -nohup ${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_2/config.yaml 2>&1 > ${OUTSIDE_BIN_DIR}/bp2.log +nohup ${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_0/config.yaml 2>&1 > ${OUTSIDE_BIN_DIR}/bp0.log & +nohup ${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_1/config.yaml 2>&1 > ${OUTSIDE_BIN_DIR}/bp1.log & +nohup ${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_2/config.yaml 2>&1 > ${OUTSIDE_BIN_DIR}/bp2.log & # wait bp start sleep 10 # start current version miner -nohup ${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml 2>&1 > ${OUTSIDE_BIN_DIR}/miner0.log -nohup ${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml 2>&1 > ${OUTSIDE_BIN_DIR}/miner1.log -nohup ${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml 2>&1 > ${OUTSIDE_BIN_DIR}/miner2.log +nohup ${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml 2>&1 > ${OUTSIDE_BIN_DIR}/miner0.log & +nohup ${PROJECT_DIR}/bin/cql-minerd -config 
${PROJECT_DIR}/test/integration/node_miner_0/config.yaml 2>&1 > ${OUTSIDE_BIN_DIR}/miner1.log & +nohup ${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml 2>&1 > ${OUTSIDE_BIN_DIR}/miner2.log & + -${BIN}/cql -get-balance +${BIN}/cql -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -get-balance -${BIN}/cql -create 2 -wait-tx-confirm | tee dsn.txt +${BIN}/cql -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -create 2 -wait-tx-confirm | tee dsn.txt #get dsn dsn=$(cat dsn.txt) -${BIN}/cql -dsn ${dsn} \ +${BIN}/cql -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -dsn ${dsn} \ -command 'create table test_for_new_account(column1 int);' -${BIN}/cql -dsn ${dsn} \ +${BIN}/cql -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -dsn ${dsn} \ -command 'show tables;' | tee result.log grep "1 row" result.log From 223975ce91e92531da5679198fa9bd7bdefe5103 Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 27 Feb 2019 20:46:27 +0800 Subject: [PATCH 026/244] Fix compatibility test, wait for miner start. 
--- .gitlab-ci.yml | 2 +- test/compatibility/old_client_test.sh | 14 ++++++++------ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f947e739e..557774d7a 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -49,6 +49,6 @@ compatibility-old-client: - set -o pipefail - set -x - make clean - - make bin/cqld bin/cql-minerd + - make -j2 bin/cqld bin/cql-minerd - ./test/compatibility/old_client_test.sh /CovenantSQL_bins/v0.3.0 diff --git a/test/compatibility/old_client_test.sh b/test/compatibility/old_client_test.sh index 83620314b..23802c210 100755 --- a/test/compatibility/old_client_test.sh +++ b/test/compatibility/old_client_test.sh @@ -14,18 +14,20 @@ fi # TODO get pid for kill # start current version bp -nohup ${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_0/config.yaml 2>&1 > ${OUTSIDE_BIN_DIR}/bp0.log & -nohup ${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_1/config.yaml 2>&1 > ${OUTSIDE_BIN_DIR}/bp1.log & -nohup ${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_2/config.yaml 2>&1 > ${OUTSIDE_BIN_DIR}/bp2.log & +nohup ${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_0/config.yaml 2>${OUTSIDE_BIN_DIR}/bp0.log & +nohup ${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_1/config.yaml 2>${OUTSIDE_BIN_DIR}/bp1.log & +nohup ${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_2/config.yaml 2>${OUTSIDE_BIN_DIR}/bp2.log & # wait bp start sleep 10 # start current version miner -nohup ${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml 2>&1 > ${OUTSIDE_BIN_DIR}/miner0.log & -nohup ${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml 2>&1 > ${OUTSIDE_BIN_DIR}/miner1.log & -nohup ${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml 2>&1 > ${OUTSIDE_BIN_DIR}/miner2.log & +nohup 
${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml 2>${OUTSIDE_BIN_DIR}/miner0.log & +nohup ${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml 2>${OUTSIDE_BIN_DIR}/miner1.log & +nohup ${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml 2>${OUTSIDE_BIN_DIR}/miner2.log & +# wait miner start +sleep 10 ${BIN}/cql -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -get-balance From 24a2a9eb5c0f836b6bc572e62f6517b2bc7d6c14 Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 28 Feb 2019 09:29:42 +0800 Subject: [PATCH 027/244] Increase integration miner client init stable coin count. --- test/integration/node_0/config.yaml | 16 ++++++++-------- test/integration/node_1/config.yaml | 16 ++++++++-------- test/integration/node_2/config.yaml | 16 ++++++++-------- test/integration/node_c/config.yaml | 16 ++++++++-------- test/integration/node_miner_0/config.yaml | 16 ++++++++-------- test/integration/node_miner_1/config.yaml | 16 ++++++++-------- test/integration/node_miner_2/config.yaml | 16 ++++++++-------- 7 files changed, 56 insertions(+), 56 deletions(-) diff --git a/test/integration/node_0/config.yaml b/test/integration/node_0/config.yaml index 397b65eb8..cd1a54cda 100644 --- a/test/integration/node_0/config.yaml +++ b/test/integration/node_0/config.yaml @@ -45,17 +45,17 @@ BlockProducer: Timestamp: 2018-08-13T21:59:59.12Z BaseAccounts: - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 - Address: 
9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/integration/node_1/config.yaml b/test/integration/node_1/config.yaml index d35061395..eca03e17d 100644 --- a/test/integration/node_1/config.yaml +++ b/test/integration/node_1/config.yaml @@ -45,17 +45,17 @@ BlockProducer: Timestamp: 2018-08-13T21:59:59.12Z BaseAccounts: - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/integration/node_2/config.yaml b/test/integration/node_2/config.yaml index 85dce477d..30a422bc3 100644 --- a/test/integration/node_2/config.yaml +++ 
b/test/integration/node_2/config.yaml @@ -45,17 +45,17 @@ BlockProducer: Timestamp: 2018-08-13T21:59:59.12Z BaseAccounts: - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/integration/node_c/config.yaml b/test/integration/node_c/config.yaml index f4f265e8c..84320e6c2 100644 --- a/test/integration/node_c/config.yaml +++ b/test/integration/node_c/config.yaml @@ -45,17 +45,17 @@ BlockProducer: Timestamp: 2018-08-13T21:59:59.12Z BaseAccounts: - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 - StableCoinBalance: 1000000000 - CovenantCoinBalance: 
1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/integration/node_miner_0/config.yaml b/test/integration/node_miner_0/config.yaml index ceac395a8..ce99baf98 100644 --- a/test/integration/node_miner_0/config.yaml +++ b/test/integration/node_miner_0/config.yaml @@ -36,17 +36,17 @@ BlockProducer: Timestamp: 2018-08-13T21:59:59.12Z BaseAccounts: - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 Miner: IsTestMode: true RootDir: "./data" diff --git a/test/integration/node_miner_1/config.yaml b/test/integration/node_miner_1/config.yaml index 41eb0305b..6cec5bbc7 100644 --- a/test/integration/node_miner_1/config.yaml +++ b/test/integration/node_miner_1/config.yaml @@ -36,17 +36,17 @@ BlockProducer: Timestamp: 2018-08-13T21:59:59.12Z BaseAccounts: - 
Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 Miner: IsTestMode: true RootDir: "./data" diff --git a/test/integration/node_miner_2/config.yaml b/test/integration/node_miner_2/config.yaml index 51ec8f581..ec0ac5060 100644 --- a/test/integration/node_miner_2/config.yaml +++ b/test/integration/node_miner_2/config.yaml @@ -36,17 +36,17 @@ BlockProducer: Timestamp: 2018-08-13T21:59:59.12Z BaseAccounts: - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 - Address: 
9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 Miner: IsTestMode: true RootDir: "./data" From cdb9222fa9494786ef753fa6e954ce890c671f8b Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 28 Feb 2019 09:42:48 +0800 Subject: [PATCH 028/244] Increase compatibility test wait bp miner time. --- test/compatibility/old_client_test.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/compatibility/old_client_test.sh b/test/compatibility/old_client_test.sh index 23802c210..010b9e791 100755 --- a/test/compatibility/old_client_test.sh +++ b/test/compatibility/old_client_test.sh @@ -19,7 +19,7 @@ nohup ${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_1/con nohup ${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_2/config.yaml 2>${OUTSIDE_BIN_DIR}/bp2.log & # wait bp start -sleep 10 +sleep 20 # start current version miner nohup ${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml 2>${OUTSIDE_BIN_DIR}/miner0.log & @@ -27,7 +27,7 @@ nohup ${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node nohup ${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml 2>${OUTSIDE_BIN_DIR}/miner2.log & # wait miner start -sleep 10 +sleep 30 ${BIN}/cql -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -get-balance From 5a00d664d0f90558bfd932e597de0358d4b8e957 Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 28 Feb 2019 09:46:18 +0800 Subject: [PATCH 029/244] Disable other ci test. 
--- .gitlab-ci.yml | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 557774d7a..cb5c971db 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -23,24 +23,24 @@ before_script: - ulimit -n 8192 # - curl -fSL https://github.com/haya14busa/reviewdog/releases/download/$REVIEWDOG_VERSION/reviewdog_linux_amd64 -o ~/bin/reviewdog && chmod +x ~/bin/reviewdog -test-my-project: - stage: test - script: - - ./alltest.sh +# test-my-project: + # stage: test + # script: + # - ./alltest.sh -compatibility-testnet: - stage: test - script: - - set -o errexit - - set -o pipefail - - commit=$(git rev-parse --short HEAD) - - branch=$(git branch -rv |grep $commit | awk '{print $1}') - - if [[ $branch =~ "/beta_" ]]; then exit 0; fi - - make clean - - make -j8 client - - go test -tags "$UNITTESTTAGS" -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ - - set -x - - ./test/testnet_client/run.sh +# compatibility-testnet: + # stage: test + # script: + # - set -o errexit + # - set -o pipefail + # - commit=$(git rev-parse --short HEAD) + # - branch=$(git branch -rv |grep $commit | awk '{print $1}') + # - if [[ $branch =~ "/beta_" ]]; then exit 0; fi + # - make clean + # - make -j8 client + # - go test -tags "$UNITTESTTAGS" -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ + # - set -x + # - ./test/testnet_client/run.sh compatibility-old-client: stage: test From 8349f4cd8cd6d67ef28a13e9e75772c4db8cc2fd Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 28 Feb 2019 09:52:12 +0800 Subject: [PATCH 030/244] Fail compatibility test if dsn not created. 
--- test/compatibility/old_client_test.sh | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/test/compatibility/old_client_test.sh b/test/compatibility/old_client_test.sh index 010b9e791..b82264f03 100755 --- a/test/compatibility/old_client_test.sh +++ b/test/compatibility/old_client_test.sh @@ -27,7 +27,7 @@ nohup ${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node nohup ${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml 2>${OUTSIDE_BIN_DIR}/miner2.log & # wait miner start -sleep 30 +sleep 20 ${BIN}/cql -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -get-balance @@ -35,6 +35,9 @@ ${BIN}/cql -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -create 2 #get dsn dsn=$(cat dsn.txt) +if [ -z "$dsn" ]; then + exit 1 +fi ${BIN}/cql -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -dsn ${dsn} \ -command 'create table test_for_new_account(column1 int);' From 70d1914795d3955af5f7e879ab7e83a52e108ada Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 28 Feb 2019 09:54:55 +0800 Subject: [PATCH 031/244] Fix compatibility test miner config. 
--- test/compatibility/old_client_test.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/compatibility/old_client_test.sh b/test/compatibility/old_client_test.sh index b82264f03..cc4365f4c 100755 --- a/test/compatibility/old_client_test.sh +++ b/test/compatibility/old_client_test.sh @@ -23,8 +23,8 @@ sleep 20 # start current version miner nohup ${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml 2>${OUTSIDE_BIN_DIR}/miner0.log & -nohup ${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml 2>${OUTSIDE_BIN_DIR}/miner1.log & -nohup ${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml 2>${OUTSIDE_BIN_DIR}/miner2.log & +nohup ${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_1/config.yaml 2>${OUTSIDE_BIN_DIR}/miner1.log & +nohup ${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_2/config.yaml 2>${OUTSIDE_BIN_DIR}/miner2.log & # wait miner start sleep 20 From 2978a941429e44459b4d2bd3d8c4f96b9ad96c1a Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 28 Feb 2019 10:28:44 +0800 Subject: [PATCH 032/244] Revert "Disable other ci test." This reverts commit 5a00d664d0f90558bfd932e597de0358d4b8e957. 
--- .gitlab-ci.yml | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index cb5c971db..557774d7a 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -23,24 +23,24 @@ before_script: - ulimit -n 8192 # - curl -fSL https://github.com/haya14busa/reviewdog/releases/download/$REVIEWDOG_VERSION/reviewdog_linux_amd64 -o ~/bin/reviewdog && chmod +x ~/bin/reviewdog -# test-my-project: - # stage: test - # script: - # - ./alltest.sh +test-my-project: + stage: test + script: + - ./alltest.sh -# compatibility-testnet: - # stage: test - # script: - # - set -o errexit - # - set -o pipefail - # - commit=$(git rev-parse --short HEAD) - # - branch=$(git branch -rv |grep $commit | awk '{print $1}') - # - if [[ $branch =~ "/beta_" ]]; then exit 0; fi - # - make clean - # - make -j8 client - # - go test -tags "$UNITTESTTAGS" -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ - # - set -x - # - ./test/testnet_client/run.sh +compatibility-testnet: + stage: test + script: + - set -o errexit + - set -o pipefail + - commit=$(git rev-parse --short HEAD) + - branch=$(git branch -rv |grep $commit | awk '{print $1}') + - if [[ $branch =~ "/beta_" ]]; then exit 0; fi + - make clean + - make -j8 client + - go test -tags "$UNITTESTTAGS" -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ + - set -x + - ./test/testnet_client/run.sh compatibility-old-client: stage: test From bf1eafd795abaa3bf9d5b231251bab58e7c1556c Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 28 Feb 2019 10:35:49 +0800 Subject: [PATCH 033/244] Add ci build stage, reuse build result in test stage. 
--- .gitlab-ci.yml | 16 ++++++++++++++-- alltest.sh | 3 --- test/compatibility/old_client_test.sh | 1 - 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 557774d7a..83a7c2399 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -23,9 +23,19 @@ before_script: - ulimit -n 8192 # - curl -fSL https://github.com/haya14busa/reviewdog/releases/download/$REVIEWDOG_VERSION/reviewdog_linux_amd64 -o ~/bin/reviewdog && chmod +x ~/bin/reviewdog +build: + stage: build + script: + - make clean + - make use_all_cores + - mv bin /CovenantSQL_bins/current + test-my-project: stage: test script: + - make clean + - mkdir -p bin + - cp -r /CovenantSQL_bins/current/* bin/ - ./alltest.sh compatibility-testnet: @@ -37,7 +47,8 @@ compatibility-testnet: - branch=$(git branch -rv |grep $commit | awk '{print $1}') - if [[ $branch =~ "/beta_" ]]; then exit 0; fi - make clean - - make -j8 client + - mkdir -p bin + - cp -r /CovenantSQL_bins/current/* bin/ - go test -tags "$UNITTESTTAGS" -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ - set -x - ./test/testnet_client/run.sh @@ -49,6 +60,7 @@ compatibility-old-client: - set -o pipefail - set -x - make clean - - make -j2 bin/cqld bin/cql-minerd + - mkdir -p bin + - cp -r /CovenantSQL_bins/current/* bin/ - ./test/compatibility/old_client_test.sh /CovenantSQL_bins/v0.3.0 diff --git a/alltest.sh b/alltest.sh index 60e656091..31421cda0 100755 --- a/alltest.sh +++ b/alltest.sh @@ -5,9 +5,6 @@ set -o pipefail set -o nounset main() { - make clean - make -j5 bp miner bin/cql.test - go test -tags "$UNITTESTTAGS" -race -failfast -parallel 16 -cpu 16 -coverprofile main.cover.out $(go list ./... | grep -v CovenantSQL/api) go test -tags "$UNITTESTTAGS" -race -failfast -parallel 16 -cpu 16 -coverpkg ./api/...,./rpc/jsonrpc -coverprofile api.cover.out ./api/... 
diff --git a/test/compatibility/old_client_test.sh b/test/compatibility/old_client_test.sh index cc4365f4c..b2064e5e7 100755 --- a/test/compatibility/old_client_test.sh +++ b/test/compatibility/old_client_test.sh @@ -12,7 +12,6 @@ else BIN=${PROJECT_DIR}/bin fi -# TODO get pid for kill # start current version bp nohup ${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_0/config.yaml 2>${OUTSIDE_BIN_DIR}/bp0.log & nohup ${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_1/config.yaml 2>${OUTSIDE_BIN_DIR}/bp1.log & From fbdbda43eb2272c3d2feb34b8f1655473f7c2953 Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 28 Feb 2019 13:24:53 +0800 Subject: [PATCH 034/244] Add old bp, old miner case for compatibility test. --- .gitlab-ci.yml | 27 +++++++++-- test/compatibility/old_client_test.sh | 48 ------------------- test/compatibility/specific_old.sh | 67 +++++++++++++++++++++++++++ 3 files changed, 90 insertions(+), 52 deletions(-) delete mode 100755 test/compatibility/old_client_test.sh create mode 100755 test/compatibility/specific_old.sh diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 83a7c2399..8b12f3272 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -5,6 +5,8 @@ variables: REVIEWDOG_GITLAB_API_TOKEN: $REVIEWDOG_TOKEN CODECOV_TOKEN: $CODECOV_TOKEN UNITTESTTAGS: linux sqlite_omit_load_extension + LAST_VERSION: v0.3.0 + # gitlabci bins: 192.168.2.100:/srv/gitlab-runner/config/CovenantSQL_bins before_script: # Setup dependency management tool @@ -34,7 +36,6 @@ test-my-project: stage: test script: - make clean - - mkdir -p bin - cp -r /CovenantSQL_bins/current/* bin/ - ./alltest.sh @@ -47,7 +48,6 @@ compatibility-testnet: - branch=$(git branch -rv |grep $commit | awk '{print $1}') - if [[ $branch =~ "/beta_" ]]; then exit 0; fi - make clean - - mkdir -p bin - cp -r /CovenantSQL_bins/current/* bin/ - go test -tags "$UNITTESTTAGS" -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ - set -x @@ -60,7 +60,26 @@ 
compatibility-old-client: - set -o pipefail - set -x - make clean - - mkdir -p bin - cp -r /CovenantSQL_bins/current/* bin/ - - ./test/compatibility/old_client_test.sh /CovenantSQL_bins/v0.3.0 + - ./test/compatibility/specific_old.sh client + +compatibility-old-bp: + stage: test + script: + - set -o errexit + - set -o pipefail + - set -x + - make clean + - cp -r /CovenantSQL_bins/current/* bin/ + - ./test/compatibility/specific_old.sh bp + +compatibility-old-miner: + stage: test + script: + - set -o errexit + - set -o pipefail + - set -x + - make clean + - cp -r /CovenantSQL_bins/current/* bin/ + - ./test/compatibility/specific_old.sh miner diff --git a/test/compatibility/old_client_test.sh b/test/compatibility/old_client_test.sh deleted file mode 100755 index b2064e5e7..000000000 --- a/test/compatibility/old_client_test.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/bash -x - -OUTSIDE_BIN_DIR=$1 - -TEST_WD=$(cd $(dirname $0)/; pwd) -PROJECT_DIR=$(cd ${TEST_WD}/../../; pwd) -cd ${TEST_WD} - -if [ -n "$OUTSIDE_BIN_DIR" ]; then - BIN=${OUTSIDE_BIN_DIR} -else - BIN=${PROJECT_DIR}/bin -fi - -# start current version bp -nohup ${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_0/config.yaml 2>${OUTSIDE_BIN_DIR}/bp0.log & -nohup ${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_1/config.yaml 2>${OUTSIDE_BIN_DIR}/bp1.log & -nohup ${PROJECT_DIR}/bin/cqld -config ${PROJECT_DIR}/test/integration/node_2/config.yaml 2>${OUTSIDE_BIN_DIR}/bp2.log & - -# wait bp start -sleep 20 - -# start current version miner -nohup ${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml 2>${OUTSIDE_BIN_DIR}/miner0.log & -nohup ${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_1/config.yaml 2>${OUTSIDE_BIN_DIR}/miner1.log & -nohup ${PROJECT_DIR}/bin/cql-minerd -config ${PROJECT_DIR}/test/integration/node_miner_2/config.yaml 2>${OUTSIDE_BIN_DIR}/miner2.log & - -# wait miner start -sleep 20 - 
-${BIN}/cql -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -get-balance - -${BIN}/cql -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -create 2 -wait-tx-confirm | tee dsn.txt - -#get dsn -dsn=$(cat dsn.txt) -if [ -z "$dsn" ]; then - exit 1 -fi - -${BIN}/cql -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -dsn ${dsn} \ - -command 'create table test_for_new_account(column1 int);' - -${BIN}/cql -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -dsn ${dsn} \ - -command 'show tables;' | tee result.log - -grep "1 row" result.log - diff --git a/test/compatibility/specific_old.sh b/test/compatibility/specific_old.sh new file mode 100755 index 000000000..7abff9b24 --- /dev/null +++ b/test/compatibility/specific_old.sh @@ -0,0 +1,67 @@ +#!/bin/bash -x +test_case=$1 +set -e + +TEST_WD=$(cd $(dirname $0)/; pwd) +PROJECT_DIR=$(cd ${TEST_WD}/../../; pwd) +cd ${TEST_WD} + +OLD_BIN_DIR=/CovenantSQL_bins/${LAST_VERSION} +NEW_BIN_DIR=${PROJECT_DIR}/bin + +case $test_case in + "client") + CLIENTBIN=${OLD_BIN_DIR}/cql + BPBIN=${NEW_BIN_DIR}/cqld + MINERBIN=${NEW_BIN_DIR}/cql-miner + ;; + "bp") + CLIENTBIN=${NEW_BIN_DIR}/cql + BPBIN=${OLD_BIN_DIR}/cqld + MINERBIN=${NEW_BIN_DIR}/cql-miner + ;; + "miner") + CLIENTBIN=${NEW_BIN_DIR}/cql + BPBIN=${NEW_BIN_DIR}/cqld + MINERBIN=${OLD_BIN_DIR}/cql-miner + ;; + *) + return 1 + ;; +esac + + +# start current version bp +nohup ${BPBIN} -config ${PROJECT_DIR}/test/integration/node_0/config.yaml 2>${OLD_BIN_DIR}/bp0.log & +nohup ${BPBIN} -config ${PROJECT_DIR}/test/integration/node_1/config.yaml 2>${OLD_BIN_DIR}/bp1.log & +nohup ${BPBIN} -config ${PROJECT_DIR}/test/integration/node_2/config.yaml 2>${OLD_BIN_DIR}/bp2.log & + +# wait bp start +sleep 20 + +# start current version miner +nohup ${MINERBIN} -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml 2>${OLD_BIN_DIR}/miner0.log & +nohup ${MINERBIN} -config ${PROJECT_DIR}/test/integration/node_miner_1/config.yaml 2>${OLD_BIN_DIR}/miner1.log & 
+nohup ${MINERBIN} -config ${PROJECT_DIR}/test/integration/node_miner_2/config.yaml 2>${OLD_BIN_DIR}/miner2.log & + +# wait miner start +sleep 20 + +${CLIENTBIN} -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -get-balance + +${CLIENTBIN} -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -create 2 -wait-tx-confirm | tee dsn.txt + +#get dsn +dsn=$(cat dsn.txt) +if [ -z "$dsn" ]; then + return 1 +fi + +${CLIENTBIN} -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -dsn ${dsn} \ + -command 'create table test_for_new_account(column1 int);' + +${CLIENTBIN} -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -dsn ${dsn} \ + -command 'show tables;' | tee result.log + +grep "1 row" result.log + From 9197878b3ef38e3697eda82e866215e4658d53b5 Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 28 Feb 2019 13:27:21 +0800 Subject: [PATCH 035/244] Rename gitlab ci task name. --- .gitlab-ci.yml | 10 +++++----- test/compatibility/specific_old.sh | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 8b12f3272..f84fb88e7 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -32,14 +32,14 @@ build: - make use_all_cores - mv bin /CovenantSQL_bins/current -test-my-project: +unit-test: stage: test script: - make clean - cp -r /CovenantSQL_bins/current/* bin/ - ./alltest.sh -compatibility-testnet: +testnet-compatibility: stage: test script: - set -o errexit @@ -53,7 +53,7 @@ compatibility-testnet: - set -x - ./test/testnet_client/run.sh -compatibility-old-client: +old-client-compatibility: stage: test script: - set -o errexit @@ -63,7 +63,7 @@ compatibility-old-client: - cp -r /CovenantSQL_bins/current/* bin/ - ./test/compatibility/specific_old.sh client -compatibility-old-bp: +old-bp-compatibility: stage: test script: - set -o errexit @@ -73,7 +73,7 @@ compatibility-old-bp: - cp -r /CovenantSQL_bins/current/* bin/ - ./test/compatibility/specific_old.sh bp -compatibility-old-miner: 
+old-miner-compatibility: stage: test script: - set -o errexit diff --git a/test/compatibility/specific_old.sh b/test/compatibility/specific_old.sh index 7abff9b24..59c4391a3 100755 --- a/test/compatibility/specific_old.sh +++ b/test/compatibility/specific_old.sh @@ -26,7 +26,7 @@ case $test_case in MINERBIN=${OLD_BIN_DIR}/cql-miner ;; *) - return 1 + exit 1 ;; esac @@ -54,7 +54,7 @@ ${CLIENTBIN} -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -create #get dsn dsn=$(cat dsn.txt) if [ -z "$dsn" ]; then - return 1 + exit 1 fi ${CLIENTBIN} -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -dsn ${dsn} \ From 1112e67319b26efc58c812b6ced7aac05df87829 Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 28 Feb 2019 14:00:56 +0800 Subject: [PATCH 036/244] Fix compatibility miner bin name wrong. --- .gitlab-ci.yml | 3 ++- test/compatibility/specific_old.sh | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f84fb88e7..8f87838e3 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -30,7 +30,8 @@ build: script: - make clean - make use_all_cores - - mv bin /CovenantSQL_bins/current + - rm /CovenantSQL_bins/current/* + - cp bin/* /CovenantSQL_bins/current/ unit-test: stage: test diff --git a/test/compatibility/specific_old.sh b/test/compatibility/specific_old.sh index 59c4391a3..627dc237c 100755 --- a/test/compatibility/specific_old.sh +++ b/test/compatibility/specific_old.sh @@ -13,17 +13,17 @@ case $test_case in "client") CLIENTBIN=${OLD_BIN_DIR}/cql BPBIN=${NEW_BIN_DIR}/cqld - MINERBIN=${NEW_BIN_DIR}/cql-miner + MINERBIN=${NEW_BIN_DIR}/cql-minerd ;; "bp") CLIENTBIN=${NEW_BIN_DIR}/cql BPBIN=${OLD_BIN_DIR}/cqld - MINERBIN=${NEW_BIN_DIR}/cql-miner + MINERBIN=${NEW_BIN_DIR}/cql-minerd ;; "miner") CLIENTBIN=${NEW_BIN_DIR}/cql BPBIN=${NEW_BIN_DIR}/cqld - MINERBIN=${OLD_BIN_DIR}/cql-miner + MINERBIN=${OLD_BIN_DIR}/cql-minerd ;; *) exit 1 From 61085a627cb1c55eb27c7ec39962400defa7384a Mon Sep 17 00:00:00 2001 
From: laodouya Date: Thu, 28 Feb 2019 14:19:18 +0800 Subject: [PATCH 037/244] Seperate logs for different compatibility test case. --- test/compatibility/specific_old.sh | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/test/compatibility/specific_old.sh b/test/compatibility/specific_old.sh index 627dc237c..333f265fe 100755 --- a/test/compatibility/specific_old.sh +++ b/test/compatibility/specific_old.sh @@ -9,6 +9,9 @@ cd ${TEST_WD} OLD_BIN_DIR=/CovenantSQL_bins/${LAST_VERSION} NEW_BIN_DIR=${PROJECT_DIR}/bin +LOGS_DIR=/CovenantSQL_bins/logs/old_${test_case} +mkdir -p ${LOGS_DIR} + case $test_case in "client") CLIENTBIN=${OLD_BIN_DIR}/cql @@ -31,18 +34,18 @@ case $test_case in esac -# start current version bp -nohup ${BPBIN} -config ${PROJECT_DIR}/test/integration/node_0/config.yaml 2>${OLD_BIN_DIR}/bp0.log & -nohup ${BPBIN} -config ${PROJECT_DIR}/test/integration/node_1/config.yaml 2>${OLD_BIN_DIR}/bp1.log & -nohup ${BPBIN} -config ${PROJECT_DIR}/test/integration/node_2/config.yaml 2>${OLD_BIN_DIR}/bp2.log & +# start bp +nohup ${BPBIN} -config ${PROJECT_DIR}/test/integration/node_0/config.yaml 2>${LOGS_DIR}/bp0.log & +nohup ${BPBIN} -config ${PROJECT_DIR}/test/integration/node_1/config.yaml 2>${LOGS_DIR}/bp1.log & +nohup ${BPBIN} -config ${PROJECT_DIR}/test/integration/node_2/config.yaml 2>${LOGS_DIR}/bp2.log & # wait bp start sleep 20 -# start current version miner -nohup ${MINERBIN} -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml 2>${OLD_BIN_DIR}/miner0.log & -nohup ${MINERBIN} -config ${PROJECT_DIR}/test/integration/node_miner_1/config.yaml 2>${OLD_BIN_DIR}/miner1.log & -nohup ${MINERBIN} -config ${PROJECT_DIR}/test/integration/node_miner_2/config.yaml 2>${OLD_BIN_DIR}/miner2.log & +# start miner +nohup ${MINERBIN} -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml 2>${LOGS_DIR}/miner0.log & +nohup ${MINERBIN} -config ${PROJECT_DIR}/test/integration/node_miner_1/config.yaml 
2>${LOGS_DIR}/miner1.log & +nohup ${MINERBIN} -config ${PROJECT_DIR}/test/integration/node_miner_2/config.yaml 2>${LOGS_DIR}/miner2.log & # wait miner start sleep 20 From 2d080b5701e3f818b91ccc0d014b66675ee671d4 Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 28 Feb 2019 14:22:22 +0800 Subject: [PATCH 038/244] Update prev bin version to v0.4.0 --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 8f87838e3..8d69a00fe 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -5,7 +5,7 @@ variables: REVIEWDOG_GITLAB_API_TOKEN: $REVIEWDOG_TOKEN CODECOV_TOKEN: $CODECOV_TOKEN UNITTESTTAGS: linux sqlite_omit_load_extension - LAST_VERSION: v0.3.0 + LAST_VERSION: v0.4.0 # gitlabci bins: 192.168.2.100:/srv/gitlab-runner/config/CovenantSQL_bins before_script: From e2a5551426e8178c893f660e59d7c1b6fc84ecbc Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 28 Feb 2019 16:00:10 +0800 Subject: [PATCH 039/244] Use a independent cache folder for each ci jobs. 
--- .gitlab-ci.yml | 27 +++++++++++++++++++-------- test/compatibility/specific_old.sh | 4 ++-- 2 files changed, 21 insertions(+), 10 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 8d69a00fe..2d06070c8 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,11 +1,18 @@ image: covenantsql/build +stages: + - build + - test + - cleanup + variables: REVIEWDOG_VERSION: 0.9.11 REVIEWDOG_GITLAB_API_TOKEN: $REVIEWDOG_TOKEN CODECOV_TOKEN: $CODECOV_TOKEN UNITTESTTAGS: linux sqlite_omit_load_extension - LAST_VERSION: v0.4.0 + CACHE_DIR: /CovenantSQL_bins + JOB_CACHE: ${CACHE_DIR}/${CI_JOB_ID} + PREV_VERSION: v0.4.0 # gitlabci bins: 192.168.2.100:/srv/gitlab-runner/config/CovenantSQL_bins before_script: @@ -30,14 +37,13 @@ build: script: - make clean - make use_all_cores - - rm /CovenantSQL_bins/current/* - - cp bin/* /CovenantSQL_bins/current/ + - cp bin/* ${JOB_CACHE}/ unit-test: stage: test script: - make clean - - cp -r /CovenantSQL_bins/current/* bin/ + - cp ${JOB_CACHE}/* bin/ - ./alltest.sh testnet-compatibility: @@ -49,7 +55,7 @@ testnet-compatibility: - branch=$(git branch -rv |grep $commit | awk '{print $1}') - if [[ $branch =~ "/beta_" ]]; then exit 0; fi - make clean - - cp -r /CovenantSQL_bins/current/* bin/ + - cp ${JOB_CACHE}/* bin/ - go test -tags "$UNITTESTTAGS" -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ - set -x - ./test/testnet_client/run.sh @@ -61,7 +67,7 @@ old-client-compatibility: - set -o pipefail - set -x - make clean - - cp -r /CovenantSQL_bins/current/* bin/ + - cp ${JOB_CACHE}/* bin/ - ./test/compatibility/specific_old.sh client old-bp-compatibility: @@ -71,7 +77,7 @@ old-bp-compatibility: - set -o pipefail - set -x - make clean - - cp -r /CovenantSQL_bins/current/* bin/ + - cp ${JOB_CACHE}/* bin/ - ./test/compatibility/specific_old.sh bp old-miner-compatibility: @@ -81,6 +87,11 @@ old-miner-compatibility: - set -o pipefail - set -x - make clean - - cp -r /CovenantSQL_bins/current/* bin/ + - cp 
${JOB_CACHE}/* bin/ - ./test/compatibility/specific_old.sh miner +cleanup_job: + stage: cleanup + script: + - rm -r ${JOB_CACHE}/* + when: always diff --git a/test/compatibility/specific_old.sh b/test/compatibility/specific_old.sh index 333f265fe..f0639b87d 100755 --- a/test/compatibility/specific_old.sh +++ b/test/compatibility/specific_old.sh @@ -6,10 +6,10 @@ TEST_WD=$(cd $(dirname $0)/; pwd) PROJECT_DIR=$(cd ${TEST_WD}/../../; pwd) cd ${TEST_WD} -OLD_BIN_DIR=/CovenantSQL_bins/${LAST_VERSION} +OLD_BIN_DIR=${CACHE_DIR}/${PREV_VERSION} NEW_BIN_DIR=${PROJECT_DIR}/bin -LOGS_DIR=/CovenantSQL_bins/logs/old_${test_case} +LOGS_DIR=${JOB_CACHE}/old_${test_case}_log mkdir -p ${LOGS_DIR} case $test_case in From 2c757acb4fa5c8d1aed1c2235f64852d81558445 Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 28 Feb 2019 16:03:15 +0800 Subject: [PATCH 040/244] Redirect compatibility test bp, miner stdout to files. --- test/compatibility/specific_old.sh | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/test/compatibility/specific_old.sh b/test/compatibility/specific_old.sh index f0639b87d..fa3a071e8 100755 --- a/test/compatibility/specific_old.sh +++ b/test/compatibility/specific_old.sh @@ -33,19 +33,19 @@ case $test_case in ;; esac - +2>&1 # start bp -nohup ${BPBIN} -config ${PROJECT_DIR}/test/integration/node_0/config.yaml 2>${LOGS_DIR}/bp0.log & -nohup ${BPBIN} -config ${PROJECT_DIR}/test/integration/node_1/config.yaml 2>${LOGS_DIR}/bp1.log & -nohup ${BPBIN} -config ${PROJECT_DIR}/test/integration/node_2/config.yaml 2>${LOGS_DIR}/bp2.log & +nohup ${BPBIN} -config ${PROJECT_DIR}/test/integration/node_0/config.yaml >${LOGS_DIR}/bp0.log 2>&1 & +nohup ${BPBIN} -config ${PROJECT_DIR}/test/integration/node_1/config.yaml >${LOGS_DIR}/bp1.log 2>&1 & +nohup ${BPBIN} -config ${PROJECT_DIR}/test/integration/node_2/config.yaml >${LOGS_DIR}/bp2.log 2>&1 & # wait bp start sleep 20 # start miner -nohup ${MINERBIN} -config 
${PROJECT_DIR}/test/integration/node_miner_0/config.yaml 2>${LOGS_DIR}/miner0.log & -nohup ${MINERBIN} -config ${PROJECT_DIR}/test/integration/node_miner_1/config.yaml 2>${LOGS_DIR}/miner1.log & -nohup ${MINERBIN} -config ${PROJECT_DIR}/test/integration/node_miner_2/config.yaml 2>${LOGS_DIR}/miner2.log & +nohup ${MINERBIN} -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml >${LOGS_DIR}/miner0.log 2>&1 & +nohup ${MINERBIN} -config ${PROJECT_DIR}/test/integration/node_miner_1/config.yaml >${LOGS_DIR}/miner1.log 2>&1 & +nohup ${MINERBIN} -config ${PROJECT_DIR}/test/integration/node_miner_2/config.yaml >${LOGS_DIR}/miner2.log 2>&1 & # wait miner start sleep 20 From 0607ef23222005d2f393eb50388683acf3518ea7 Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 28 Feb 2019 16:06:20 +0800 Subject: [PATCH 041/244] Create job cache dir if not exist for gitlabci. --- .gitlab-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 2d06070c8..0320e057d 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -37,6 +37,7 @@ build: script: - make clean - make use_all_cores + - mkdir -p ${JOB_CACHE} - cp bin/* ${JOB_CACHE}/ unit-test: From 5c299e296927e1b9ca59897c27dafe5bce007145 Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 28 Feb 2019 16:20:05 +0800 Subject: [PATCH 042/244] Use CI_PIPELINE_IID instead of CI_JOB_ID for transfer cache dir. 
--- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 0320e057d..7205c6171 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -11,7 +11,7 @@ variables: CODECOV_TOKEN: $CODECOV_TOKEN UNITTESTTAGS: linux sqlite_omit_load_extension CACHE_DIR: /CovenantSQL_bins - JOB_CACHE: ${CACHE_DIR}/${CI_JOB_ID} + JOB_CACHE: ${CACHE_DIR}/${CI_PIPELINE_IID} PREV_VERSION: v0.4.0 # gitlabci bins: 192.168.2.100:/srv/gitlab-runner/config/CovenantSQL_bins From 10a16c78b265dfd568f3ff54673b13c7ff699e2f Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 28 Feb 2019 16:36:12 +0800 Subject: [PATCH 043/244] Seperate cache bin dir and log dir. --- .gitlab-ci.yml | 14 +++++++++++--- test/compatibility/specific_old.sh | 2 +- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 7205c6171..a66f17525 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -11,7 +11,8 @@ variables: CODECOV_TOKEN: $CODECOV_TOKEN UNITTESTTAGS: linux sqlite_omit_load_extension CACHE_DIR: /CovenantSQL_bins - JOB_CACHE: ${CACHE_DIR}/${CI_PIPELINE_IID} + JOB_CACHE: ${CACHE_DIR}/${CI_PIPELINE_IID}/bin + LOG_CACHE: ${CACHE_DIR}/${CI_PIPELINE_IID}/logs PREV_VERSION: v0.4.0 # gitlabci bins: 192.168.2.100:/srv/gitlab-runner/config/CovenantSQL_bins @@ -38,6 +39,7 @@ build: - make clean - make use_all_cores - mkdir -p ${JOB_CACHE} + - mkdir -p ${LOG_CACHE} - cp bin/* ${JOB_CACHE}/ unit-test: @@ -91,8 +93,14 @@ old-miner-compatibility: - cp ${JOB_CACHE}/* bin/ - ./test/compatibility/specific_old.sh miner -cleanup_job: +cleanup_cache: stage: cleanup script: - - rm -r ${JOB_CACHE}/* + - rm -r ${JOB_CACHE} when: always + +cleanup_logs: + stage: cleanup + script: + - rm -r ${LOG_CACHE} + when: on_success diff --git a/test/compatibility/specific_old.sh b/test/compatibility/specific_old.sh index fa3a071e8..0d2ac2b37 100755 --- a/test/compatibility/specific_old.sh +++ b/test/compatibility/specific_old.sh @@ -9,7 +9,7 @@ cd 
${TEST_WD} OLD_BIN_DIR=${CACHE_DIR}/${PREV_VERSION} NEW_BIN_DIR=${PROJECT_DIR}/bin -LOGS_DIR=${JOB_CACHE}/old_${test_case}_log +LOGS_DIR=${LOG_CACHE}/old_${test_case} mkdir -p ${LOGS_DIR} case $test_case in From 7a95e63f963cd04710e78b62b00bdd76d012de3a Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 28 Feb 2019 16:54:20 +0800 Subject: [PATCH 044/244] Remove all cache folder while ci success. --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index a66f17525..44411c1ee 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -102,5 +102,5 @@ cleanup_cache: cleanup_logs: stage: cleanup script: - - rm -r ${LOG_CACHE} + - rm -r ${CACHE_DIR}/${CI_PIPELINE_IID} when: on_success From 7aef69de9419cedfda7440222207fa080cdf547d Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 28 Feb 2019 17:19:05 +0800 Subject: [PATCH 045/244] Combine ci cleanup process. --- .gitlab-ci.yml | 28 +++++++++++----------------- test/compatibility/specific_old.sh | 2 +- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 44411c1ee..427df1acf 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -11,8 +11,8 @@ variables: CODECOV_TOKEN: $CODECOV_TOKEN UNITTESTTAGS: linux sqlite_omit_load_extension CACHE_DIR: /CovenantSQL_bins - JOB_CACHE: ${CACHE_DIR}/${CI_PIPELINE_IID}/bin - LOG_CACHE: ${CACHE_DIR}/${CI_PIPELINE_IID}/logs + PIPLINE_CACHE: $CACHE_DIR/$CI_PIPELINE_IID + BIN_CACHE: $CACHE_DIR/$CI_PIPELINE_IID/bin PREV_VERSION: v0.4.0 # gitlabci bins: 192.168.2.100:/srv/gitlab-runner/config/CovenantSQL_bins @@ -36,17 +36,17 @@ before_script: build: stage: build script: + - set -x - make clean - make use_all_cores - - mkdir -p ${JOB_CACHE} - - mkdir -p ${LOG_CACHE} - - cp bin/* ${JOB_CACHE}/ + - mkdir -p ${BIN_CACHE} + - cp bin/* ${BIN_CACHE}/ unit-test: stage: test script: - make clean - - cp ${JOB_CACHE}/* bin/ + - cp ${BIN_CACHE}/* bin/ - ./alltest.sh testnet-compatibility: @@ -58,7 +58,7 
@@ testnet-compatibility: - branch=$(git branch -rv |grep $commit | awk '{print $1}') - if [[ $branch =~ "/beta_" ]]; then exit 0; fi - make clean - - cp ${JOB_CACHE}/* bin/ + - cp ${BIN_CACHE}/* bin/ - go test -tags "$UNITTESTTAGS" -bench=^BenchmarkTestnetMiner2$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ - set -x - ./test/testnet_client/run.sh @@ -70,7 +70,7 @@ old-client-compatibility: - set -o pipefail - set -x - make clean - - cp ${JOB_CACHE}/* bin/ + - cp ${BIN_CACHE}/* bin/ - ./test/compatibility/specific_old.sh client old-bp-compatibility: @@ -80,7 +80,7 @@ old-bp-compatibility: - set -o pipefail - set -x - make clean - - cp ${JOB_CACHE}/* bin/ + - cp ${BIN_CACHE}/* bin/ - ./test/compatibility/specific_old.sh bp old-miner-compatibility: @@ -90,17 +90,11 @@ old-miner-compatibility: - set -o pipefail - set -x - make clean - - cp ${JOB_CACHE}/* bin/ + - cp ${BIN_CACHE}/* bin/ - ./test/compatibility/specific_old.sh miner cleanup_cache: stage: cleanup script: - - rm -r ${JOB_CACHE} - when: always - -cleanup_logs: - stage: cleanup - script: - - rm -r ${CACHE_DIR}/${CI_PIPELINE_IID} + - rm -r ${PIPLINE_CACHE} when: on_success diff --git a/test/compatibility/specific_old.sh b/test/compatibility/specific_old.sh index 0d2ac2b37..182e0f276 100755 --- a/test/compatibility/specific_old.sh +++ b/test/compatibility/specific_old.sh @@ -9,7 +9,7 @@ cd ${TEST_WD} OLD_BIN_DIR=${CACHE_DIR}/${PREV_VERSION} NEW_BIN_DIR=${PROJECT_DIR}/bin -LOGS_DIR=${LOG_CACHE}/old_${test_case} +LOGS_DIR=${PIPLINE_CACHE}/logs/old_${test_case} mkdir -p ${LOGS_DIR} case $test_case in From 3451b40ba76dec1920779da472254794beaa2edb Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 28 Feb 2019 18:06:05 +0800 Subject: [PATCH 046/244] Use independent config file for compatibility test. 
--- .gitlab-ci.yml | 4 +- test/compatibility/node_0/config.yaml | 122 ++++++++++++++++++++ test/compatibility/node_0/private.key | 2 + test/compatibility/node_1/config.yaml | 122 ++++++++++++++++++++ test/compatibility/node_1/private.key | 2 + test/compatibility/node_2/config.yaml | 122 ++++++++++++++++++++ test/compatibility/node_2/private.key | 2 + test/compatibility/node_c/config.yaml | 122 ++++++++++++++++++++ test/compatibility/node_c/private.key | Bin 0 -> 96 bytes test/compatibility/node_miner_0/config.yaml | 118 +++++++++++++++++++ test/compatibility/node_miner_0/private.key | 1 + test/compatibility/node_miner_1/config.yaml | 118 +++++++++++++++++++ test/compatibility/node_miner_1/private.key | 2 + test/compatibility/node_miner_2/config.yaml | 118 +++++++++++++++++++ test/compatibility/node_miner_2/private.key | 1 + test/compatibility/specific_old.sh | 21 ++-- test/integration/node_0/config.yaml | 16 +-- test/integration/node_1/config.yaml | 16 +-- test/integration/node_2/config.yaml | 16 +-- test/integration/node_c/config.yaml | 16 +-- test/integration/node_miner_0/config.yaml | 16 +-- test/integration/node_miner_1/config.yaml | 16 +-- test/integration/node_miner_2/config.yaml | 16 +-- 23 files changed, 920 insertions(+), 69 deletions(-) create mode 100644 test/compatibility/node_0/config.yaml create mode 100644 test/compatibility/node_0/private.key create mode 100644 test/compatibility/node_1/config.yaml create mode 100644 test/compatibility/node_1/private.key create mode 100644 test/compatibility/node_2/config.yaml create mode 100644 test/compatibility/node_2/private.key create mode 100644 test/compatibility/node_c/config.yaml create mode 100644 test/compatibility/node_c/private.key create mode 100644 test/compatibility/node_miner_0/config.yaml create mode 100644 test/compatibility/node_miner_0/private.key create mode 100644 test/compatibility/node_miner_1/config.yaml create mode 100644 test/compatibility/node_miner_1/private.key create mode 100644 
test/compatibility/node_miner_2/config.yaml create mode 100644 test/compatibility/node_miner_2/private.key diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 427df1acf..4865840e5 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -11,7 +11,7 @@ variables: CODECOV_TOKEN: $CODECOV_TOKEN UNITTESTTAGS: linux sqlite_omit_load_extension CACHE_DIR: /CovenantSQL_bins - PIPLINE_CACHE: $CACHE_DIR/$CI_PIPELINE_IID + PIPELINE_CACHE: $CACHE_DIR/$CI_PIPELINE_IID BIN_CACHE: $CACHE_DIR/$CI_PIPELINE_IID/bin PREV_VERSION: v0.4.0 # gitlabci bins: 192.168.2.100:/srv/gitlab-runner/config/CovenantSQL_bins @@ -96,5 +96,5 @@ old-miner-compatibility: cleanup_cache: stage: cleanup script: - - rm -r ${PIPLINE_CACHE} + - rm -r ${PIPELINE_CACHE} when: on_success diff --git a/test/compatibility/node_0/config.yaml b/test/compatibility/node_0/config.yaml new file mode 100644 index 000000000..cd1a54cda --- /dev/null +++ b/test/compatibility/node_0/config.yaml @@ -0,0 +1,122 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:3122" +ThisNodeID: "00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9" +QPS: 1000 +BillingBlockCount: 2 +ChainBusPeriod: 1s +BPPeriod: 3s +BPTick: 1s +SQLChainPeriod: 3s +SQLChainTick: 1s +SQLChainTTL: 10 +MinProviderDeposit: 1000000 +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com + oJMRESz5E4gYzS/q6XDrvU1qMPYIjCWzJaOau8XNEZeqCYKD5ar0IRd8KqXXFJkqmVfRvMGPmM1x8fGAa2XhSA==: cloudflare.com +MinNodeIDDifficulty: 2 +DNSSeed: + EnforcedDNSSEC: false + DNSServers: + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 + +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 
00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z + BaseAccounts: + - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:3122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:3121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:3120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 
00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:2144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:2145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:2146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/compatibility/node_0/private.key b/test/compatibility/node_0/private.key new file mode 100644 index 000000000..449618c0a --- /dev/null +++ b/test/compatibility/node_0/private.key @@ -0,0 +1,2 @@ +WAð8#|TZԓ`mF}~e ʆ?~ *E%vpo*a߂ç_Bľ@8 +MC2 \ No newline at end of file diff --git a/test/compatibility/node_1/config.yaml b/test/compatibility/node_1/config.yaml new file mode 100644 index 000000000..eca03e17d --- /dev/null +++ b/test/compatibility/node_1/config.yaml @@ -0,0 +1,122 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:3121" +ThisNodeID: "00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35" +QPS: 1000 +BillingBlockCount: 2 +ChainBusPeriod: 1s +BPPeriod: 3s +BPTick: 1s +SQLChainPeriod: 3s +SQLChainTick: 1s +SQLChainTTL: 10 +MinProviderDeposit: 1000000 +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + 
mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com + oJMRESz5E4gYzS/q6XDrvU1qMPYIjCWzJaOau8XNEZeqCYKD5ar0IRd8KqXXFJkqmVfRvMGPmM1x8fGAa2XhSA==: cloudflare.com +MinNodeIDDifficulty: 2 +DNSSeed: + EnforcedDNSSEC: false + DNSServers: + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 + +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z + BaseAccounts: + - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:3122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + 
d: 2305843009893772025 + Addr: 127.0.0.1:3121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:3120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:2144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:2145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:2146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/compatibility/node_1/private.key b/test/compatibility/node_1/private.key new file mode 100644 index 000000000..449618c0a --- /dev/null +++ b/test/compatibility/node_1/private.key @@ -0,0 +1,2 @@ +WAð8#|TZԓ`mF}~e ʆ?~ *E%vpo*a߂ç_Bľ@8 +MC2 \ No newline at end of file diff --git a/test/compatibility/node_2/config.yaml b/test/compatibility/node_2/config.yaml new file mode 100644 index 000000000..30a422bc3 --- /dev/null +++ b/test/compatibility/node_2/config.yaml @@ -0,0 +1,122 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: 
"127.0.0.1:3120" +ThisNodeID: "000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582" +QPS: 1000 +BillingBlockCount: 2 +ChainBusPeriod: 1s +BPPeriod: 3s +BPTick: 1s +SQLChainPeriod: 3s +SQLChainTick: 1s +SQLChainTTL: 10 +MinProviderDeposit: 1000000 +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com + oJMRESz5E4gYzS/q6XDrvU1qMPYIjCWzJaOau8XNEZeqCYKD5ar0IRd8KqXXFJkqmVfRvMGPmM1x8fGAa2XhSA==: cloudflare.com +MinNodeIDDifficulty: 2 +DNSSeed: + EnforcedDNSSEC: false + DNSServers: + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 + +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z + BaseAccounts: + - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd + StableCoinBalance: 10000000000000000000 + 
CovenantCoinBalance: 10000000000000000000 +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:3122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:3121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:3120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:2144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:2145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:2146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/compatibility/node_2/private.key b/test/compatibility/node_2/private.key new file mode 100644 index 000000000..449618c0a --- /dev/null +++ b/test/compatibility/node_2/private.key @@ -0,0 +1,2 @@ +WAð8#|TZԓ`mF}~e ʆ?~ 
*E%vpo*a߂ç_Bľ@8 +MC2 \ No newline at end of file diff --git a/test/compatibility/node_c/config.yaml b/test/compatibility/node_c/config.yaml new file mode 100644 index 000000000..84320e6c2 --- /dev/null +++ b/test/compatibility/node_c/config.yaml @@ -0,0 +1,122 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:3120" +ThisNodeID: "00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d" +QPS: 1000 +BillingBlockCount: 2 +ChainBusPeriod: 1s +BPPeriod: 3s +BPTick: 1s +SQLChainPeriod: 3s +SQLChainTick: 1s +SQLChainTTL: 10 +MinProviderDeposit: 1000000 +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com + oJMRESz5E4gYzS/q6XDrvU1qMPYIjCWzJaOau8XNEZeqCYKD5ar0IRd8KqXXFJkqmVfRvMGPmM1x8fGAa2XhSA==: cloudflare.com +MinNodeIDDifficulty: 2 +DNSSeed: + EnforcedDNSSEC: false + DNSServers: + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 + +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z + BaseAccounts: + - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 
1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:3122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:3121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:3120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:2144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:2145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 
000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:2146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/compatibility/node_c/private.key b/test/compatibility/node_c/private.key new file mode 100644 index 0000000000000000000000000000000000000000..f563980c1fcd669303b1bee9c2172bf5a3519b8c GIT binary patch literal 96 zcmV-m0H6PF*slzHCqzPE3aw^kxJ?Q%G%ogw14*THn=7~eV;?h-t?#^t5W+6R^1DgL z$@60LgW8>L#Ft4anW%5%J6f5~?krWm@CHc~TLX=J0P-Na@n`wgY{PEN*;2omcYC0; Ca4_ls literal 0 HcmV?d00001 diff --git a/test/compatibility/node_miner_0/config.yaml b/test/compatibility/node_miner_0/config.yaml new file mode 100644 index 000000000..ce99baf98 --- /dev/null +++ b/test/compatibility/node_miner_0/config.yaml @@ -0,0 +1,118 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:2144" +ThisNodeID: "000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade" +QPS: 1000 +BillingBlockCount: 2 +ChainBusPeriod: 1s +BPPeriod: 3s +BPTick: 1s +SQLChainPeriod: 3s +SQLChainTick: 1s +SQLChainTTL: 10 +MinProviderDeposit: 1000000 +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com + oJMRESz5E4gYzS/q6XDrvU1qMPYIjCWzJaOau8XNEZeqCYKD5ar0IRd8KqXXFJkqmVfRvMGPmM1x8fGAa2XhSA==: cloudflare.com +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 
0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z + BaseAccounts: + - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 +Miner: + IsTestMode: true + RootDir: "./data" + MaxReqTimeGap: "2s" + ProvideServiceInterval: "60s" +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:3122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:3121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:3120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: 
Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:2144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:2145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:2146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/compatibility/node_miner_0/private.key b/test/compatibility/node_miner_0/private.key new file mode 100644 index 000000000..12e7d3d80 --- /dev/null +++ b/test/compatibility/node_miner_0/private.key @@ -0,0 +1 @@ +8s_/W-7IyH_DyTG*M9C#8p%x>SߪRLmPB>{:̜뢷|| \ No newline at end of file diff --git a/test/compatibility/node_miner_1/config.yaml b/test/compatibility/node_miner_1/config.yaml new file mode 100644 index 000000000..6cec5bbc7 --- /dev/null +++ b/test/compatibility/node_miner_1/config.yaml @@ -0,0 +1,118 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:2145" +ThisNodeID: "000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5" +QPS: 1000 +BillingBlockCount: 2 +ChainBusPeriod: 1s +BPPeriod: 3s +BPTick: 1s +SQLChainPeriod: 3s +SQLChainTick: 1s +SQLChainTTL: 10 +MinProviderDeposit: 1000000 +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com + oJMRESz5E4gYzS/q6XDrvU1qMPYIjCWzJaOau8XNEZeqCYKD5ar0IRd8KqXXFJkqmVfRvMGPmM1x8fGAa2XhSA==: 
cloudflare.com +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z + BaseAccounts: + - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 +Miner: + IsTestMode: true + RootDir: "./data" + MaxReqTimeGap: "2s" + ProvideServiceInterval: "60s" +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:3122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:3121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 
259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:3120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:2144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:2145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:2146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/compatibility/node_miner_1/private.key b/test/compatibility/node_miner_1/private.key new file mode 100644 index 000000000..44e8915e6 --- /dev/null +++ b/test/compatibility/node_miner_1/private.key @@ -0,0 +1,2 @@ +s]](o3R +D5*9C 7ZinƋSp*SS5^ޑax>Xо2#IxRw+Ŕ \ No newline at end of file diff --git a/test/compatibility/node_miner_2/config.yaml b/test/compatibility/node_miner_2/config.yaml new file mode 100644 index 000000000..ec0ac5060 --- /dev/null +++ b/test/compatibility/node_miner_2/config.yaml @@ -0,0 +1,118 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:2146" +ThisNodeID: "000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8" +QPS: 1000 +BillingBlockCount: 2 +ChainBusPeriod: 1s +BPPeriod: 3s +BPTick: 1s +SQLChainPeriod: 3s 
+SQLChainTick: 1s +SQLChainTTL: 10 +MinProviderDeposit: 1000000 +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com + oJMRESz5E4gYzS/q6XDrvU1qMPYIjCWzJaOau8XNEZeqCYKD5ar0IRd8KqXXFJkqmVfRvMGPmM1x8fGAa2XhSA==: cloudflare.com +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z + BaseAccounts: + - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 + - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd + StableCoinBalance: 10000000000000000000 + CovenantCoinBalance: 10000000000000000000 +Miner: + IsTestMode: true + RootDir: "./data" + MaxReqTimeGap: "2s" + ProvideServiceInterval: "60s" +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:3122 + PublicKey: 
"02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:3121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:3120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:2144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:2145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:2146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/compatibility/node_miner_2/private.key b/test/compatibility/node_miner_2/private.key new file mode 100644 index 000000000..adb437e75 --- /dev/null +++ b/test/compatibility/node_miner_2/private.key @@ -0,0 +1 @@ +6 i.i%8pVVrLBKb: 1;(fF &y췥 RW3?CA;e"K2 \ No newline at end of file diff --git a/test/compatibility/specific_old.sh b/test/compatibility/specific_old.sh index 182e0f276..eec7174ee 100755 --- 
a/test/compatibility/specific_old.sh +++ b/test/compatibility/specific_old.sh @@ -4,12 +4,11 @@ set -e TEST_WD=$(cd $(dirname $0)/; pwd) PROJECT_DIR=$(cd ${TEST_WD}/../../; pwd) -cd ${TEST_WD} OLD_BIN_DIR=${CACHE_DIR}/${PREV_VERSION} NEW_BIN_DIR=${PROJECT_DIR}/bin -LOGS_DIR=${PIPLINE_CACHE}/logs/old_${test_case} +LOGS_DIR=${PIPELINE_CACHE}/logs/old_${test_case} mkdir -p ${LOGS_DIR} case $test_case in @@ -33,26 +32,26 @@ case $test_case in ;; esac -2>&1 +cd ${TEST_WD} # start bp -nohup ${BPBIN} -config ${PROJECT_DIR}/test/integration/node_0/config.yaml >${LOGS_DIR}/bp0.log 2>&1 & -nohup ${BPBIN} -config ${PROJECT_DIR}/test/integration/node_1/config.yaml >${LOGS_DIR}/bp1.log 2>&1 & -nohup ${BPBIN} -config ${PROJECT_DIR}/test/integration/node_2/config.yaml >${LOGS_DIR}/bp2.log 2>&1 & +nohup ${BPBIN} -config node_0/config.yaml >${LOGS_DIR}/bp0.log 2>&1 & +nohup ${BPBIN} -config node_1/config.yaml >${LOGS_DIR}/bp1.log 2>&1 & +nohup ${BPBIN} -config node_2/config.yaml >${LOGS_DIR}/bp2.log 2>&1 & # wait bp start sleep 20 # start miner -nohup ${MINERBIN} -config ${PROJECT_DIR}/test/integration/node_miner_0/config.yaml >${LOGS_DIR}/miner0.log 2>&1 & -nohup ${MINERBIN} -config ${PROJECT_DIR}/test/integration/node_miner_1/config.yaml >${LOGS_DIR}/miner1.log 2>&1 & -nohup ${MINERBIN} -config ${PROJECT_DIR}/test/integration/node_miner_2/config.yaml >${LOGS_DIR}/miner2.log 2>&1 & +nohup ${MINERBIN} -config node_miner_0/config.yaml >${LOGS_DIR}/miner0.log 2>&1 & +nohup ${MINERBIN} -config node_miner_1/config.yaml >${LOGS_DIR}/miner1.log 2>&1 & +nohup ${MINERBIN} -config node_miner_2/config.yaml >${LOGS_DIR}/miner2.log 2>&1 & # wait miner start sleep 20 -${CLIENTBIN} -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -get-balance +${CLIENTBIN} -config node_c/config.yaml -get-balance -${CLIENTBIN} -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -create 2 -wait-tx-confirm | tee dsn.txt +${CLIENTBIN} -config node_c/config.yaml -create 2 -wait-tx-confirm | tee 
dsn.txt #get dsn dsn=$(cat dsn.txt) diff --git a/test/integration/node_0/config.yaml b/test/integration/node_0/config.yaml index cd1a54cda..397b65eb8 100644 --- a/test/integration/node_0/config.yaml +++ b/test/integration/node_0/config.yaml @@ -45,17 +45,17 @@ BlockProducer: Timestamp: 2018-08-13T21:59:59.12Z BaseAccounts: - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/integration/node_1/config.yaml b/test/integration/node_1/config.yaml index eca03e17d..d35061395 100644 --- a/test/integration/node_1/config.yaml +++ b/test/integration/node_1/config.yaml @@ -45,17 +45,17 @@ BlockProducer: Timestamp: 2018-08-13T21:59:59.12Z BaseAccounts: - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 
10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/integration/node_2/config.yaml b/test/integration/node_2/config.yaml index 30a422bc3..85dce477d 100644 --- a/test/integration/node_2/config.yaml +++ b/test/integration/node_2/config.yaml @@ -45,17 +45,17 @@ BlockProducer: Timestamp: 2018-08-13T21:59:59.12Z BaseAccounts: - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/integration/node_c/config.yaml 
b/test/integration/node_c/config.yaml index 84320e6c2..f4f265e8c 100644 --- a/test/integration/node_c/config.yaml +++ b/test/integration/node_c/config.yaml @@ -45,17 +45,17 @@ BlockProducer: Timestamp: 2018-08-13T21:59:59.12Z BaseAccounts: - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 KnownNodes: - ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 Nonce: diff --git a/test/integration/node_miner_0/config.yaml b/test/integration/node_miner_0/config.yaml index ce99baf98..ceac395a8 100644 --- a/test/integration/node_miner_0/config.yaml +++ b/test/integration/node_miner_0/config.yaml @@ -36,17 +36,17 @@ BlockProducer: Timestamp: 2018-08-13T21:59:59.12Z BaseAccounts: - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 
1000000000 - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 Miner: IsTestMode: true RootDir: "./data" diff --git a/test/integration/node_miner_1/config.yaml b/test/integration/node_miner_1/config.yaml index 6cec5bbc7..41eb0305b 100644 --- a/test/integration/node_miner_1/config.yaml +++ b/test/integration/node_miner_1/config.yaml @@ -36,17 +36,17 @@ BlockProducer: Timestamp: 2018-08-13T21:59:59.12Z BaseAccounts: - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 Miner: IsTestMode: true RootDir: "./data" diff --git a/test/integration/node_miner_2/config.yaml b/test/integration/node_miner_2/config.yaml index ec0ac5060..51ec8f581 100644 --- a/test/integration/node_miner_2/config.yaml +++ 
b/test/integration/node_miner_2/config.yaml @@ -36,17 +36,17 @@ BlockProducer: Timestamp: 2018-08-13T21:59:59.12Z BaseAccounts: - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd - StableCoinBalance: 10000000000000000000 - CovenantCoinBalance: 10000000000000000000 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 Miner: IsTestMode: true RootDir: "./data" From 6df3c8b6cf34218c7175a735e4256bee32043b5d Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Thu, 28 Feb 2019 18:13:53 +0800 Subject: [PATCH 047/244] Fix transaction directives, add commit to query sanitizer ignore list --- xenomint/query_sanitizer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xenomint/query_sanitizer.go b/xenomint/query_sanitizer.go index 81119fede..25dc32c14 100644 --- a/xenomint/query_sanitizer.go +++ b/xenomint/query_sanitizer.go @@ -76,7 +76,7 @@ var ( func convertQueryAndBuildArgs(pattern string, args []types.NamedArg) (containsDDL bool, p string, ifs []interface{}, err error) { if lower := strings.ToLower(pattern); strings.Contains(lower, "begin") || - strings.Contains(lower, "rollback") { + strings.Contains(lower, "rollback") || strings.Contains(lower, "commit") { return false, pattern, nil, nil } var ( From 29620e1873df91669b6dcee090a9b87c29de1d0b Mon Sep 17 
00:00:00 2001 From: auxten Date: Thu, 28 Feb 2019 20:07:06 +0800 Subject: [PATCH 048/244] Add MainChain Explorer, SQLChain Explorer, Forum in readme --- README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index b322ab190..41587585f 100644 --- a/README.md +++ b/README.md @@ -107,10 +107,14 @@ Watch us or [![follow on Twitter](https://img.shields.io/twitter/url/https/twitt ## TestNet - [Quick Start](https://developers.covenantsql.io) +- [MainChain Explorer](http://scan.covenantsql.io) +- [SQLChain Explorer](https://explorer.dbhub.org) +- [Demo & Forum](https://demo.covenantsql.io/forum/) ## Contact -- [mail us](mailto:webmaster@covenantsql.io) +- [Mail](mailto:webmaster@covenantsql.io) +- [Forum](https://demo.covenantsql.io/forum/) - follow on Twitter From cb2330a93a61ef552a3c188b7dab5b1ebacdbfd0 Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Fri, 1 Mar 2019 10:28:28 +0800 Subject: [PATCH 049/244] Add test case for isolation level --- xenomint/state_test.go | 67 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/xenomint/state_test.go b/xenomint/state_test.go index 8c058d831..16a293c7c 100644 --- a/xenomint/state_test.go +++ b/xenomint/state_test.go @@ -22,6 +22,7 @@ import ( "fmt" "os" "path" + "reflect" "sync" "testing" @@ -702,6 +703,72 @@ func TestSerializableState(t *testing.T) { _, resp, err = state.Query(req, true) So(err, ShouldBeNil) So(resp, ShouldNotBeNil) + Convey("The state should keep consistent with committed transaction", func(c C) { + var ( + count = 1000 + insertQueries = make([]types.Query, count+2) + deleteQueries = make([]types.Query, count+2) + iReq, dReq *types.Request + ) + insertQueries[0] = buildQuery(`BEGIN`) + deleteQueries[0] = buildQuery(`BEGIN`) + for i := 0; i < count; i++ { + insertQueries[i+1] = buildQuery( + `INSERT INTO t1(k, v) VALUES (?, ?)`, i, fmt.Sprintf("v%d", i), + ) + deleteQueries[i+1] = buildQuery(`DELETE FROM t1 WHERE k=?`, i) + } 
+ insertQueries[count+1] = buildQuery(`COMMIT`) + deleteQueries[count+1] = buildQuery(`COMMIT`) + iReq = buildRequest(types.WriteQuery, insertQueries) + dReq = buildRequest(types.WriteQuery, deleteQueries) + + var ( + wg = &sync.WaitGroup{} + ctx, cancel = context.WithCancel(context.Background()) + ) + defer func() { + cancel() + wg.Wait() + }() + wg.Add(1) + go func() { + defer wg.Done() + var ( + resp *types.Response + err error + ) + for { + _, resp, err = state.Query(iReq, true) + c.So(err, ShouldBeNil) + c.Printf("insert affected rows: %d\n", resp.Header.AffectedRows) + _, resp, err = state.Query(dReq, true) + c.So(err, ShouldBeNil) + c.Printf("delete affected rows: %d\n", resp.Header.AffectedRows) + select { + case <-ctx.Done(): + return + default: + } + } + }() + + for i := 0; i < count; i++ { + _, resp, err = state.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT COUNT(1) AS cnt FROM t1`), + }), true) + So(reflect.DeepEqual(resp.Payload, types.ResponsePayload{ + Columns: []string{"cnt"}, + DeclTypes: []string{""}, + Rows: []types.ResponseRow{{Values: []interface{}{int64(0)}}}, + }) || reflect.DeepEqual(resp.Payload, types.ResponsePayload{ + Columns: []string{"cnt"}, + DeclTypes: []string{""}, + Rows: []types.ResponseRow{{Values: []interface{}{int64(count)}}}, + }), ShouldBeTrue) + Printf("index = %d, count = %v\n", i, resp) + } + }) Convey("The state should not see uncommitted changes", func(c C) { // Build transaction query var ( From 25e3f5988d0f3a33d4aa0588e3aaf475a1a0efea Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Fri, 1 Mar 2019 12:42:43 +0800 Subject: [PATCH 050/244] Ignore nofile setrlimit in non-linux environment --- xenomint/xxx_test.go | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/xenomint/xxx_test.go b/xenomint/xxx_test.go index a670fca3e..b0a7686d0 100644 --- a/xenomint/xxx_test.go +++ b/xenomint/xxx_test.go @@ -21,6 +21,7 @@ import ( "math/rand" "os" "path" + "runtime" 
"sync" "sync/atomic" "syscall" @@ -198,16 +199,18 @@ func setup() { rand.Seed(time.Now().UnixNano()) - // Set NOFILE limit - if err = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &lmt); err != nil { - panic(err) - } - if lmt.Max < minNoFile { - panic("insufficient max RLIMIT_NOFILE") - } - lmt.Cur = lmt.Max - if err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &lmt); err != nil { - panic(err) + if runtime.GOOS == "linux" { + // Set NOFILE limit + if err = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &lmt); err != nil { + panic(err) + } + if lmt.Max < minNoFile { + panic("insufficient max RLIMIT_NOFILE") + } + lmt.Cur = lmt.Max + if err = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &lmt); err != nil { + panic(err) + } } // Initialze kms From 896e0605b497f88d432a04ad3e9cfcce8aaaedfc Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Fri, 1 Mar 2019 12:43:39 +0800 Subject: [PATCH 051/244] Test begin/commit and error rollback in non read-uncommitted isolation level --- xenomint/state.go | 9 ++--- xenomint/state_test.go | 76 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 81 insertions(+), 4 deletions(-) diff --git a/xenomint/state.go b/xenomint/state.go index e7034a620..7bcbf5557 100644 --- a/xenomint/state.go +++ b/xenomint/state.go @@ -404,6 +404,11 @@ func (s *State) write( } defer s.executer.Exec(`ROLLBACK TO "?"`, lastSeq) } + if s.level != sql.LevelReadUncommitted { + // NOTE(leventeliu): this will cancel any uncommitted transaction, and do not harm to + // committed ones. + defer s.executer.Exec(`ROLLBACK`) + } for i, v := range req.Payload.Queries { var res sql.Result if res, ierr = s.writeSingle(ctx, &v); ierr != nil { @@ -426,10 +431,6 @@ func (s *State) write( return } } - } else { - // NOTE(leventeliu): this will cancel any uncommitted transaction, and do not harm to - // committed ones. 
- s.executer.Exec(`ROLLBACK`) } // Try to commit if the ongoing tx is too large or schema is changed if s.getSeq()-s.getLastCommitPoint() > s.maxTx || diff --git a/xenomint/state_test.go b/xenomint/state_test.go index 16a293c7c..9811f38be 100644 --- a/xenomint/state_test.go +++ b/xenomint/state_test.go @@ -818,6 +818,82 @@ func TestSerializableState(t *testing.T) { }) } }) + Convey("The state should see changes", FailureContinues, func(c C) { + // Build transaction query + var ( + count = 1000 + queries = make([]types.Query, count+2) + req *types.Request + ) + queries[0] = buildQuery(`BEGIN`) + for i := 0; i < count; i++ { + queries[i+1] = buildQuery( + `INSERT INTO t1(k, v) VALUES (?, ?)`, i, fmt.Sprintf("v%d", i), + ) + } + queries[count+1] = buildQuery(`COMMIT`) + req = buildRequest(types.WriteQuery, queries) + // Send uncommitted transaction on background + var _, resp, err = state.Query(req, true) + c.So(err, ShouldBeNil) + c.So(resp.Header.RowCount, ShouldEqual, 0) + + // Test isolation level + for i := 0; i < count; i++ { + _, resp, err = state.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT COUNT(1) AS cnt FROM t1`), + }), true) + So(resp.Payload, ShouldResemble, types.ResponsePayload{ + Columns: []string{"cnt"}, + DeclTypes: []string{""}, + Rows: []types.ResponseRow{{Values: []interface{}{int64(count)}}}, + }) + } + + req = buildRequest(types.WriteQuery, []types.Query{ + buildQuery("DELETE FROM t1"), + }) + _, resp, err = state.Query(req, true) + c.So(err, ShouldBeNil) + }) + Convey("The state should not see changes because of failure query content", FailureContinues, func(c C) { + // Build transaction query + var ( + count = 1000 + queries = make([]types.Query, count+3) + req *types.Request + ) + queries[0] = buildQuery(`BEGIN`) + for i := 0; i < count; i++ { + queries[i+1] = buildQuery( + `INSERT INTO t1(k, v) VALUES (?, ?)`, i, fmt.Sprintf("v%d", i), + ) + } + queries[count+1] = buildQuery(`HAHA`) + queries[count+2] = 
buildQuery(`COMMIT`) + req = buildRequest(types.WriteQuery, queries) + // Send uncommitted transaction on background + var _, resp, err = state.Query(req, true) + c.So(err, ShouldNotBeNil) + + // Test isolation level + for i := 0; i < count; i++ { + _, resp, err = state.Query(buildRequest(types.ReadQuery, []types.Query{ + buildQuery(`SELECT COUNT(1) AS cnt FROM t1`), + }), true) + So(resp.Payload, ShouldResemble, types.ResponsePayload{ + Columns: []string{"cnt"}, + DeclTypes: []string{""}, + Rows: []types.ResponseRow{{Values: []interface{}{int64(0)}}}, + }) + } + + req = buildRequest(types.WriteQuery, []types.Query{ + buildQuery("DELETE FROM t1"), + }) + _, resp, err = state.Query(req, true) + c.So(err, ShouldBeNil) + }) }) }) } From 2cc39a8524a3ce869e00648ad8873f16a24e1884 Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Thu, 28 Feb 2019 20:21:32 +0800 Subject: [PATCH 052/244] Format logs and remove load chain method --- sqlchain/blockindex.go | 27 +--- sqlchain/blockindex_test.go | 19 +-- sqlchain/chain.go | 312 +++++++++++------------------------- 3 files changed, 105 insertions(+), 253 deletions(-) diff --git a/sqlchain/blockindex.go b/sqlchain/blockindex.go index 4256b675f..6abe47010 100644 --- a/sqlchain/blockindex.go +++ b/sqlchain/blockindex.go @@ -33,31 +33,16 @@ type blockNode struct { } func newBlockNode(height int32, block *types.Block, parent *blockNode) *blockNode { + var count int32 + if parent != nil { + count = parent.count + 1 + } return &blockNode{ - hash: *block.BlockHash(), + hash: block.SignedHeader.HSV.DataHash, parent: parent, block: block, height: height, - count: func() int32 { - if parent != nil { - return parent.count + 1 - } - - return 0 - }(), - } -} - -func (n *blockNode) initBlockNode(height int32, block *types.Block, parent *blockNode) { - n.block = block - n.hash = *block.BlockHash() - n.parent = nil - n.height = height - n.count = 0 - - if parent != nil { - n.parent = parent - n.count = parent.count + 1 + count: count, } } diff 
--git a/sqlchain/blockindex_test.go b/sqlchain/blockindex_test.go index e372980c8..0d486347b 100644 --- a/sqlchain/blockindex_test.go +++ b/sqlchain/blockindex_test.go @@ -19,7 +19,6 @@ package sqlchain import ( "testing" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/types" ) @@ -74,20 +73,7 @@ func TestNewBlockNode(t *testing.T) { } func TestInitBlockNode(t *testing.T) { - parent := &blockNode{ - parent: nil, - hash: hash.Hash{}, - count: -1, - } - - child := &blockNode{ - parent: nil, - hash: hash.Hash{}, - count: -1, - } - - parent.initBlockNode(0, testBlocks[0], nil) - + parent := newBlockNode(0, testBlocks[0], nil) if parent == nil { t.Fatal("unexpected result: nil") } else if parent.parent != nil { @@ -96,8 +82,7 @@ func TestInitBlockNode(t *testing.T) { t.Fatalf("unexpected height: %d", parent.count) } - child.initBlockNode(1, testBlocks[1], parent) - + child := newBlockNode(1, testBlocks[1], parent) if child == nil { t.Fatal("unexpected result: nil") } else if child.parent != parent { diff --git a/sqlchain/chain.go b/sqlchain/chain.go index 6f421c372..cd1e402a6 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -22,7 +22,6 @@ import ( "database/sql" "encoding/binary" "fmt" - "os" rt "runtime" "sync" "sync/atomic" @@ -66,7 +65,7 @@ func init() { leveldbConf.Compression = opt.SnappyCompression } -func statBlock(b *types.Block) { +func trackBlock(b *types.Block) { atomic.AddInt32(&cachedBlockCount, 1) rt.SetFinalizer(b, func(_ *types.Block) { atomic.AddInt32(&cachedBlockCount, -1) @@ -128,104 +127,26 @@ type Chain struct { addr *proto.AccountAddress } -// NewChain creates a new sql-chain struct. -func NewChain(c *Config) (chain *Chain, err error) { - return NewChainWithContext(context.Background(), c) -} - -// NewChainWithContext creates a new sql-chain struct with context. 
-func NewChainWithContext(ctx context.Context, c *Config) (chain *Chain, err error) { - // TODO(leventeliu): this is a rough solution, you may also want to clean database file and - // force rebuilding. - var fi os.FileInfo - if fi, err = os.Stat(c.ChainFilePrefix + "-block-state.ldb"); err == nil && fi.Mode().IsDir() { - return LoadChain(c) - } - - err = c.Genesis.VerifyAsGenesis() - if err != nil { - return - } - - // Open LevelDB for block and state - bdbFile := c.ChainFilePrefix + "-block-state.ldb" - bdb, err := leveldb.OpenFile(bdbFile, &leveldbConf) - if err != nil { - err = errors.Wrapf(err, "open leveldb %s", bdbFile) - return - } - - log.WithField("db", c.DatabaseID).Debugf("create new chain bdb %s", bdbFile) - - // Open LevelDB for ack/request/response - tdbFile := c.ChainFilePrefix + "-ack-req-resp.ldb" - tdb, err := leveldb.OpenFile(tdbFile, &leveldbConf) - if err != nil { - err = errors.Wrapf(err, "open leveldb %s", tdbFile) - return - } - - log.WithField("db", c.DatabaseID).Debugf("create new chain tdb %s", tdbFile) - - // Open storage - var strg xi.Storage - if strg, err = xs.NewSqlite(c.DataFile); err != nil { +func (c *Chain) genesis(b *types.Block) (err error) { + if b == nil { + err = errors.New("genesis block not provided") return } - - // Cache local private key - var ( - pk *asymmetric.PrivateKey - addr proto.AccountAddress - ) - if pk, err = kms.GetLocalPrivateKey(); err != nil { - err = errors.Wrap(err, "failed to cache private key") + if err = b.VerifyAsGenesis(); err != nil { + err = errors.Wrap(err, "initialize chain state") return } - addr, err = crypto.PubKeyHash(pk.PubKey()) - if err != nil { - log.WithError(err).WithField("db", c.DatabaseID).Warning("failed to generate addr in NewChain") - return - } - - // Create chain state - chain = &Chain{ - bdb: bdb, - tdb: tdb, - bi: newBlockIndex(), - ai: newAckIndex(), - st: x.NewState(sql.IsolationLevel(c.IsolationLevel), c.Server, strg), - cl: rpc.NewCaller(), - rt: newRunTime(ctx, c), - 
ctx: ctx, - blocks: make(chan *types.Block), - heights: make(chan int32, 1), - responses: make(chan *types.ResponseHeader), - acks: make(chan *types.AckHeader), - tokenType: c.TokenType, - gasPrice: c.GasPrice, - updatePeriod: c.UpdatePeriod, - databaseID: c.DatabaseID, - - pk: pk, - addr: &addr, - } - - if err = chain.pushBlock(c.Genesis); err != nil { - return nil, err - } - - return + return c.pushBlock(b) } -// LoadChain loads the chain state from the specified database and rebuilds a memory index. -func LoadChain(c *Config) (chain *Chain, err error) { - return LoadChainWithContext(context.Background(), c) +// NewChain creates a new sql-chain struct. +func NewChain(c *Config) (chain *Chain, err error) { + return NewChainWithContext(context.Background(), c) } -// LoadChainWithContext loads the chain state from the specified database and rebuilds -// a memory index with context. -func LoadChainWithContext(ctx context.Context, c *Config) (chain *Chain, err error) { +// NewChainWithContext creates a new sql-chain struct with context. 
+func NewChainWithContext(ctx context.Context, c *Config) (chain *Chain, err error) { + le := log.WithField("db", c.DatabaseID) // Open LevelDB for block and state bdbFile := c.ChainFilePrefix + "-block-state.ldb" bdb, err := leveldb.OpenFile(bdbFile, &leveldbConf) @@ -233,6 +154,7 @@ func LoadChainWithContext(ctx context.Context, c *Config) (chain *Chain, err err err = errors.Wrapf(err, "open leveldb %s", bdbFile) return } + le.Debugf("opened chain bdb %s", bdbFile) // Open LevelDB for ack/request/response tdbFile := c.ChainFilePrefix + "-ack-req-resp.ldb" @@ -241,10 +163,12 @@ func LoadChainWithContext(ctx context.Context, c *Config) (chain *Chain, err err err = errors.Wrapf(err, "open leveldb %s", tdbFile) return } + le.Debugf("opened chain tdb %s", tdbFile) - // Open x.State + // Open storage var strg xi.Storage if strg, err = xs.NewSqlite(c.DataFile); err != nil { + err = errors.Wrapf(err, "open data file %s", c.DataFile) return } @@ -259,7 +183,7 @@ func LoadChainWithContext(ctx context.Context, c *Config) (chain *Chain, err err } addr, err = crypto.PubKeyHash(pk.PubKey()) if err != nil { - log.WithError(err).WithField("db", c.DatabaseID).Warning("failed to generate addr in LoadChain") + err = errors.Wrap(err, "failed to generate address") return } @@ -285,38 +209,39 @@ func LoadChainWithContext(ctx context.Context, c *Config) (chain *Chain, err err pk: pk, addr: &addr, } + le = le.WithField("peer", chain.rt.getPeerInfoString()) // Read state struct stateEnc, err := chain.bdb.Get(metaState[:], nil) if err != nil { - return nil, err + if err != leveldb.ErrNotFound { + err = errors.Wrap(err, "fetch head state") + return + } + // err == leveldb.ErrNotFound, chain is in initial state + err = chain.genesis(c.Genesis) + return } + st := &state{} if err = utils.DecodeMsgPack(stateEnc, st); err != nil { + err = errors.Wrap(err, "decode head state") return nil, err } - - log.WithFields(log.Fields{ - "peer": chain.rt.getPeerInfoString(), - "state": st, - "db": 
c.DatabaseID, - }).Debug("loading state from database") + le.WithField("state", st).Debug("loading state from database") // Read blocks and rebuild memory index var ( - id uint64 - index int32 - last *blockNode - blockIter = chain.bdb.NewIterator(util.BytesPrefix(metaBlockIndex[:]), nil) + id uint64 + last, parent *blockNode + blockIter = chain.bdb.NewIterator(util.BytesPrefix(metaBlockIndex[:]), nil) ) defer blockIter.Release() - for index = 0; blockIter.Next(); index++ { + for blockIter.Next() { var ( k = blockIter.Key() v = blockIter.Value() block = &types.Block{} - - current, parent *blockNode ) if err = utils.DecodeMsgPack(v, block); err != nil { @@ -324,11 +249,7 @@ func LoadChainWithContext(ctx context.Context, c *Config) (chain *Chain, err err keyWithSymbolToHeight(k), string(k)) return } - log.WithFields(log.Fields{ - "peer": chain.rt.getPeerInfoString(), - "block": block.BlockHash().String(), - "db": c.DatabaseID, - }).Debug("loading block from database") + le.WithField("block", block.BlockHash().String()).Debug("loading block from database") if last == nil { if err = block.VerifyAsGenesis(); err != nil { @@ -355,13 +276,11 @@ func LoadChainWithContext(ctx context.Context, c *Config) (chain *Chain, err err id = nid } - current = &blockNode{} - current.initBlockNode(chain.rt.getHeightFromTime(block.Timestamp()), block, parent) - chain.bi.addBlock(current) - last = current + last = newBlockNode(chain.rt.getHeightFromTime(block.Timestamp()), block, parent) + chain.bi.addBlock(last) } if err = blockIter.Error(); err != nil { - err = errors.Wrap(err, "load block") + err = errors.Wrap(err, "accumulated error of iterator") return } @@ -417,6 +336,8 @@ func LoadChainWithContext(ctx context.Context, c *Config) (chain *Chain, err err } return + + return } // pushBlock pushes the signed block header to extend the current main chain. 
@@ -461,24 +382,25 @@ func (c *Chain) pushBlock(b *types.Block) (err error) { c.bi.addBlock(node) // Keep track of the queries from the new block - var ierr error + var ( + ierr error + le = log.WithFields(log.Fields{ + "db": c.databaseID, + "producer": b.Producer(), + "block_hash": b.BlockHash(), + }) + ) for i, v := range b.QueryTxs { if ierr = c.AddResponse(v.Response); ierr != nil { - log.WithFields(log.Fields{ - "index": i, - "producer": b.Producer(), - "block_hash": b.BlockHash(), - "db": c.databaseID, + le.WithFields(log.Fields{ + "index": i, }).WithError(ierr).Warn("failed to add response to ackIndex") } } for i, v := range b.Acks { if ierr = c.remove(v); ierr != nil { - log.WithFields(log.Fields{ - "index": i, - "producer": b.Producer(), - "block_hash": b.BlockHash(), - "db": c.databaseID, + le.WithFields(log.Fields{ + "index": i, }).WithError(ierr).Warn("failed to remove Ack from ackIndex") } } @@ -558,7 +480,7 @@ func (c *Chain) produceBlock(now time.Time) (err error) { QueryTxs: make([]*types.QueryAsTx, len(qts)), Acks: c.ai.acks(c.rt.getHeightFromTime(now)), } - statBlock(block) + trackBlock(block) for i, v := range qts { // TODO(leventeliu): maybe block waiting at a ready channel instead? 
for !v.Ready() { @@ -675,7 +597,7 @@ func (c *Chain) syncHead() { }).WithError(err).Debug( "Failed to fetch block from peer") } else { - statBlock(resp.Block) + trackBlock(resp.Block) select { case c.blocks <- resp.Block: case <-c.rt.ctx.Done(): @@ -724,44 +646,39 @@ func (c *Chain) runCurrentTurn(now time.Time) { c.heights <- c.rt.getHead().Height }() - log.WithFields(log.Fields{ + var le = log.WithFields(log.Fields{ + "db": c.databaseID, "peer": c.rt.getPeerInfoString(), "time": c.rt.getChainTimeString(), "curr_turn": c.rt.getNextTurn(), "head_height": c.rt.getHead().Height, "head_block": c.rt.getHead().Head.String(), "using_timestamp": now.Format(time.RFC3339Nano), - "db": c.databaseID, - }).Debug("run current turn") + }) + le.Debug("run current turn") if c.rt.getHead().Height < c.rt.getNextTurn()-1 { - log.WithFields(log.Fields{ - "peer": c.rt.getPeerInfoString(), - "time": c.rt.getChainTimeString(), - "curr_turn": c.rt.getNextTurn(), - "head_height": c.rt.getHead().Height, - "head_block": c.rt.getHead().Head.String(), - "using_timestamp": now.Format(time.RFC3339Nano), - "db": c.databaseID, - }).Error("A block will be skipped") + le.Error("a block will be skipped") } - if !c.rt.isMyTurn() { return } - if err := c.produceBlock(now); err != nil { - log.WithFields(log.Fields{ - "peer": c.rt.getPeerInfoString(), - "time": c.rt.getChainTimeString(), - "curr_turn": c.rt.getNextTurn(), - "using_timestamp": now.Format(time.RFC3339Nano), - "db": c.databaseID, - }).WithError(err).Error( - "Failed to produce block") + le.WithError(err).Error("failed to produce block") } } +func (c *Chain) logEntryWithChainStatus() *log.Entry { + return log.WithFields(log.Fields{ + "db": c.databaseID, + "peer": c.rt.getPeerInfoString(), + "time": c.rt.getChainTimeString(), + "curr_turn": c.rt.getNextTurn(), + "head_height": c.rt.getHead().Height, + "head_block": c.rt.getHead().Head.String(), + }) +} + // mainCycle runs main cycle of the sql-chain. 
func (c *Chain) mainCycle(ctx context.Context) { for { @@ -770,18 +687,7 @@ func (c *Chain) mainCycle(ctx context.Context) { return default: c.syncHead() - if t, d := c.rt.nextTick(); d > 0 { - //log.WithFields(log.Fields{ - // "peer": c.rt.getPeerInfoString(), - // "time": c.rt.getChainTimeString(), - // "next_turn": c.rt.getNextTurn(), - // "head_height": c.rt.getHead().Height, - // "head_block": c.rt.getHead().Head.String(), - // "using_timestamp": t.Format(time.RFC3339Nano), - // "duration": d, - // "db": c.databaseID, - //}).Debug("main cycle") time.Sleep(d) } else { c.runCurrentTurn(t) @@ -801,11 +707,9 @@ func (c *Chain) sync() (err error) { for { now := c.rt.now() height := c.rt.getHeightFromTime(now) - if c.rt.getNextTurn() >= height { break } - for c.rt.getNextTurn() <= height { // TODO(leventeliu): fetch blocks and queries. c.rt.setNextTurn() @@ -841,14 +745,22 @@ func (c *Chain) processBlocks(ctx context.Context) { stash []*types.Block ) for { + le := log.WithFields(log.Fields{ + "db": c.databaseID, + "peer": c.rt.getPeerInfoString(), + "time": c.rt.getChainTimeString(), + "curr_turn": c.rt.getNextTurn(), + "head_height": c.rt.getHead().Height, + "head_block": c.rt.getHead().Head.String(), + }) select { case h := <-c.heights: // Return all stashed blocks to pending channel - log.WithFields(log.Fields{ + le.WithFields(log.Fields{ "height": h, "stashs": len(stash), - "db": c.databaseID, }).Debug("read new height from channel") + if stash != nil { wg.Add(1) go returnStash(stash) @@ -856,16 +768,11 @@ func (c *Chain) processBlocks(ctx context.Context) { } case block := <-c.blocks: height := c.rt.getHeightFromTime(block.Timestamp()) - log.WithFields(log.Fields{ - "peer": c.rt.getPeerInfoString(), - "time": c.rt.getChainTimeString(), - "curr_turn": c.rt.getNextTurn(), - "head_height": c.rt.getHead().Height, - "head_block": c.rt.getHead().Head.String(), + le = le.WithFields(log.Fields{ "block_height": height, "block_hash": block.BlockHash().String(), - "db": 
c.databaseID, - }).Debug("processing new block") + }) + le.Debug("processing new block") if height > c.rt.getNextTurn()-1 { // Stash newer blocks for later check @@ -876,23 +783,14 @@ func (c *Chain) processBlocks(ctx context.Context) { // TODO(leventeliu): check and add to fork list. } else { if err := c.CheckAndPushNewBlock(block); err != nil { - log.WithFields(log.Fields{ - "peer": c.rt.getPeerInfoString(), - "time": c.rt.getChainTimeString(), - "curr_turn": c.rt.getNextTurn(), - "head_height": c.rt.getHead().Height, - "head_block": c.rt.getHead().Head.String(), - "block_height": height, - "block_hash": block.BlockHash().String(), - "db": c.databaseID, - }).WithError(err).Error("Failed to check and push new block") + le.WithError(err).Error("failed to check and push new block") } else { head := c.rt.getHead() currentCount := uint64(head.node.count) if currentCount%c.updatePeriod == 0 { ub, err := c.billing(head.node) if err != nil { - log.WithError(err).WithField("db", c.databaseID).Error("billing failed") + le.WithError(err).Error("billing failed") } // allocate nonce nonceReq := &types.NextAccountNonceReq{} @@ -900,20 +798,20 @@ func (c *Chain) processBlocks(ctx context.Context) { nonceReq.Addr = *c.addr if err = rpc.RequestBP(route.MCCNextAccountNonce.String(), nonceReq, nonceResp); err != nil { // allocate nonce failed - log.WithError(err).WithField("db", c.databaseID).Warning("allocate nonce for transaction failed") + le.WithError(err).Warning("allocate nonce for transaction failed") } ub.Nonce = nonceResp.Nonce if err = ub.Sign(c.pk); err != nil { - log.WithError(err).WithField("db", c.databaseID).Warning("sign tx failed") + le.WithError(err).Warning("sign tx failed") } addTxReq := &types.AddTxReq{TTL: 1} addTxResp := &types.AddTxResp{} addTxReq.Tx = ub - log.WithField("db", c.databaseID).Debugf("nonce in processBlocks: %d, addr: %s", + le.Debugf("nonce in processBlocks: %d, addr: %s", addTxReq.Tx.GetAccountNonce(), addTxReq.Tx.GetAccountAddress()) if err 
= rpc.RequestBP(route.MCCAddTx.String(), addTxReq, addTxResp); err != nil { - log.WithError(err).WithField("db", c.databaseID).Warning("send tx failed") + le.WithError(err).Warning("send tx failed") } } } @@ -940,44 +838,29 @@ func (c *Chain) Start() (err error) { // Stop stops the main process of the sql-chain. func (c *Chain) Stop() (err error) { // Stop main process - log.WithFields(log.Fields{ + le := log.WithFields(log.Fields{ "peer": c.rt.getPeerInfoString(), "time": c.rt.getChainTimeString(), "db": c.databaseID, - }).Debug("stopping chain") + }) + le.Debug("stopping chain") c.rt.stop(c.databaseID) - log.WithFields(log.Fields{ - "peer": c.rt.getPeerInfoString(), - "time": c.rt.getChainTimeString(), - "db": c.databaseID, - }).Debug("chain service and workers stopped") + le.Debug("chain service and workers stopped") // Close LevelDB file var ierr error if ierr = c.bdb.Close(); ierr != nil && err == nil { err = ierr } - log.WithFields(log.Fields{ - "peer": c.rt.getPeerInfoString(), - "time": c.rt.getChainTimeString(), - "db": c.databaseID, - }).WithError(ierr).Debug("chain database closed") + le.WithError(ierr).Debug("chain database closed") if ierr = c.tdb.Close(); ierr != nil && err == nil { err = ierr } - log.WithFields(log.Fields{ - "peer": c.rt.getPeerInfoString(), - "time": c.rt.getChainTimeString(), - "db": c.databaseID, - }).WithError(ierr).Debug("chain database closed") + le.WithError(ierr).Debug("chain database closed") // Close state if ierr = c.st.Close(false); ierr != nil && err == nil { err = ierr } - log.WithFields(log.Fields{ - "peer": c.rt.getPeerInfoString(), - "time": c.rt.getChainTimeString(), - "db": c.databaseID, - }).WithError(ierr).Debug("chain state storage closed") + le.WithError(ierr).Debug("chain state storage closed") return } @@ -1026,7 +909,7 @@ func (c *Chain) fetchBlockByIndexKey(indexKey []byte) (b *types.Block, err error } b = &types.Block{} - statBlock(b) + trackBlock(b) err = utils.DecodeMsgPack(v, b) if err != nil { err = 
errors.Wrapf(err, "fetch block %s", string(k)) @@ -1184,7 +1067,6 @@ func (c *Chain) stat() { "response_header_count": rc, "query_tracker_count": tc, "cached_block_count": bc, - "db": c.databaseID, }).Info("chain mem stats") // Print xeno stats c.st.Stat(c.databaseID) From be56419bfc554ccd775f54bd58130d066728023e Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Mon, 4 Mar 2019 15:20:11 +0800 Subject: [PATCH 053/244] Remove head state persistence Head state can be restored from blocks. --- sqlchain/chain.go | 122 ++++++++++++++++------------------------- sqlchain/runtime.go | 7 +++ sqlchain/state.go | 42 -------------- sqlchain/state_test.go | 60 -------------------- 4 files changed, 55 insertions(+), 176 deletions(-) delete mode 100644 sqlchain/state.go delete mode 100644 sqlchain/state_test.go diff --git a/sqlchain/chain.go b/sqlchain/chain.go index cd1e402a6..738f75a9b 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -51,7 +51,6 @@ const ( ) var ( - metaState = [4]byte{'S', 'T', 'A', 'T'} metaBlockIndex = [4]byte{'B', 'L', 'C', 'K'} metaResponseIndex = [4]byte{'R', 'E', 'S', 'P'} metaAckIndex = [4]byte{'Q', 'A', 'C', 'K'} @@ -211,25 +210,6 @@ func NewChainWithContext(ctx context.Context, c *Config) (chain *Chain, err erro } le = le.WithField("peer", chain.rt.getPeerInfoString()) - // Read state struct - stateEnc, err := chain.bdb.Get(metaState[:], nil) - if err != nil { - if err != leveldb.ErrNotFound { - err = errors.Wrap(err, "fetch head state") - return - } - // err == leveldb.ErrNotFound, chain is in initial state - err = chain.genesis(c.Genesis) - return - } - - st := &state{} - if err = utils.DecodeMsgPack(stateEnc, st); err != nil { - err = errors.Wrap(err, "decode head state") - return nil, err - } - le.WithField("state", st).Debug("loading state from database") - // Read blocks and rebuild memory index var ( id uint64 @@ -284,9 +264,21 @@ func NewChainWithContext(ctx context.Context, c *Config) (chain *Chain, err erro return } + // Initiate 
chain Genesis if block list is empty + if last == nil { + if err = chain.genesis(c.Genesis); err != nil { + return nil, err + } + return + } + // Set chain state - st.node = last - chain.rt.setHead(st) + var head = &state{ + node: last, + Head: last.hash, + Height: last.height, + } + chain.rt.setHead(head) chain.st.SetSeq(id) chain.pruneBlockCache() @@ -336,49 +328,34 @@ func NewChainWithContext(ctx context.Context, c *Config) (chain *Chain, err erro } return - - return } // pushBlock pushes the signed block header to extend the current main chain. func (c *Chain) pushBlock(b *types.Block) (err error) { // Prepare and encode - h := c.rt.getHeightFromTime(b.Timestamp()) - node := newBlockNode(h, b, c.rt.getHead().node) - st := &state{ - node: node, - Head: node.hash, - Height: node.height, - } - var encBlock, encState *bytes.Buffer + var ( + h = c.rt.getHeightFromTime(b.Timestamp()) + node = newBlockNode(h, b, c.rt.getHead().node) + head = &state{ + node: node, + Head: node.hash, + Height: node.height, + } + blockKey = utils.ConcatAll(metaBlockIndex[:], node.indexKey()) + encBlock *bytes.Buffer + ) if encBlock, err = utils.EncodeMsgPack(b); err != nil { return } - if encState, err = utils.EncodeMsgPack(st); err != nil { - return - } - - // Update in transaction - t, err := c.bdb.OpenTransaction() - if err = t.Put(metaState[:], encState.Bytes(), nil); err != nil { - err = errors.Wrapf(err, "put %s", string(metaState[:])) - t.Discard() - return - } - blockKey := utils.ConcatAll(metaBlockIndex[:], node.indexKey()) - if err = t.Put(blockKey, encBlock.Bytes(), nil); err != nil { + // Put block + err = c.bdb.Put(blockKey, encBlock.Bytes(), nil) + if err != nil { err = errors.Wrapf(err, "put %s", string(node.indexKey())) - t.Discard() return } - if err = t.Commit(); err != nil { - err = errors.Wrapf(err, "commit error") - t.Discard() - return - } - c.rt.setHead(st) + c.rt.setHead(head) c.bi.addBlock(node) // Keep track of the queries from the new block @@ -405,28 +382,25 
@@ func (c *Chain) pushBlock(b *types.Block) (err error) { } } - if err == nil { - log.WithFields(log.Fields{ - "peer": c.rt.getPeerInfoString()[:14], - "time": c.rt.getChainTimeString(), - "block": b.BlockHash().String()[:8], - "producer": b.Producer()[:8], - "queryCount": len(b.QueryTxs), - "ackCount": len(b.Acks), - "blockTime": b.Timestamp().Format(time.RFC3339Nano), - "height": c.rt.getHeightFromTime(b.Timestamp()), - "head": fmt.Sprintf("%s <- %s", - func() string { - if st.node.parent != nil { - return st.node.parent.hash.String()[:8] - } - return "|" - }(), st.Head.String()[:8]), - "headHeight": c.rt.getHead().Height, - "db": c.databaseID, - }).Info("pushed new block") - } - + log.WithFields(log.Fields{ + "db": c.databaseID, + "peer": c.rt.getPeerInfoString()[:14], + "time": c.rt.getChainTimeString(), + "block": b.BlockHash().String()[:8], + "producer": b.Producer()[:8], + "queryCount": len(b.QueryTxs), + "ackCount": len(b.Acks), + "blockTime": b.Timestamp().Format(time.RFC3339Nano), + "height": c.rt.getHeightFromTime(b.Timestamp()), + "head": fmt.Sprintf("%s <- %s", + func() string { + if head.node.parent != nil { + return head.node.parent.hash.String()[:8] + } + return "|" + }(), head.Head.String()[:8]), + "headHeight": c.rt.getHead().Height, + }).Info("pushed new block") return } diff --git a/sqlchain/runtime.go b/sqlchain/runtime.go index 88c680d7e..baf1c82f1 100644 --- a/sqlchain/runtime.go +++ b/sqlchain/runtime.go @@ -28,6 +28,13 @@ import ( "github.com/CovenantSQL/CovenantSQL/utils/log" ) +// state represents a snapshot of current best chain. +type state struct { + node *blockNode + Head hash.Hash + Height int32 +} + // runtime represents a chain runtime state. type runtime struct { wg *sync.WaitGroup diff --git a/sqlchain/state.go b/sqlchain/state.go deleted file mode 100644 index 9f7c00593..000000000 --- a/sqlchain/state.go +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sqlchain - -import ( - "github.com/CovenantSQL/CovenantSQL/crypto/hash" -) - -// state represents a snapshot of current best chain. -type state struct { - node *blockNode - Head hash.Hash - Height int32 -} - -//// MarshalHash marshals for hash -//func (s *state) MarshalHash() ([]byte, error) { -// buffer := bytes.NewBuffer(nil) -// -// if err := utils.WriteElements(buffer, binary.BigEndian, -// s.Head, -// s.Height, -// ); err != nil { -// return nil, err -// } -// -// return buffer.Bytes(), nil -//} diff --git a/sqlchain/state_test.go b/sqlchain/state_test.go deleted file mode 100644 index 622e8bce1..000000000 --- a/sqlchain/state_test.go +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package sqlchain - -import ( - "math/rand" - "reflect" - "testing" - - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/utils" -) - -func TestState(t *testing.T) { - st := &state{ - node: nil, - Head: hash.Hash{}, - Height: 0, - } - - rand.Read(st.Head[:]) - buffer, err := utils.EncodeMsgPack(st) - - if err != nil { - t.Fatalf("error occurred: %v", err) - } - - rState := &state{} - err = utils.DecodeMsgPack(buffer.Bytes(), rState) - - if err != nil { - t.Fatalf("error occurred: %v", err) - } - - //err = rState.UnmarshalBinary(nil) - // - //if err != nil { - // t.Logf("Error occurred as expected: %v", err) - //} else { - // t.Fatal("unexpected result: returned nil while expecting an error") - //} - - if !reflect.DeepEqual(st, rState) { - t.Fatalf("values don't match: v1 = %v, v2 = %v", st, rState) - } -} From 2d0717c3c88dfb3b4eb442f9a755828c9b1d55d2 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 4 Mar 2019 17:33:31 +0800 Subject: [PATCH 054/244] Add logging http debug handler --- utils/log/debug/handler.go | 53 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 utils/log/debug/handler.go diff --git a/utils/log/debug/handler.go b/utils/log/debug/handler.go new file mode 100644 index 000000000..45a3fa45b --- /dev/null +++ b/utils/log/debug/handler.go @@ -0,0 +1,53 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package debug + +import ( + "encoding/json" + "net/http" + + "github.com/CovenantSQL/CovenantSQL/utils/log" +) + +func init() { + http.HandleFunc("/debug/covenantsql/loglevel", + func(w http.ResponseWriter, req *http.Request) { + data := map[string]interface{}{} + switch req.Method { + case http.MethodPost: + level := req.FormValue("level") + data["orig"] = log.GetLevel().String() + if level != "" { + data["want"] = level + lvl, err := log.ParseLevel(level) + if err != nil { + data["err"] = err.Error() + } else { + // set level + log.SetLevel(lvl) + } + } + fallthrough + case http.MethodGet: + data["level"] = log.GetLevel().String() + _ = json.NewEncoder(w).Encode(data) + default: + w.WriteHeader(http.StatusBadRequest) + } + }, + ) +} From 1d346be8571e0d58f3ebaa578550ffecf9988af5 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 4 Mar 2019 17:38:31 +0800 Subject: [PATCH 055/244] Enable log level switcher in cql-minerd and cqld --- cmd/cql-minerd/main.go | 1 + cmd/cqld/main.go | 1 + 2 files changed, 2 insertions(+) diff --git a/cmd/cql-minerd/main.go b/cmd/cql-minerd/main.go index 129f16599..f8c02c561 100644 --- a/cmd/cql-minerd/main.go +++ b/cmd/cql-minerd/main.go @@ -37,6 +37,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" + _ "github.com/CovenantSQL/CovenantSQL/utils/log/debug" "github.com/CovenantSQL/CovenantSQL/utils/trace" "github.com/CovenantSQL/CovenantSQL/worker" ) diff --git a/cmd/cqld/main.go b/cmd/cqld/main.go index 00e7fd1bb..f69ee9594 100644 --- a/cmd/cqld/main.go +++ b/cmd/cqld/main.go @@ -31,6 +31,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/metric" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" + _ "github.com/CovenantSQL/CovenantSQL/utils/log/debug" ) const logo = ` From cd0520715603047ea716e444289283ce2561ffdf Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Mon, 4 Mar 2019 18:42:08 +0800 Subject: 
[PATCH 056/244] Fix format string of errors wrap --- worker/dbms.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/worker/dbms.go b/worker/dbms.go index 9ea0622d8..e12641249 100644 --- a/worker/dbms.go +++ b/worker/dbms.go @@ -541,17 +541,17 @@ func (dbms *DBMS) checkPermission(addr proto.AccountAddress, switch queryType { case types.ReadQuery: if !permStat.Permission.HasReadPermission() { - err = errors.Wrapf(ErrPermissionDeny, "cannot read, permission: %d", permStat.Permission) + err = errors.Wrapf(ErrPermissionDeny, "cannot read, permission: %v", permStat.Permission) return } case types.WriteQuery: if !permStat.Permission.HasWritePermission() { - err = errors.Wrapf(ErrPermissionDeny, "cannot write, permission: %d", permStat.Permission) + err = errors.Wrapf(ErrPermissionDeny, "cannot write, permission: %v", permStat.Permission) return } default: err = errors.Wrapf(ErrInvalidPermission, - "invalid permission, permission: %d", permStat.Permission) + "invalid permission, permission: %v", permStat.Permission) return } From 57931d4c75b1f347266c70b1de6e572dc5e8d449 Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Mon, 4 Mar 2019 20:54:56 +0800 Subject: [PATCH 057/244] Format logs and remove unused fields in chain --- sqlchain/chain.go | 91 ++++++++++++++++------------------------------- 1 file changed, 31 insertions(+), 60 deletions(-) diff --git a/sqlchain/chain.go b/sqlchain/chain.go index 738f75a9b..9efa93a77 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -54,16 +54,14 @@ var ( metaBlockIndex = [4]byte{'B', 'L', 'C', 'K'} metaResponseIndex = [4]byte{'R', 'E', 'S', 'P'} metaAckIndex = [4]byte{'Q', 'A', 'C', 'K'} - leveldbConf = opt.Options{} + leveldbConf = opt.Options{ + Compression: opt.SnappyCompression, + } // Atomic counters for stats cachedBlockCount int32 ) -func init() { - leveldbConf.Compression = opt.SnappyCompression -} - func trackBlock(b *types.Block) { atomic.AddInt32(&cachedBlockCount, 1) rt.SetFinalizer(b, func(_ 
*types.Block) { @@ -105,7 +103,6 @@ type Chain struct { st *x.State cl *rpc.Caller rt *runtime - ctx context.Context // ctx is the root context of Chain blocks chan *types.Block heights chan int32 @@ -126,18 +123,6 @@ type Chain struct { addr *proto.AccountAddress } -func (c *Chain) genesis(b *types.Block) (err error) { - if b == nil { - err = errors.New("genesis block not provided") - return - } - if err = b.VerifyAsGenesis(); err != nil { - err = errors.Wrap(err, "initialize chain state") - return - } - return c.pushBlock(b) -} - // NewChain creates a new sql-chain struct. func NewChain(c *Config) (chain *Chain, err error) { return NewChainWithContext(context.Background(), c) @@ -195,7 +180,6 @@ func NewChainWithContext(ctx context.Context, c *Config) (chain *Chain, err erro st: x.NewState(sql.IsolationLevel(c.IsolationLevel), c.Server, strg), cl: rpc.NewCaller(), rt: newRunTime(ctx, c), - ctx: ctx, blocks: make(chan *types.Block), heights: make(chan int32, 1), responses: make(chan *types.ResponseHeader), @@ -330,6 +314,18 @@ func NewChainWithContext(ctx context.Context, c *Config) (chain *Chain, err erro return } +func (c *Chain) genesis(b *types.Block) (err error) { + if b == nil { + err = errors.New("genesis block not provided") + return + } + if err = b.VerifyAsGenesis(); err != nil { + err = errors.Wrap(err, "initialize chain state") + return + } + return c.pushBlock(b) +} + // pushBlock pushes the signed block header to extend the current main chain. 
func (c *Chain) pushBlock(b *types.Block) (err error) { // Prepare and encode @@ -553,23 +549,24 @@ func (c *Chain) syncHead() { } resp := &MuxFetchBlockResp{} peers := c.rt.getPeers() + l := len(peers.Servers) succ := false + le := log.WithFields(log.Fields{ + "db": c.databaseID, + "peer": c.rt.getPeerInfoString(), + "time": c.rt.getChainTimeString(), + "curr_turn": c.rt.getNextTurn(), + "head_height": c.rt.getHead().Height, + "head_block": c.rt.getHead().Head.String(), + }) for i, s := range peers.Servers { + ile := le.WithFields(log.Fields{"remote": fmt.Sprintf("[%d/%d] %s", i, l, s)}) if s != c.rt.getServer() { if err = c.cl.CallNode( s, route.SQLCFetchBlock.String(), req, resp, ); err != nil || resp.Block == nil { - log.WithFields(log.Fields{ - "peer": c.rt.getPeerInfoString(), - "time": c.rt.getChainTimeString(), - "remote": fmt.Sprintf("[%d/%d] %s", i, len(peers.Servers), s), - "curr_turn": c.rt.getNextTurn(), - "head_height": c.rt.getHead().Height, - "head_block": c.rt.getHead().Head.String(), - "db": c.databaseID, - }).WithError(err).Debug( - "Failed to fetch block from peer") + ile.WithError(err).Debug("failed to fetch block from peer") } else { trackBlock(resp.Block) select { @@ -578,16 +575,7 @@ func (c *Chain) syncHead() { err = c.rt.ctx.Err() return } - log.WithFields(log.Fields{ - "peer": c.rt.getPeerInfoString(), - "time": c.rt.getChainTimeString(), - "remote": fmt.Sprintf("[%d/%d] %s", i, len(peers.Servers), s), - "curr_turn": c.rt.getNextTurn(), - "head_height": c.rt.getHead().Height, - "head_block": c.rt.getHead().Head.String(), - "db": c.databaseID, - }).Debug( - "Fetch block from remote peer successfully") + ile.Debug("fetch block from remote peer successfully") succ = true break } @@ -595,15 +583,7 @@ func (c *Chain) syncHead() { } if !succ { - log.WithFields(log.Fields{ - "peer": c.rt.getPeerInfoString(), - "time": c.rt.getChainTimeString(), - "curr_turn": c.rt.getNextTurn(), - "head_height": c.rt.getHead().Height, - "head_block": 
c.rt.getHead().Head.String(), - "db": c.databaseID, - }).Debug( - "Cannot get block from any peer") + le.Debug("cannot get block from any peer") } } } @@ -642,17 +622,6 @@ func (c *Chain) runCurrentTurn(now time.Time) { } } -func (c *Chain) logEntryWithChainStatus() *log.Entry { - return log.WithFields(log.Fields{ - "db": c.databaseID, - "peer": c.rt.getPeerInfoString(), - "time": c.rt.getChainTimeString(), - "curr_turn": c.rt.getNextTurn(), - "head_height": c.rt.getHead().Height, - "head_block": c.rt.getHead().Head.String(), - }) -} - // mainCycle runs main cycle of the sql-chain. func (c *Chain) mainCycle(ctx context.Context) { for { @@ -971,7 +940,9 @@ func (c *Chain) CheckAndPushNewBlock(block *types.Block) (err error) { func (c *Chain) VerifyAndPushAckedQuery(ack *types.SignedAckHeader) (err error) { // TODO(leventeliu): check ack. if c.rt.queryTimeIsExpired(ack.GetResponseTimestamp()) { - err = errors.Wrapf(ErrQueryExpired, "Verify ack query, min valid height %d, ack height %d", c.rt.getMinValidHeight(), c.rt.getHeightFromTime(ack.Timestamp)) + err = errors.Wrapf(ErrQueryExpired, + "Verify ack query, min valid height %d, ack height %d", + c.rt.getMinValidHeight(), c.rt.getHeightFromTime(ack.Timestamp)) return } From 6552567dc98f4321dcabca4eaef0803ec4115586 Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Mon, 4 Mar 2019 21:15:33 +0800 Subject: [PATCH 058/244] Fix genesis block height --- sqlchain/runtime.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sqlchain/runtime.go b/sqlchain/runtime.go index baf1c82f1..b60a6a2e3 100644 --- a/sqlchain/runtime.go +++ b/sqlchain/runtime.go @@ -145,7 +145,7 @@ func (r *runtime) setGenesis(b *types.Block) { r.head = &state{ node: nil, Head: *b.GenesisHash(), - Height: -1, + Height: 0, } } From b05e8c25c3bc7a82b1abd4442425c6ed2bf7ecaa Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Tue, 5 Mar 2019 10:23:22 +0800 Subject: [PATCH 059/244] Format more logs --- sqlchain/chain.go | 36 
+++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/sqlchain/chain.go b/sqlchain/chain.go index 9efa93a77..1bf186670 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -471,20 +471,22 @@ func (c *Chain) produceBlock(now time.Time) (err error) { return } // Send to pending list + le := log.WithFields(log.Fields{ + "db": c.databaseID, + "peer": c.rt.getPeerInfoString(), + "time": c.rt.getChainTimeString(), + "curr_turn": c.rt.getNextTurn(), + "using_timestamp": now.Format(time.RFC3339Nano), + "block_hash": block.BlockHash().String(), + }) select { case c.blocks <- block: case <-c.rt.ctx.Done(): err = c.rt.ctx.Err() + le.WithError(err).Debug("abort block producing") return } - log.WithFields(log.Fields{ - "peer": c.rt.getPeerInfoString(), - "time": c.rt.getChainTimeString(), - "curr_turn": c.rt.getNextTurn(), - "using_timestamp": now.Format(time.RFC3339Nano), - "block_hash": block.BlockHash().String(), - "db": c.databaseID, - }).Debug("produced new block") + le.Debug("produced new block") // Advise new block to the other peers var ( req = &MuxAdviseNewBlockReq{ @@ -517,14 +519,7 @@ func (c *Chain) produceBlock(now time.Time) (err error) { if err := c.cl.CallNodeWithContext( c.rt.ctx, id, route.SQLCAdviseNewBlock.String(), req, resp, ); err != nil { - log.WithFields(log.Fields{ - "peer": c.rt.getPeerInfoString(), - "time": c.rt.getChainTimeString(), - "curr_turn": c.rt.getNextTurn(), - "using_timestamp": now.Format(time.RFC3339Nano), - "block_hash": block.BlockHash().String(), - "db": c.databaseID, - }).WithError(err).Error("failed to advise new block") + le.WithError(err).Error("failed to advise new block") } }(s) } @@ -573,6 +568,7 @@ func (c *Chain) syncHead() { case c.blocks <- resp.Block: case <-c.rt.ctx.Done(): err = c.rt.ctx.Err() + le.WithError(err).Debug("abort head block synchronizing") return } ile.Debug("fetch block from remote peer successfully") @@ -627,6 +623,9 @@ func (c *Chain) mainCycle(ctx 
context.Context) { for { select { case <-ctx.Done(): + log.WithFields(log.Fields{ + "db": c.databaseID, + }).WithError(ctx.Err()).Debug("abort main cycle") return default: c.syncHead() @@ -684,9 +683,7 @@ func (c *Chain) processBlocks(ctx context.Context) { wg.Wait() }() - var ( - stash []*types.Block - ) + var stash []*types.Block for { le := log.WithFields(log.Fields{ "db": c.databaseID, @@ -761,6 +758,7 @@ func (c *Chain) processBlocks(ctx context.Context) { } } case <-ctx.Done(): + le.WithError(ctx.Err()).Debug("abort block processing") return } } From 9f5ab4277462c31c291b6e3c4cee8ca6b7199743 Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Tue, 5 Mar 2019 11:20:53 +0800 Subject: [PATCH 060/244] Add common method to create log entry context --- sqlchain/chain.go | 123 +++++++++++++++++++--------------------------- 1 file changed, 50 insertions(+), 73 deletions(-) diff --git a/sqlchain/chain.go b/sqlchain/chain.go index 1bf186670..1f4314b8e 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -367,7 +367,7 @@ func (c *Chain) pushBlock(b *types.Block) (err error) { if ierr = c.AddResponse(v.Response); ierr != nil { le.WithFields(log.Fields{ "index": i, - }).WithError(ierr).Warn("failed to add response to ackIndex") + }).WithError(ierr).Warn("failed to add Response to ackIndex") } } for i, v := range b.Acks { @@ -378,10 +378,7 @@ func (c *Chain) pushBlock(b *types.Block) (err error) { } } - log.WithFields(log.Fields{ - "db": c.databaseID, - "peer": c.rt.getPeerInfoString()[:14], - "time": c.rt.getChainTimeString(), + c.logEntry().WithFields(log.Fields{ "block": b.BlockHash().String()[:8], "producer": b.Producer()[:8], "queryCount": len(b.QueryTxs), @@ -471,11 +468,7 @@ func (c *Chain) produceBlock(now time.Time) (err error) { return } // Send to pending list - le := log.WithFields(log.Fields{ - "db": c.databaseID, - "peer": c.rt.getPeerInfoString(), - "time": c.rt.getChainTimeString(), - "curr_turn": c.rt.getNextTurn(), + le := 
c.logEntryWithHeadState().WithFields(log.Fields{ "using_timestamp": now.Format(time.RFC3339Nano), "block_hash": block.BlockHash().String(), }) @@ -483,7 +476,7 @@ func (c *Chain) produceBlock(now time.Time) (err error) { case c.blocks <- block: case <-c.rt.ctx.Done(): err = c.rt.ctx.Err() - le.WithError(err).Debug("abort block producing") + le.WithError(err).Info("abort block producing") return } le.Debug("produced new block") @@ -546,14 +539,7 @@ func (c *Chain) syncHead() { peers := c.rt.getPeers() l := len(peers.Servers) succ := false - le := log.WithFields(log.Fields{ - "db": c.databaseID, - "peer": c.rt.getPeerInfoString(), - "time": c.rt.getChainTimeString(), - "curr_turn": c.rt.getNextTurn(), - "head_height": c.rt.getHead().Height, - "head_block": c.rt.getHead().Head.String(), - }) + le := c.logEntryWithHeadState() for i, s := range peers.Servers { ile := le.WithFields(log.Fields{"remote": fmt.Sprintf("[%d/%d] %s", i, l, s)}) @@ -568,7 +554,7 @@ func (c *Chain) syncHead() { case c.blocks <- resp.Block: case <-c.rt.ctx.Done(): err = c.rt.ctx.Err() - le.WithError(err).Debug("abort head block synchronizing") + le.WithError(err).Info("abort head block synchronizing") return } ile.Debug("fetch block from remote peer successfully") @@ -596,13 +582,7 @@ func (c *Chain) runCurrentTurn(now time.Time) { c.heights <- c.rt.getHead().Height }() - var le = log.WithFields(log.Fields{ - "db": c.databaseID, - "peer": c.rt.getPeerInfoString(), - "time": c.rt.getChainTimeString(), - "curr_turn": c.rt.getNextTurn(), - "head_height": c.rt.getHead().Height, - "head_block": c.rt.getHead().Head.String(), + le := c.logEntryWithHeadState().WithFields(log.Fields{ "using_timestamp": now.Format(time.RFC3339Nano), }) @@ -623,9 +603,7 @@ func (c *Chain) mainCycle(ctx context.Context) { for { select { case <-ctx.Done(): - log.WithFields(log.Fields{ - "db": c.databaseID, - }).WithError(ctx.Err()).Debug("abort main cycle") + c.logEntry().WithError(ctx.Err()).Info("abort main cycle") return 
default: c.syncHead() @@ -640,12 +618,7 @@ func (c *Chain) mainCycle(ctx context.Context) { // sync synchronizes blocks and queries from the other peers. func (c *Chain) sync() (err error) { - log.WithFields(log.Fields{ - "peer": c.rt.getPeerInfoString(), - "time": c.rt.getChainTimeString(), - "db": c.databaseID, - }).Debug("synchronizing chain state") - + c.logEntry().Debug("synchronizing chain state") for { now := c.rt.now() height := c.rt.getHeightFromTime(now) @@ -657,7 +630,6 @@ func (c *Chain) sync() (err error) { c.rt.setNextTurn() } } - return } @@ -669,10 +641,13 @@ func (c *Chain) processBlocks(ctx context.Context) { returnStash := func(stash []*types.Block) { defer wg.Done() - for _, block := range stash { + for i, block := range stash { select { case c.blocks <- block: case <-cld.Done(): + c.logEntry().WithFields(log.Fields{ + "remaining": len(stash) - i, + }).WithError(cld.Err()).Debug("abort stash returning") return } } @@ -685,22 +660,13 @@ func (c *Chain) processBlocks(ctx context.Context) { var stash []*types.Block for { - le := log.WithFields(log.Fields{ - "db": c.databaseID, - "peer": c.rt.getPeerInfoString(), - "time": c.rt.getChainTimeString(), - "curr_turn": c.rt.getNextTurn(), - "head_height": c.rt.getHead().Height, - "head_block": c.rt.getHead().Head.String(), - }) select { case h := <-c.heights: // Return all stashed blocks to pending channel - le.WithFields(log.Fields{ + c.logEntryWithHeadState().WithFields(log.Fields{ "height": h, "stashs": len(stash), }).Debug("read new height from channel") - if stash != nil { wg.Add(1) go returnStash(stash) @@ -708,7 +674,7 @@ func (c *Chain) processBlocks(ctx context.Context) { } case block := <-c.blocks: height := c.rt.getHeightFromTime(block.Timestamp()) - le = le.WithFields(log.Fields{ + le := c.logEntryWithHeadState().WithFields(log.Fields{ "block_height": height, "block_hash": block.BlockHash().String(), }) @@ -758,7 +724,7 @@ func (c *Chain) processBlocks(ctx context.Context) { } } case 
<-ctx.Done(): - le.WithError(ctx.Err()).Debug("abort block processing") + c.logEntryWithHeadState().WithError(ctx.Err()).Debug("abort block processing") return } } @@ -779,11 +745,7 @@ func (c *Chain) Start() (err error) { // Stop stops the main process of the sql-chain. func (c *Chain) Stop() (err error) { // Stop main process - le := log.WithFields(log.Fields{ - "peer": c.rt.getPeerInfoString(), - "time": c.rt.getChainTimeString(), - "db": c.databaseID, - }) + le := c.logEntry() le.Debug("stopping chain") c.rt.stop(c.databaseID) le.Debug("chain service and workers stopped") @@ -872,29 +834,27 @@ func (c *Chain) CheckAndPushNewBlock(block *types.Block) (err error) { } return -1 }() - log.WithFields(log.Fields{ - "peer": c.rt.getPeerInfoString(), - "time": c.rt.getChainTimeString(), + le := c.logEntryWithHeadState().WithFields(log.Fields{ "block": block.BlockHash().String(), "producer": block.Producer(), "blocktime": block.Timestamp().Format(time.RFC3339Nano), "blockheight": height, "blockparent": block.ParentHash().String(), - "headblock": head.Head.String(), - "headheight": head.Height, - "db": c.databaseID, - }).WithError(err).Debug("checking new block from other peer") + }) + le.Debug("checking new block from other peer") if head.Height == height && head.Head.IsEqual(block.BlockHash()) { // Maybe already set by FetchBlock return nil } else if !block.ParentHash().IsEqual(&head.Head) { - // Pushed block must extend the best chain + err = ErrInvalidBlock + le.WithError(err).Error("invalid new block for the current chain") return ErrInvalidBlock } // Verify block signatures if err = block.Verify(); err != nil { + le.WithError(err).Error("failed to verify block") return } @@ -904,21 +864,19 @@ func (c *Chain) CheckAndPushNewBlock(block *types.Block) (err error) { } // Check block producer index, found := peers.Find(block.Producer()) - if !found { + err = ErrUnknownProducer + le.WithError(err).Error("unknown producer of new block") return ErrUnknownProducer } if 
index != next { - log.WithFields(log.Fields{ - "peer": c.rt.getPeerInfoString(), - "time": c.rt.getChainTimeString(), + err = ErrInvalidProducer + le.WithFields(log.Fields{ "expected": next, "actual": index, - "db": c.databaseID, - }).WithError(err).Error( - "Failed to check new block") - return ErrInvalidProducer + }).WithError(err).Error("invalid producer of new block") + return } // TODO(leventeliu): check if too many periods are skipped or store block for future use. @@ -928,6 +886,7 @@ func (c *Chain) CheckAndPushNewBlock(block *types.Block) (err error) { // Replicate local state from the new block if err = c.st.ReplayBlockWithContext(c.rt.ctx, block); err != nil { + le.WithError(err).Error("failed to replay new block") return } @@ -1004,8 +963,7 @@ func (c *Chain) stat() { bc = atomic.LoadInt32(&cachedBlockCount) ) // Print chain stats - log.WithFields(log.Fields{ - "database_id": c.databaseID, + c.logEntry().WithFields(log.Fields{ "multiIndex_count": ic, "response_header_count": rc, "query_tracker_count": tc, @@ -1099,3 +1057,22 @@ func (c *Chain) billing(node *blockNode) (ub *types.UpdateBilling, err error) { ub.Receiver, err = c.databaseID.AccountAddress() return } + +func (c *Chain) logEntry() *log.Entry { + return log.WithFields(log.Fields{ + "db": c.databaseID, + "peer": c.rt.getPeerInfoString(), + "offset": c.rt.getChainTimeString(), + }) +} + +func (c *Chain) logEntryWithHeadState() *log.Entry { + return log.WithFields(log.Fields{ + "db": c.databaseID, + "peer": c.rt.getPeerInfoString(), + "offset": c.rt.getChainTimeString(), + "curr_turn": c.rt.getNextTurn(), + "head_height": c.rt.getHead().Height, + "head_block": c.rt.getHead().Head.String(), + }) +} From a8e9d47e2f806b4e29b2c187d3ab4a08257a9185 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 5 Mar 2019 16:59:57 +0800 Subject: [PATCH 061/244] Add test for utils/log/debug package --- utils/log/debug/handler_test.go | 105 ++++++++++++++++++++++++++++++++ 1 file changed, 105 insertions(+) create mode 
100644 utils/log/debug/handler_test.go diff --git a/utils/log/debug/handler_test.go b/utils/log/debug/handler_test.go new file mode 100644 index 000000000..8ba588dd2 --- /dev/null +++ b/utils/log/debug/handler_test.go @@ -0,0 +1,105 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package debug_test + +import ( + "encoding/json" + "net" + "net/http" + "testing" + + "github.com/jmoiron/jsonq" + . "github.com/smartystreets/goconvey/convey" + + "github.com/CovenantSQL/CovenantSQL/utils/log" +) + +func parseResponse(resp *http.Response, r error) (result *jsonq.JsonQuery, err error) { + if r != nil { + err = r + return + } + + var res map[string]interface{} + err = json.NewDecoder(resp.Body).Decode(&res) + if err != nil { + return + } + + result = jsonq.NewQuery(res) + return +} + +func mustJSONQ(c C) func(interface{}, error) interface{} { + return func(i interface{}, e error) interface{} { + c.So(e, ShouldBeNil) + return i + } +} + +func TestDebugHandler(t *testing.T) { + Convey("test debug handler", t, func(c C) { + server := http.Server{} + listener, err := net.Listen("tcp", ":0") + So(err, ShouldBeNil) + defer func() { + _ = listener.Close() + }() + go func() { + _ = server.Serve(listener) + }() + log.SetLevel(log.DebugLevel) + url := "http://" + listener.Addr().String() + "/debug/covenantsql/loglevel" + resp, err := parseResponse(http.Get(url)) + So(err, ShouldBeNil) + 
So(mustJSONQ(c)(resp.String("level")), ShouldEqual, log.GetLevel().String()) + resp, err = parseResponse(http.PostForm(url, map[string][]string{"level": {"fatal"}})) + So(err, ShouldBeNil) + So(mustJSONQ(c)(resp.String("level")), ShouldEqual, log.GetLevel().String()) + So(log.GetLevel().String(), ShouldEqual, "fatal") + So(mustJSONQ(c)(resp.String("orig")), ShouldEqual, "debug") + So(mustJSONQ(c)(resp.String("want")), ShouldEqual, "fatal") + resp, err = parseResponse(http.PostForm(url, map[string][]string{"level": {"info"}})) + So(err, ShouldBeNil) + So(mustJSONQ(c)(resp.String("level")), ShouldEqual, log.GetLevel().String()) + So(log.GetLevel().String(), ShouldEqual, "info") + So(mustJSONQ(c)(resp.String("orig")), ShouldEqual, "fatal") + So(mustJSONQ(c)(resp.String("want")), ShouldEqual, "info") + + // test invalid level + resp, err = parseResponse(http.PostForm(url, map[string][]string{"level": {"happy"}})) + So(err, ShouldBeNil) + So(mustJSONQ(c)(resp.String("level")), ShouldEqual, log.GetLevel().String()) + So(log.GetLevel().String(), ShouldEqual, "info") + So(mustJSONQ(c)(resp.String("orig")), ShouldEqual, "info") + So(mustJSONQ(c)(resp.String("want")), ShouldEqual, "happy") + So(mustJSONQ(c)(resp.String("err")), ShouldNotBeEmpty) + + // test empty level + resp, err = parseResponse(http.PostForm(url, nil)) + So(err, ShouldBeNil) + So(mustJSONQ(c)(resp.String("level")), ShouldEqual, log.GetLevel().String()) + So(log.GetLevel().String(), ShouldEqual, "info") + So(mustJSONQ(c)(resp.String("orig")), ShouldEqual, "info") + + // test invalid query + rawResp, err := http.Head(url) + So(err, ShouldBeNil) + So(rawResp.StatusCode, ShouldEqual, http.StatusBadRequest) + + }) +} From ad00f30709e577c2c5ddfb2a6b9d496e8a747e68 Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 5 Mar 2019 17:18:42 +0800 Subject: [PATCH 062/244] Update README.md --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 41587585f..6da536ba9 
100644 --- a/README.md +++ b/README.md @@ -28,8 +28,9 @@ [中文简介](https://github.com/CovenantSQL/CovenantSQL/blob/develop/README-zh.md) -CovenantSQL is a decentralized, crowdsourcing SQL database on blockchain with features: +CovenantSQL is a GDPR-compliant SQL database running on Open Internet without central coordination: +- **GDPR-compliant**: Zero pain to be GDPR-compliant. - **SQL**: most SQL-92 support. - **Decentralize**: decentralize with our consensus algorithm DH-RPC & Kayak. - **Privacy**: access with granted permission and Encryption Pass. From b19c65b12f0d281a09da01ea583fd6d4b381e71e Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Tue, 5 Mar 2019 21:31:11 +0800 Subject: [PATCH 063/244] Skip empty blocks --- sqlchain/chain.go | 25 +++++++++++++++---------- sqlchain/chain_test.go | 8 ++++---- 2 files changed, 19 insertions(+), 14 deletions(-) diff --git a/sqlchain/chain.go b/sqlchain/chain.go index 1f4314b8e..a280f1f35 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -430,6 +430,11 @@ func (c *Chain) produceBlock(now time.Time) (err error) { qts []*x.QueryTracker ) if frs, qts, err = c.st.CommitEx(); err != nil { + err = errors.Wrap(err, "failed to fetch query list from db state") + return + } + if len(frs) == 0 && len(qts) == 0 { + c.logEntryWithHeadState().Debug("no query found in current period, skip block producing") return } var block = &types.Block{ @@ -617,20 +622,23 @@ func (c *Chain) mainCycle(ctx context.Context) { } // sync synchronizes blocks and queries from the other peers. 
-func (c *Chain) sync() (err error) { - c.logEntry().Debug("synchronizing chain state") +func (c *Chain) sync() { + le := c.logEntry() + le.Debug("synchronizing chain state") for { now := c.rt.now() height := c.rt.getHeightFromTime(now) - if c.rt.getNextTurn() >= height { + if now.Before(c.rt.chainInitTime) { + le.Debug("now time is before genesis time, waiting for genesis") + return + } + if c.rt.getNextTurn() > height { break } for c.rt.getNextTurn() <= height { - // TODO(leventeliu): fetch blocks and queries. - c.rt.setNextTurn() + c.syncHead() } } - return } func (c *Chain) processBlocks(ctx context.Context) { @@ -732,11 +740,8 @@ func (c *Chain) processBlocks(ctx context.Context) { // Start starts the main process of the sql-chain. func (c *Chain) Start() (err error) { - if err = c.sync(); err != nil { - return - } - c.rt.goFunc(c.processBlocks) + c.sync() c.rt.goFunc(c.mainCycle) c.rt.startService(c) return diff --git a/sqlchain/chain_test.go b/sqlchain/chain_test.go index 4fbba3cf9..4e75c37b2 100644 --- a/sqlchain/chain_test.go +++ b/sqlchain/chain_test.go @@ -104,7 +104,7 @@ func TestMultiChain(t *testing.T) { } for i, p := range peers.Servers { - t.Logf("Peer #%d: %s", i, p) + t.Logf("peer #%d: %s", i, p) } // Create config info from created nodes @@ -265,7 +265,7 @@ func TestMultiChain(t *testing.T) { if chain, err := NewChain(p.config); err != nil { t.Errorf("error occurred: %v", err) } else { - t.Logf("Load chain from file %s: head = %s height = %d", + t.Logf("load chain from file %s: head = %s height = %d", p.dbfile, chain.rt.getHead().Head, chain.rt.getHead().Height) } }(v) @@ -290,12 +290,12 @@ func TestMultiChain(t *testing.T) { for i := int32(0); i <= ch; i++ { var node *blockNode if node = c.rt.getHead().node.ancestor(i); node == nil { - t.Logf("Block at height %d not found in peer %s, continue", + t.Logf("block at height %d not found in peer %s, continue", i, c.rt.getPeerInfoString()) continue } if node.block != nil { - t.Logf("Checking block %v 
at height %d in peer %s", + t.Logf("checking block %v at height %d in peer %s", node.block.BlockHash(), i, c.rt.getPeerInfoString()) } } From 309be06c60e291ab7fd2df56e918c7d33a27b76a Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 5 Mar 2019 16:30:47 +0800 Subject: [PATCH 064/244] Combine cql-adapter to cql command. --- Makefile | 10 +-- cmd/cql-adapter/main.go | 83 ------------------- cmd/cql/main.go | 33 ++++++-- .../adapter}/README.md | 0 .../adapter}/api/account.go | 0 .../adapter}/api/admin.go | 2 +- .../adapter}/api/doc.go | 0 .../adapter}/api/query.go | 2 +- .../adapter}/api/router.go | 0 .../adapter}/api/utils.go | 0 .../adapter}/config/config.go | 2 +- .../adapter}/config/doc.go | 0 .../adapter}/config/errors.go | 0 .../adapter}/server.go | 6 +- .../adapter}/storage/covenantsql.go | 0 .../adapter}/storage/doc.go | 0 .../adapter}/storage/sqlite3.go | 0 .../adapter}/storage/storage.go | 0 18 files changed, 34 insertions(+), 104 deletions(-) delete mode 100644 cmd/cql-adapter/main.go rename {cmd/cql-adapter => sqlchain/adapter}/README.md (100%) rename {cmd/cql-adapter => sqlchain/adapter}/api/account.go (100%) rename {cmd/cql-adapter => sqlchain/adapter}/api/admin.go (98%) rename {cmd/cql-adapter => sqlchain/adapter}/api/doc.go (100%) rename {cmd/cql-adapter => sqlchain/adapter}/api/query.go (98%) rename {cmd/cql-adapter => sqlchain/adapter}/api/router.go (100%) rename {cmd/cql-adapter => sqlchain/adapter}/api/utils.go (100%) rename {cmd/cql-adapter => sqlchain/adapter}/config/config.go (98%) rename {cmd/cql-adapter => sqlchain/adapter}/config/doc.go (100%) rename {cmd/cql-adapter => sqlchain/adapter}/config/errors.go (100%) rename {cmd/cql-adapter => sqlchain/adapter}/server.go (93%) rename {cmd/cql-adapter => sqlchain/adapter}/storage/covenantsql.go (100%) rename {cmd/cql-adapter => sqlchain/adapter}/storage/doc.go (100%) rename {cmd/cql-adapter => sqlchain/adapter}/storage/sqlite3.go (100%) rename {cmd/cql-adapter => sqlchain/adapter}/storage/storage.go 
(100%) diff --git a/Makefile b/Makefile index a1519d68f..ca3362038 100644 --- a/Makefile +++ b/Makefile @@ -165,12 +165,6 @@ bin/cql-fuse: -o bin/cql-fuse \ github.com/CovenantSQL/CovenantSQL/cmd/cql-fuse -bin/cql-adapter: - $(GOBUILD) \ - -ldflags "$(ldflags_role_client)" \ - -o bin/cql-adapter \ - github.com/CovenantSQL/CovenantSQL/cmd/cql-adapter - bin/cql-mysql-adapter: $(GOBUILD) \ -ldflags "$(ldflags_role_client)" \ @@ -187,7 +181,7 @@ bp: bin/cqld.test bin/cqld miner: bin/cql-minerd.test bin/cql-minerd -client: bin/cql-utils bin/cql bin/cql.test bin/cql-fuse bin/cql-adapter bin/cql-mysql-adapter bin/cql-faucet +client: bin/cql-utils bin/cql bin/cql.test bin/cql-fuse bin/cql-mysql-adapter bin/cql-faucet all: bp miner client @@ -198,4 +192,4 @@ clean: .PHONY: status start stop logs push push_testnet clean \ bin/cqld.test bin/cqld bin/cql-minerd.test bin/cql-minerd bin/cql-utils \ - bin/cql bin/cql.test bin/cql-fuse bin/cql-adapter bin/cql-mysql-adapter bin/cql-faucet + bin/cql bin/cql.test bin/cql-fuse bin/cql-mysql-adapter bin/cql-faucet diff --git a/cmd/cql-adapter/main.go b/cmd/cql-adapter/main.go deleted file mode 100644 index f45382daf..000000000 --- a/cmd/cql-adapter/main.go +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package main - -import ( - "context" - "flag" - "fmt" - "os" - "runtime" - "time" - - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/utils" - "github.com/CovenantSQL/CovenantSQL/utils/log" -) - -const name = "cql-adapeter" - -var ( - version = "unknown" - configFile string - listenAddr string - password string - showVersion bool -) - -func init() { - flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file for adapter") - flag.StringVar(&password, "password", "", "Master key password") - flag.StringVar(&listenAddr, "listen", "", "Listen address for adapter api") - flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, - "Disable signature sign and verify, for testing") - flag.BoolVar(&showVersion, "version", false, "Show version information and exit") -} - -func main() { - flag.Parse() - if showVersion { - fmt.Printf("%v %v %v %v %v\n", - name, version, runtime.GOOS, runtime.GOARCH, runtime.Version()) - os.Exit(0) - } - - configFile = utils.HomeDirExpand(configFile) - - flag.Visit(func(f *flag.Flag) { - log.Infof("args %#v : %s", f.Name, f.Value) - }) - - server, err := NewHTTPAdapter(listenAddr, configFile, password) - if err != nil { - log.WithError(err).Fatal("init adapter failed") - return - } - - log.Info("start adapter") - if err = server.Serve(); err != nil { - log.WithError(err).Fatal("start adapter failed") - return - } - - <-utils.WaitForExit() - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - defer cancel() - server.Shutdown(ctx) - log.Info("stopped adapter") -} diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 8d2edd90e..661088d2c 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -51,6 +51,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/sqlchain/adapter" 
"github.com/CovenantSQL/CovenantSQL/sqlchain/observer" "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils" @@ -71,14 +72,14 @@ var ( singleTransaction bool showVersion bool variables varsFlag - stopCh = make(chan struct{}) logLevel string - // Shard chain explorer stuff + // Shard chain explorer/adapter stuff tmpPath string // background observer and explorer block and log file path cLog *logrus.Logger // console logger bgLogLevel string // background log level explorerAddr string // explorer Web addr + adapterAddr string // adapter listen addr // DML variables createDB string // as a instance meta json string or simply a node count @@ -251,6 +252,7 @@ func init() { flag.StringVar(&tmpPath, "tmp-path", "", "Explorer temp file path, use os.TempDir for default") flag.StringVar(&bgLogLevel, "bg-log-level", "", "Background service log level") flag.StringVar(&explorerAddr, "web", "", "Address to serve a database chain explorer, e.g. :8546") + flag.StringVar(&adapterAddr, "adapter", "", "Address to serve a database chain adapter, e.g. 
:7784") // DML flags flag.StringVar(&createDB, "create", "", "Create database, argument can be instance requirement json or simply a node count requirement") @@ -307,14 +309,31 @@ func main() { cLog.Infof("explorer started on %s", explorerAddr) } - defer close(stopCh) - go func() { - <-stopCh + defer func() { _ = observer.StopObserver(service, httpServer) log.Info("explorer stopped") }() } + if adapterAddr != "" { + server, err := adapter.NewHTTPAdapter(adapterAddr, configFile, password) + if err != nil { + log.WithError(err).Fatal("init adapter failed") + } + + log.Info("start adapter") + if err = server.Serve(); err != nil { + log.WithError(err).Fatal("start adapter failed") + } else { + defer func() { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + server.Shutdown(ctx) + log.Info("stopped adapter") + }() + } + } + // TODO(leventeliu): discover more specific confirmation duration from config. We don't have // enough informations from config to do that currently, so just use a fixed and long enough // duration. 
@@ -580,8 +599,8 @@ func main() { } // if web flag is enabled - if explorerAddr != "" { - fmt.Printf("Ctrl + C to stop explorer listen on %s", explorerAddr) + if explorerAddr != "" || adapterAddr != "" { + fmt.Printf("Ctrl + C to stop explorer on %s and adapter on %s", explorerAddr, adapterAddr) <-utils.WaitForExit() return } diff --git a/cmd/cql-adapter/README.md b/sqlchain/adapter/README.md similarity index 100% rename from cmd/cql-adapter/README.md rename to sqlchain/adapter/README.md diff --git a/cmd/cql-adapter/api/account.go b/sqlchain/adapter/api/account.go similarity index 100% rename from cmd/cql-adapter/api/account.go rename to sqlchain/adapter/api/account.go diff --git a/cmd/cql-adapter/api/admin.go b/sqlchain/adapter/api/admin.go similarity index 98% rename from cmd/cql-adapter/api/admin.go rename to sqlchain/adapter/api/admin.go index 55eb4ef79..c367799ea 100644 --- a/cmd/cql-adapter/api/admin.go +++ b/sqlchain/adapter/api/admin.go @@ -22,7 +22,7 @@ import ( "net/http" "strconv" - "github.com/CovenantSQL/CovenantSQL/cmd/cql-adapter/config" + "github.com/CovenantSQL/CovenantSQL/sqlchain/adapter/config" "github.com/CovenantSQL/CovenantSQL/utils/log" ) diff --git a/cmd/cql-adapter/api/doc.go b/sqlchain/adapter/api/doc.go similarity index 100% rename from cmd/cql-adapter/api/doc.go rename to sqlchain/adapter/api/doc.go diff --git a/cmd/cql-adapter/api/query.go b/sqlchain/adapter/api/query.go similarity index 98% rename from cmd/cql-adapter/api/query.go rename to sqlchain/adapter/api/query.go index 1142e4bf2..0b865718c 100644 --- a/cmd/cql-adapter/api/query.go +++ b/sqlchain/adapter/api/query.go @@ -20,7 +20,7 @@ import ( "fmt" "net/http" - "github.com/CovenantSQL/CovenantSQL/cmd/cql-adapter/config" + "github.com/CovenantSQL/CovenantSQL/sqlchain/adapter/config" "github.com/CovenantSQL/CovenantSQL/utils/log" ) diff --git a/cmd/cql-adapter/api/router.go b/sqlchain/adapter/api/router.go similarity index 100% rename from cmd/cql-adapter/api/router.go rename to 
sqlchain/adapter/api/router.go diff --git a/cmd/cql-adapter/api/utils.go b/sqlchain/adapter/api/utils.go similarity index 100% rename from cmd/cql-adapter/api/utils.go rename to sqlchain/adapter/api/utils.go diff --git a/cmd/cql-adapter/config/config.go b/sqlchain/adapter/config/config.go similarity index 98% rename from cmd/cql-adapter/config/config.go rename to sqlchain/adapter/config/config.go index ba2ed392d..66022a4b3 100644 --- a/cmd/cql-adapter/config/config.go +++ b/sqlchain/adapter/config/config.go @@ -28,8 +28,8 @@ import ( yaml "gopkg.in/yaml.v2" "github.com/CovenantSQL/CovenantSQL/client" - "github.com/CovenantSQL/CovenantSQL/cmd/cql-adapter/storage" "github.com/CovenantSQL/CovenantSQL/conf" + "github.com/CovenantSQL/CovenantSQL/sqlchain/adapter/storage" "github.com/CovenantSQL/CovenantSQL/utils/log" ) diff --git a/cmd/cql-adapter/config/doc.go b/sqlchain/adapter/config/doc.go similarity index 100% rename from cmd/cql-adapter/config/doc.go rename to sqlchain/adapter/config/doc.go diff --git a/cmd/cql-adapter/config/errors.go b/sqlchain/adapter/config/errors.go similarity index 100% rename from cmd/cql-adapter/config/errors.go rename to sqlchain/adapter/config/errors.go diff --git a/cmd/cql-adapter/server.go b/sqlchain/adapter/server.go similarity index 93% rename from cmd/cql-adapter/server.go rename to sqlchain/adapter/server.go index 4fb17af78..107e3ec73 100644 --- a/cmd/cql-adapter/server.go +++ b/sqlchain/adapter/server.go @@ -14,7 +14,7 @@ * limitations under the License. */ -package main +package adapter import ( "context" @@ -24,8 +24,8 @@ import ( "github.com/gorilla/handlers" - "github.com/CovenantSQL/CovenantSQL/cmd/cql-adapter/api" - "github.com/CovenantSQL/CovenantSQL/cmd/cql-adapter/config" + "github.com/CovenantSQL/CovenantSQL/sqlchain/adapter/api" + "github.com/CovenantSQL/CovenantSQL/sqlchain/adapter/config" ) // HTTPAdapter is a adapter for covenantsql/alternative sqlite3 service. 
diff --git a/cmd/cql-adapter/storage/covenantsql.go b/sqlchain/adapter/storage/covenantsql.go similarity index 100% rename from cmd/cql-adapter/storage/covenantsql.go rename to sqlchain/adapter/storage/covenantsql.go diff --git a/cmd/cql-adapter/storage/doc.go b/sqlchain/adapter/storage/doc.go similarity index 100% rename from cmd/cql-adapter/storage/doc.go rename to sqlchain/adapter/storage/doc.go diff --git a/cmd/cql-adapter/storage/sqlite3.go b/sqlchain/adapter/storage/sqlite3.go similarity index 100% rename from cmd/cql-adapter/storage/sqlite3.go rename to sqlchain/adapter/storage/sqlite3.go diff --git a/cmd/cql-adapter/storage/storage.go b/sqlchain/adapter/storage/storage.go similarity index 100% rename from cmd/cql-adapter/storage/storage.go rename to sqlchain/adapter/storage/storage.go From 80b62bf010e98f231cf3e5877dc1365c3115be33 Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 5 Mar 2019 16:47:14 +0800 Subject: [PATCH 065/244] Update docker service cql-adapter command. --- bin/docker-entry.sh | 2 +- docker-compose.yml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/bin/docker-entry.sh b/bin/docker-entry.sh index 41d618b60..f1da213d6 100755 --- a/bin/docker-entry.sh +++ b/bin/docker-entry.sh @@ -15,7 +15,7 @@ observer) exec /app/cql -config "${COVENANT_CONF}" -web "${COVENANTSQL_OBSERVER_ADDR}" "${@}" ;; adapter) - exec /app/cql-adapter -config "${COVENANT_CONF}" "${@}" + exec /app/cql -config "${COVENANT_CONF}" -adapter "${COVENANTSQL_ADAPTER_ADDR}" "${@}" ;; mysql-adapter) exec /app/cql-mysql-adapter -config "${COVENANT_CONF}" "${@}" diff --git a/docker-compose.yml b/docker-compose.yml index 355b28fac..facf76a3c 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -136,6 +136,7 @@ services: environment: COVENANT_ROLE: adapter COVENANT_CONF: ./node_adapter/config.yaml + COVENANTSQL_ADAPTER_ADDR: localhost:4661 volumes: - ./test/service/node_adapter/config.yaml:/app/config.yaml - 
./test/service/node_adapter/private.key:/app/private.key From b65cbe1d0b34d11e6db38f98ce8e06a81a6a006a Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 5 Mar 2019 20:13:04 +0800 Subject: [PATCH 066/244] Fix adapter init driver twice. --- cmd/cql/main.go | 7 ++++--- docker-compose.yml | 4 +--- sqlchain/adapter/config/config.go | 10 +++------- sqlchain/adapter/server.go | 4 ++-- 4 files changed, 10 insertions(+), 15 deletions(-) diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 661088d2c..de31d6109 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -274,7 +274,7 @@ func main() { if tmpPath == "" { tmpPath = os.TempDir() } - logPath := filepath.Join(tmpPath, "covenant_explorer.log") + logPath := filepath.Join(tmpPath, "covenant_service.log") bgLog, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) if err != nil { fmt.Fprintf(os.Stderr, "open log file failed: %s, %v", logPath, err) @@ -316,15 +316,16 @@ func main() { } if adapterAddr != "" { - server, err := adapter.NewHTTPAdapter(adapterAddr, configFile, password) + server, err := adapter.NewHTTPAdapter(adapterAddr, configFile) if err != nil { log.WithError(err).Fatal("init adapter failed") } - log.Info("start adapter") if err = server.Serve(); err != nil { log.WithError(err).Fatal("start adapter failed") } else { + cLog.Infof("adapter started on %s", adapterAddr) + defer func() { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() diff --git a/docker-compose.yml b/docker-compose.yml index facf76a3c..a9d54e6f5 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -136,10 +136,8 @@ services: environment: COVENANT_ROLE: adapter COVENANT_CONF: ./node_adapter/config.yaml - COVENANTSQL_ADAPTER_ADDR: localhost:4661 + COVENANTSQL_ADAPTER_ADDR: 0.0.0.0:4661 volumes: - - ./test/service/node_adapter/config.yaml:/app/config.yaml - - ./test/service/node_adapter/private.key:/app/private.key - ./test/service/node_adapter/:/app/node_adapter/ networks: default: diff 
--git a/sqlchain/adapter/config/config.go b/sqlchain/adapter/config/config.go index 66022a4b3..8a89bcdb7 100644 --- a/sqlchain/adapter/config/config.go +++ b/sqlchain/adapter/config/config.go @@ -27,7 +27,6 @@ import ( yaml "gopkg.in/yaml.v2" - "github.com/CovenantSQL/CovenantSQL/client" "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/sqlchain/adapter/storage" "github.com/CovenantSQL/CovenantSQL/utils/log" @@ -69,8 +68,9 @@ type confWrapper struct { Adapter Config `yaml:"Adapter"` } -// LoadConfig load and verify config in config file and set to global config instance. -func LoadConfig(configPath string, password string) (config *Config, err error) { +// LoadConfig load and verify config in config file (Reuse some global config instance values). +// Should call conf.LoadConfig before use. e.g client.Init +func LoadConfig(configPath string) (config *Config, err error) { var workingRoot string var configBytes []byte if configBytes, err = ioutil.ReadFile(configPath); err != nil { @@ -88,10 +88,6 @@ func LoadConfig(configPath string, password string) (config *Config, err error) config.StorageDriver = "covenantsql" } if config.StorageDriver == "covenantsql" { - // init client - if err = client.Init(configPath, []byte(password)); err != nil { - return - } workingRoot = conf.GConf.WorkingRoot } else { if workingRoot, err = os.Getwd(); err != nil { diff --git a/sqlchain/adapter/server.go b/sqlchain/adapter/server.go index 107e3ec73..de1fb533f 100644 --- a/sqlchain/adapter/server.go +++ b/sqlchain/adapter/server.go @@ -34,12 +34,12 @@ type HTTPAdapter struct { } // NewHTTPAdapter creates adapter to service. 
-func NewHTTPAdapter(listenAddr string, configFile string, password string) (adapter *HTTPAdapter, err error) { +func NewHTTPAdapter(listenAddr string, configFile string) (adapter *HTTPAdapter, err error) { adapter = new(HTTPAdapter) // load config file var cfg *config.Config - if cfg, err = config.LoadConfig(configFile, password); err != nil { + if cfg, err = config.LoadConfig(configFile); err != nil { return } From 6aff41690b946b57b79ca85045590adc935d0a3e Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 5 Mar 2019 21:24:15 +0800 Subject: [PATCH 067/244] Combine delete path func. --- cmd/cql-utils/confgen.go | 23 +---------------------- cmd/cql-utils/keygen.go | 25 ++----------------------- cmd/cql-utils/main.go | 25 +++++++++++++++++++++++++ codecov.yml | 1 + 4 files changed, 29 insertions(+), 45 deletions(-) diff --git a/cmd/cql-utils/confgen.go b/cmd/cql-utils/confgen.go index 89d0b80fc..adb18d9d6 100644 --- a/cmd/cql-utils/confgen.go +++ b/cmd/cql-utils/confgen.go @@ -17,13 +17,11 @@ package main import ( - "bufio" "flag" "fmt" "io/ioutil" "os" "path" - "strings" yaml "gopkg.in/yaml.v2" @@ -52,26 +50,7 @@ func runConfgen() { publicKeystoreFileName := "public.keystore" privateKeyFile = path.Join(workingRoot, privateKeyFileName) - if _, err := os.Stat(workingRoot); err == nil { - reader := bufio.NewReader(os.Stdin) - fmt.Printf("The directory \"%s\" already exists. \nDo you want to delete it? 
(y or n, press Enter for default n):\n", - workingRoot) - t, err := reader.ReadString('\n') - t = strings.Trim(t, "\n") - if err != nil { - log.WithError(err).Error("unexpected error") - os.Exit(1) - } - if strings.Compare(t, "y") == 0 || strings.Compare(t, "yes") == 0 { - err = os.RemoveAll(workingRoot) - if err != nil { - log.WithError(err).Error("unexpected error") - os.Exit(1) - } - } else { - os.Exit(0) - } - } + askDeletePath(workingRoot) err := os.Mkdir(workingRoot, 0755) if err != nil { diff --git a/cmd/cql-utils/keygen.go b/cmd/cql-utils/keygen.go index e1541541f..bac7eb513 100644 --- a/cmd/cql-utils/keygen.go +++ b/cmd/cql-utils/keygen.go @@ -17,11 +17,8 @@ package main import ( - "bufio" "encoding/hex" "fmt" - "os" - "strings" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" @@ -29,26 +26,8 @@ import ( ) func runKeygen() *asymmetric.PublicKey { - if _, err := os.Stat(privateKeyFile); err == nil { - reader := bufio.NewReader(os.Stdin) - fmt.Printf("Private key file \"%s\" already exists. \nDo you want to delete it? 
(y or n, press Enter for default n):\n", - privateKeyFile) - t, err := reader.ReadString('\n') - t = strings.Trim(t, "\n") - if err != nil { - log.WithError(err).Error("unexpected error") - os.Exit(1) - } - if strings.Compare(t, "y") == 0 || strings.Compare(t, "yes") == 0 { - err = os.Remove(privateKeyFile) - if err != nil { - log.WithError(err).Error("unexpected error") - os.Exit(1) - } - } else { - os.Exit(0) - } - } + + askDeletePath(privateKeyFile) privateKey, _, err := asymmetric.GenSecp256k1KeyPair() if err != nil { diff --git a/cmd/cql-utils/main.go b/cmd/cql-utils/main.go index 34d6c532d..7a7d8210d 100644 --- a/cmd/cql-utils/main.go +++ b/cmd/cql-utils/main.go @@ -17,10 +17,12 @@ package main import ( + "bufio" "flag" "fmt" "os" "runtime" + "strings" "syscall" "golang.org/x/crypto/ssh/terminal" @@ -114,3 +116,26 @@ func readMasterKey() (string, error) { fmt.Println() return string(bytePwd), err } + +func askDeletePath(path string) { + if _, err := os.Stat(path); err == nil { + reader := bufio.NewReader(os.Stdin) + fmt.Printf("\"%s\" already exists. \nDo you want to delete it? (y or n, press Enter for default n):\n", + path) + t, err := reader.ReadString('\n') + t = strings.Trim(t, "\n") + if err != nil { + log.WithError(err).Error("unexpected error") + os.Exit(1) + } + if strings.Compare(t, "y") == 0 || strings.Compare(t, "yes") == 0 { + err = os.RemoveAll(path) + if err != nil { + log.WithError(err).Error("unexpected error") + os.Exit(1) + } + } else { + os.Exit(0) + } + } +} diff --git a/codecov.yml b/codecov.yml index 5027ce7f9..2db6258a6 100644 --- a/codecov.yml +++ b/codecov.yml @@ -31,6 +31,7 @@ ignore: - "vendor" - "cmd" - "sqlchain/observer" + - "sqlchain/adapter" - "test" - "crypto/secp256k1" - "_example/**/*" From 5cd3d8bb1ce24256ebd0d4c3f886981546f10c69 Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 5 Mar 2019 22:00:47 +0800 Subject: [PATCH 068/244] Refactor runKeygen func. 
--- cmd/cql-utils/confgen.go | 2 +- cmd/cql-utils/keygen.go | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/cmd/cql-utils/confgen.go b/cmd/cql-utils/confgen.go index adb18d9d6..7ab72d514 100644 --- a/cmd/cql-utils/confgen.go +++ b/cmd/cql-utils/confgen.go @@ -59,7 +59,7 @@ func runConfgen() { } fmt.Println("Generating key pair...") - publicKey := runKeygen() + publicKey := runKeygen(privateKeyFile) fmt.Println("Generated key pair.") fmt.Println("Generating nonce...") diff --git a/cmd/cql-utils/keygen.go b/cmd/cql-utils/keygen.go index bac7eb513..adbb4a6c1 100644 --- a/cmd/cql-utils/keygen.go +++ b/cmd/cql-utils/keygen.go @@ -25,25 +25,25 @@ import ( "github.com/CovenantSQL/CovenantSQL/utils/log" ) -func runKeygen() *asymmetric.PublicKey { +func runKeygen(privateKeyPath string) *asymmetric.PublicKey { - askDeletePath(privateKeyFile) + askDeletePath(privateKeyPath) - privateKey, _, err := asymmetric.GenSecp256k1KeyPair() + masterKey, err := readMasterKey() if err != nil { - log.WithError(err).Fatal("generate key pair failed") + log.WithError(err).Fatal("read master key failed") } - masterKey, err := readMasterKey() + privateKey, _, err := asymmetric.GenSecp256k1KeyPair() if err != nil { - log.WithError(err).Fatal("read master key failed") + log.WithError(err).Fatal("generate key pair failed") } - if err = kms.SavePrivateKey(privateKeyFile, privateKey, []byte(masterKey)); err != nil { + if err = kms.SavePrivateKey(privateKeyPath, privateKey, []byte(masterKey)); err != nil { log.WithError(err).Fatal("save generated keypair failed") } - fmt.Printf("Private key file: %s\n", privateKeyFile) + fmt.Printf("Private key file: %s\n", privateKeyPath) fmt.Printf("Public key's hex: %s\n", hex.EncodeToString(privateKey.PubKey().Serialize())) return privateKey.PubKey() } From 43b30063806454385841721eccb5b9f54e544647 Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Wed, 6 Mar 2019 16:26:41 +0800 Subject: [PATCH 069/244] Add reads pool --- 
sqlchain/chain.go | 2 +- sqlchain/chain_test.go | 53 ++++++++++++++---------- sqlchain/xxx_test.go | 94 ++++++++++++++++++++++++++++++++++++++---- xenomint/pool.go | 7 ++++ xenomint/state.go | 9 ++++ 5 files changed, 136 insertions(+), 29 deletions(-) diff --git a/sqlchain/chain.go b/sqlchain/chain.go index a280f1f35..01f5a542e 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -456,7 +456,7 @@ func (c *Chain) produceBlock(now time.Time) (err error) { for i, v := range qts { // TODO(leventeliu): maybe block waiting at a ready channel instead? for !v.Ready() { - time.Sleep(1 * time.Millisecond) + time.Sleep(c.rt.period / 10) if c.rt.ctx.Err() != nil { err = c.rt.ctx.Err() return diff --git a/sqlchain/chain_test.go b/sqlchain/chain_test.go index 4e75c37b2..4d7f226d1 100644 --- a/sqlchain/chain_test.go +++ b/sqlchain/chain_test.go @@ -33,7 +33,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" - "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/CovenantSQL/CovenantSQL/types" ) var ( @@ -88,7 +88,7 @@ func TestIndexKey(t *testing.T) { } func TestMultiChain(t *testing.T) { - log.SetLevel(log.InfoLevel) + //log.SetLevel(log.InfoLevel) // Create genesis block genesis, err := createRandomBlock(genesisHash, true) @@ -302,18 +302,38 @@ func TestMultiChain(t *testing.T) { }(v.chain) } + // Create table + cli, err := newRandomNode(chains[0].chain, true) + if err != nil { + t.Fatalf("error occurred: %v", err) + } + req, err := cli.buildQuery(types.WriteQuery, []types.Query{ + buildQuery(`CREATE TABLE t1 (k INT, v TEXT, PRIMARY KEY(k))`), + buildQuery(`INSERT INTO t1 (k, v) VALUES (?, ?), (?, ?), (?, ?), (?, ?), (?, ?)`, + 1, "v1", 2, "v2", 3, "v3", 4, "v4", 5, "v5", + ), + }) + if err != nil { + t.Fatalf("error occurred: %v", err) + } + for i, v := range chains { + cli, err := newRandomNode(v.chain, i == 0) + if err != nil { + t.Fatalf("error occurred: %v", err) + } + err 
= cli.sendQuery(req) + if err != nil { + t.Fatalf("error occurred: %v", err) + } + } + // Create some random clients to push new queries for i, v := range chains { sC := make(chan struct{}) wg := &sync.WaitGroup{} - wk := &nodeProfile{ - NodeID: peers.Servers[i], - PrivateKey: testPrivKey, - PublicKey: testPubKey, - } for j := 0; j < testClientNumberPerChain; j++ { - cli, err := newRandomNode() + cli, err := newRandomNode(v.chain, i == 0) if err != nil { t.Fatalf("error occurred: %v", err) @@ -328,22 +348,13 @@ func TestMultiChain(t *testing.T) { case <-sC: break foreverLoop default: + var err error // Send a random query - resp, err := createRandomQueryResponse(p, wk) - - if err != nil { - t.Errorf("error occurred: %v", err) - } else if err = c.AddResponse(resp); err != nil { - t.Errorf("error occurred: %v", err) - } - - time.Sleep(time.Duration(rand.Int63n(500)+1) * time.Millisecond) - ack, err := createRandomQueryAckWithResponse(resp, p) - + err = cli.query(types.ReadQuery, []types.Query{ + buildQuery(`SELECT v FROM t1 WHERE k=?`, rand.Intn(5)), + }) if err != nil { t.Errorf("error occurred: %v", err) - } else if err = c.VerifyAndPushAckedQuery(ack); err != nil { - t.Errorf("error occurred: %v", err) } } } diff --git a/sqlchain/xxx_test.go b/sqlchain/xxx_test.go index 5f2fcb45e..e242e81aa 100644 --- a/sqlchain/xxx_test.go +++ b/sqlchain/xxx_test.go @@ -22,6 +22,7 @@ import ( "os" "path" "sync" + "sync/atomic" "testing" "time" @@ -39,6 +40,7 @@ var ( genesisHash = hash.Hash{} testDifficulty = 4 testMasterKey = []byte(".9K.sgch!3;C>w0v") + testConnIDSeed = rand.Uint64() testDataDir string testPrivKeyFile string testPubKeysFile string @@ -48,12 +50,16 @@ var ( ) type nodeProfile struct { - NodeID proto.NodeID - PrivateKey *asymmetric.PrivateKey - PublicKey *asymmetric.PublicKey + NodeID proto.NodeID + PrivateKey *asymmetric.PrivateKey + PublicKey *asymmetric.PublicKey + ConnectionID uint64 + SeqNo uint64 + Chain *Chain + IsLeader bool } -func newRandomNode() (node 
*nodeProfile, err error) { +func newRandomNode(chain *Chain, isLeader bool) (node *nodeProfile, err error) { priv, pub, err := asymmetric.GenSecp256k1KeyPair() if err != nil { @@ -64,9 +70,13 @@ func newRandomNode() (node *nodeProfile, err error) { rand.Read(h[:]) node = &nodeProfile{ - NodeID: proto.NodeID(h.String()), - PrivateKey: priv, - PublicKey: pub, + NodeID: proto.NodeID(h.String()), + PrivateKey: priv, + PublicKey: pub, + ConnectionID: atomic.AddUint64(&testConnIDSeed, 1), + SeqNo: rand.Uint64(), + Chain: chain, + IsLeader: isLeader, } return @@ -369,3 +379,73 @@ func TestMain(m *testing.M) { return m.Run() }()) } + +func buildQuery(query string, args ...interface{}) types.Query { + var nargs = make([]types.NamedArg, len(args)) + for i := range args { + nargs[i] = types.NamedArg{ + Name: "", + Value: args[i], + } + } + return types.Query{ + Pattern: query, + Args: nargs, + } +} + +func (p *nodeProfile) buildQuery( + qt types.QueryType, qs []types.Query) (req *types.Request, err error, +) { + req = &types.Request{ + Header: types.SignedRequestHeader{ + RequestHeader: types.RequestHeader{ + QueryType: qt, + NodeID: p.NodeID, + DatabaseID: p.Chain.databaseID, + ConnectionID: p.ConnectionID, + SeqNo: atomic.AddUint64(&p.SeqNo, 1), + Timestamp: time.Now().UTC(), + // BatchCount and QueriesHash will be set by req.Sign() + }, + }, + Payload: types.RequestPayload{Queries: qs}, + } + if err = req.Sign(p.PrivateKey); err != nil { + return + } + return +} + +func (p *nodeProfile) sendQuery(req *types.Request) (err error) { + tracker, resp, err := p.Chain.Query(req, p.IsLeader) + if err != nil { + return + } + if err = resp.BuildHash(); err != nil { + return + } + if err = p.Chain.AddResponse(&resp.Header); err != nil { + return + } + tracker.UpdateResp(resp) + + ack, err := createRandomQueryAckWithResponse(&resp.Header, p) + if err != nil { + return + } + if err = p.Chain.VerifyAndPushAckedQuery(ack); err != nil { + return + } + return +} + +func (p *nodeProfile) 
query( + qt types.QueryType, qs []types.Query) (err error, +) { + req, err := p.buildQuery(qt, qs) + if err != nil { + return + } + return p.sendQuery(req) +} diff --git a/xenomint/pool.go b/xenomint/pool.go index 38a1f6511..33494e183 100644 --- a/xenomint/pool.go +++ b/xenomint/pool.go @@ -50,6 +50,7 @@ type pool struct { // Failed queries: hash => Request failed map[hash.Hash]*types.Request // Succeeded queries and their index + reads map[hash.Hash]*QueryTracker queries []*QueryTracker index map[uint64]int // Atomic counters for stats @@ -60,6 +61,7 @@ type pool struct { func newPool() *pool { return &pool{ failed: make(map[hash.Hash]*types.Request), + reads: make(map[hash.Hash]*QueryTracker), queries: make([]*QueryTracker, 0), index: make(map[uint64]int), } @@ -73,6 +75,11 @@ func (p *pool) enqueue(sp uint64, q *QueryTracker) { return } +func (p *pool) enqueueRead(q *QueryTracker) { + // NOTE(leventeliu): this overwrites any request with a same hash + p.reads[q.Req.Header.Hash()] = q +} + func (p *pool) setFailed(req *types.Request) { p.failed[req.Header.Hash()] = req atomic.StoreInt32(&p.failedRequestCount, int32(len(p.failed))) diff --git a/xenomint/state.go b/xenomint/state.go index 7bcbf5557..850f6fda7 100644 --- a/xenomint/state.go +++ b/xenomint/state.go @@ -226,6 +226,9 @@ func (s *State) readWithContext( } // Build query response ref = &QueryTracker{Req: req} + s.Lock() + s.pool.enqueueRead(ref) + s.Unlock() resp = &types.Response{ Header: types.SignedResponseHeader{ ResponseHeader: types.ResponseHeader{ @@ -291,6 +294,9 @@ func (s *State) readTx( } // Build query response ref = &QueryTracker{Req: req} + s.Lock() + s.pool.enqueueRead(ref) + s.Unlock() resp = &types.Response{ Header: types.SignedResponseHeader{ ResponseHeader: types.ResponseHeader{ @@ -630,6 +636,9 @@ func (s *State) CommitExWithContext( // Return pooled items and reset failed = s.pool.failedList() queries = s.pool.queries + for _, v := range s.pool.reads { + queries = append(queries, v) 
+ } s.pool = newPool() poolCleaned = time.Since(start) return From 4cc255ab14747694e7785cf732391d72cc1b6a38 Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Wed, 6 Mar 2019 16:54:10 +0800 Subject: [PATCH 070/244] Minor fix for meta linter --- sqlchain/chain_test.go | 6 ++++-- sqlchain/xxx_test.go | 2 +- xenomint/mux_test.go | 10 +++++++--- xenomint/state.go | 16 ++++++++++++---- xenomint/state_test.go | 6 +++--- xenomint/xxx_test.go | 2 +- 6 files changed, 28 insertions(+), 14 deletions(-) diff --git a/sqlchain/chain_test.go b/sqlchain/chain_test.go index 4d7f226d1..6928a8834 100644 --- a/sqlchain/chain_test.go +++ b/sqlchain/chain_test.go @@ -241,7 +241,9 @@ func TestMultiChain(t *testing.T) { for _, n := range conf.GConf.KnownNodes { rawNodeID := n.ID.ToRawNodeID() - route.SetNodeAddrCache(rawNodeID, n.Addr) + if err = route.SetNodeAddrCache(rawNodeID, n.Addr); err != nil { + t.Fatalf("error occurred: %v", err) + } node := &proto.Node{ ID: n.ID, Addr: n.Addr, @@ -279,7 +281,7 @@ func TestMultiChain(t *testing.T) { defer func(c *Chain) { // Stop chain main process before exit - c.Stop() + _ = c.Stop() }(v.chain) } diff --git a/sqlchain/xxx_test.go b/sqlchain/xxx_test.go index e242e81aa..994555fad 100644 --- a/sqlchain/xxx_test.go +++ b/sqlchain/xxx_test.go @@ -226,7 +226,7 @@ func registerNodesWithPublicKey(pub *asymmetric.PublicKey, diff int, num int) ( wg.Add(1) go func() { defer wg.Done() - miner.ComputeBlockNonce(block, next, diff) + _ = miner.ComputeBlockNonce(block, next, diff) }() n := <-nCh nis[i] = n diff --git a/xenomint/mux_test.go b/xenomint/mux_test.go index 215f80ce9..777b0a184 100644 --- a/xenomint/mux_test.go +++ b/xenomint/mux_test.go @@ -101,8 +101,12 @@ func setupMuxParallel(priv *ca.PrivateKey) ( } kms.SetLocalNodeIDNonce(nis[2].ID.ToRawNodeID().CloneBytes(), &nis[2].Nonce) for i := range nis { - route.SetNodeAddrCache(nis[i].ID.ToRawNodeID(), nis[i].Addr) - kms.SetNode(&nis[i]) + if err = route.SetNodeAddrCache(nis[i].ID.ToRawNodeID(), 
nis[i].Addr); err != nil { + return + } + if err = kms.SetNode(&nis[i]); err != nil { + return + } } // Register mux service if ms, err = NewMuxService(benchmarkRPCName, mnSv); err != nil { @@ -277,7 +281,7 @@ func TestMuxService(t *testing.T) { ms.register(benchmarkDatabaseID, c) defer func() { ms.unregister(benchmarkDatabaseID) - teardownChain(t.Name(), c) + _ = teardownChain(t.Name(), c) }() // Setup query requests diff --git a/xenomint/state.go b/xenomint/state.go index 850f6fda7..3fe698cd4 100644 --- a/xenomint/state.go +++ b/xenomint/state.go @@ -168,7 +168,9 @@ func readSingle( if rows, err = qer.QueryContext(ctx, pattern, args...); err != nil { return } - defer rows.Close() + defer func() { + _ = rows.Close() + }() // Fetch column names and types if names, err = rows.Columns(); err != nil { return @@ -271,7 +273,9 @@ func (s *State) readTx( return } querier = tx - defer tx.Rollback() + defer func() { + _ = tx.Rollback() + }() } defer func() { @@ -408,12 +412,16 @@ func (s *State) write( err = errors.Wrapf(ierr, "failed to create savepoint %d", lastSeq) return } - defer s.executer.Exec(`ROLLBACK TO "?"`, lastSeq) + defer func() { + _, _ = s.executer.Exec(`ROLLBACK TO "?"`, lastSeq) + }() } if s.level != sql.LevelReadUncommitted { // NOTE(leventeliu): this will cancel any uncommitted transaction, and do not harm to // committed ones. 
- defer s.executer.Exec(`ROLLBACK`) + defer func() { + _, _ = s.executer.Exec(`ROLLBACK`) + }() } for i, v := range req.Payload.Queries { var res sql.Result diff --git a/xenomint/state_test.go b/xenomint/state_test.go index 9811f38be..de962e931 100644 --- a/xenomint/state_test.go +++ b/xenomint/state_test.go @@ -741,10 +741,10 @@ func TestSerializableState(t *testing.T) { for { _, resp, err = state.Query(iReq, true) c.So(err, ShouldBeNil) - c.Printf("insert affected rows: %d\n", resp.Header.AffectedRows) + _, _ = c.Printf("insert affected rows: %d\n", resp.Header.AffectedRows) _, resp, err = state.Query(dReq, true) c.So(err, ShouldBeNil) - c.Printf("delete affected rows: %d\n", resp.Header.AffectedRows) + _, _ = c.Printf("delete affected rows: %d\n", resp.Header.AffectedRows) select { case <-ctx.Done(): return @@ -766,7 +766,7 @@ func TestSerializableState(t *testing.T) { DeclTypes: []string{""}, Rows: []types.ResponseRow{{Values: []interface{}{int64(count)}}}, }), ShouldBeTrue) - Printf("index = %d, count = %v\n", i, resp) + _, _ = Printf("index = %d, count = %v\n", i, resp) } }) Convey("The state should not see uncommitted changes", func(c C) { diff --git a/xenomint/xxx_test.go b/xenomint/xxx_test.go index b0a7686d0..07d053f2f 100644 --- a/xenomint/xxx_test.go +++ b/xenomint/xxx_test.go @@ -136,7 +136,7 @@ func createNodesWithPublicKey( wg.Add(1) go func() { defer wg.Done() - miner.ComputeBlockNonce(block, next, diff) + _ = miner.ComputeBlockNonce(block, next, diff) }() ni = <-nic nis[i] = proto.Node{ From 14adc2bb5f002ed4b3905b7f5349fa3f2208b233 Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Wed, 6 Mar 2019 17:08:36 +0800 Subject: [PATCH 071/244] Refactor internal interfaces --- xenomint/state.go | 77 +++++++++++++++++++++++------------------------ 1 file changed, 38 insertions(+), 39 deletions(-) diff --git a/xenomint/state.go b/xenomint/state.go index 3fe698cd4..0de4731e1 100644 --- a/xenomint/state.go +++ b/xenomint/state.go @@ -37,23 +37,18 @@ type 
sqlQuerier interface { } type sqlExecuter interface { - sqlQuerier Exec(query string, args ...interface{}) (sql.Result, error) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) - Commit() error - Rollback() error -} - -type sqlDB struct { - *sql.DB } -func (db *sqlDB) Commit() error { - return nil +type sqlTransaction interface { + Commit() error + Rollback() error } -func (db *sqlDB) Rollback() error { - return nil +type sqlHandler interface { + sqlQuerier + sqlExecuter } // State defines a xenomint state which is bound to a underlying storage. @@ -66,7 +61,7 @@ type State struct { closed bool nodeID proto.NodeID - executer sqlExecuter + handler sqlHandler maxTx uint64 lastCommitPoint uint64 current uint64 // current is the current lastSeq of the current transaction @@ -82,18 +77,18 @@ func NewState(level sql.IsolationLevel, nodeID proto.NodeID, strg xi.Storage) (s pool: newPool(), maxTx: 100, } - s.openSQLExecuter() + s.openHandler() return } -func (s *State) openSQLExecuter() { +func (s *State) openHandler() { if s.level == sql.LevelReadUncommitted { var err error - if s.executer, err = s.strg.Writer().Begin(); err != nil { + if s.handler, err = s.strg.Writer().Begin(); err != nil { log.WithError(err).Fatal("failed to open transaction") } } else { - s.executer = &sqlDB{DB: s.strg.Writer()} + s.handler = s.strg.Writer() } } @@ -128,11 +123,11 @@ func (s *State) Close(commit bool) (err error) { if s.closed { return } - if s.executer != nil { + if s.handler != nil { if commit { - s.commitSQLExecuter() + s.commitHandler() } else { - s.rollbackSQLExecuter() + s.rollbackHandler() } } if err = s.strg.Close(); err != nil { @@ -265,7 +260,7 @@ func (s *State) readTx( // lock transaction s.Lock() defer s.Unlock() - querier = s.executer + querier = s.handler } else { var tx *sql.Tx if tx, ierr = s.reader().Begin(); ierr != nil { @@ -348,7 +343,7 @@ func (s *State) writeSingle( return } //parsed = time.Since(start) - if res, err = 
s.executer.Exec(pattern, args...); err == nil { + if res, err = s.handler.Exec(pattern, args...); err == nil { if containsDDL { atomic.StoreUint32(&s.hasSchemaChange, 1) } @@ -408,19 +403,19 @@ func (s *State) write( lastSeq = s.getSeq() if qcnt > 1 && s.level == sql.LevelReadUncommitted { // Set savepoint - if _, ierr = s.executer.Exec(`SAVEPOINT "?"`, lastSeq); ierr != nil { + if _, ierr = s.handler.Exec(`SAVEPOINT "?"`, lastSeq); ierr != nil { err = errors.Wrapf(ierr, "failed to create savepoint %d", lastSeq) return } defer func() { - _, _ = s.executer.Exec(`ROLLBACK TO "?"`, lastSeq) + _, _ = s.handler.Exec(`ROLLBACK TO "?"`, lastSeq) }() } if s.level != sql.LevelReadUncommitted { // NOTE(leventeliu): this will cancel any uncommitted transaction, and do not harm to // committed ones. defer func() { - _, _ = s.executer.Exec(`ROLLBACK`) + _, _ = s.handler.Exec(`ROLLBACK`) }() } for i, v := range req.Payload.Queries { @@ -440,7 +435,7 @@ func (s *State) write( if s.level == sql.LevelReadUncommitted { if qcnt > 1 { // Release savepoint - if _, ierr = s.executer.Exec(`RELEASE SAVEPOINT "?"`, lastSeq); ierr != nil { + if _, ierr = s.handler.Exec(`RELEASE SAVEPOINT "?"`, lastSeq); ierr != nil { err = errors.Wrapf(ierr, "failed to release savepoint %d", lastSeq) return } @@ -449,7 +444,7 @@ func (s *State) write( // Try to commit if the ongoing tx is too large or schema is changed if s.getSeq()-s.getLastCommitPoint() > s.maxTx || atomic.LoadUint32(&s.hasSchemaChange) != 0 { - s.flushSQLExecuter() + s.flushHandler() } writeDone = time.Since(start) if isLeader { @@ -505,7 +500,7 @@ func (s *State) replay(ctx context.Context, req *types.Request, resp *types.Resp // Try to commit if the ongoing tx is too large or schema is changed if s.getSeq()-s.getLastCommitPoint() > s.maxTx || atomic.LoadUint32(&s.hasSchemaChange) != 0 { - s.flushSQLExecuter() + s.flushHandler() } s.pool.enqueue(lastSeq, query) return @@ -555,7 +550,7 @@ func (s *State) ReplayBlockWithContext(ctx 
context.Context, block *types.Block) s.pool.enqueue(lastsp, query) } // Always try to commit after a block is successfully replayed - s.flushSQLExecuter() + s.flushHandler() // Remove duplicate failed queries from local pool for _, r := range block.FailedReqs { s.pool.removeFailed(r) @@ -593,7 +588,7 @@ func (s *State) commit() (err error) { lockReleased = time.Since(start) }() lockAcquired = time.Since(start) - s.flushSQLExecuter() + s.flushHandler() committed = time.Since(start) _ = s.pool.queries s.pool = newPool() @@ -639,7 +634,7 @@ func (s *State) CommitExWithContext( lockReleased = time.Since(start) }() // Always try to commit before the block is produced - s.flushSQLExecuter() + s.flushHandler() committed = time.Since(start) // Return pooled items and reset failed = s.pool.failedList() @@ -652,23 +647,27 @@ func (s *State) CommitExWithContext( return } -func (s *State) flushSQLExecuter() { - s.commitSQLExecuter() - s.openSQLExecuter() +func (s *State) flushHandler() { + s.commitHandler() + s.openHandler() } -func (s *State) commitSQLExecuter() { - if err := s.executer.Commit(); err != nil { - log.WithError(err).Fatal("failed to commit") +func (s *State) commitHandler() { + if tx, ok := s.handler.(sqlTransaction); ok { + if err := tx.Commit(); err != nil { + log.WithError(err).Fatal("failed to commit") + } } // reset schema change flag atomic.StoreUint32(&s.hasSchemaChange, 0) atomic.StoreUint64(&s.lastCommitPoint, s.getSeq()) } -func (s *State) rollbackSQLExecuter() { - if err := s.executer.Rollback(); err != nil { - log.WithError(err).Fatal("failed to rollback") +func (s *State) rollbackHandler() { + if tx, ok := s.handler.(sqlTransaction); ok { + if err := tx.Rollback(); err != nil { + log.WithError(err).Fatal("failed to rollback") + } } // reset schema change flag atomic.StoreUint32(&s.hasSchemaChange, 0) From ece69dd326d3fdcf54f8034455b5bdc83c880b80 Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Wed, 6 Mar 2019 17:33:45 +0800 Subject: [PATCH 072/244] 
Remove unused config fields --- sqlchain/config.go | 8 +------- sqlchain/runtime.go | 2 +- sqlchain/runtime_test.go | 12 ++++++------ 3 files changed, 8 insertions(+), 14 deletions(-) diff --git a/sqlchain/config.go b/sqlchain/config.go index 3822870e7..b43e5a3c7 100644 --- a/sqlchain/config.go +++ b/sqlchain/config.go @@ -37,14 +37,8 @@ type Config struct { Peers *proto.Peers Server proto.NodeID - // Price sets query price in gases. - Price map[types.QueryType]uint64 - ProducingReward uint64 - BillingPeriods int32 - // QueryTTL sets the unacknowledged query TTL in block periods. - QueryTTL int32 - + QueryTTL int32 BlockCacheTTL int32 // DBAccount info diff --git a/sqlchain/runtime.go b/sqlchain/runtime.go index b60a6a2e3..15bc069eb 100644 --- a/sqlchain/runtime.go +++ b/sqlchain/runtime.go @@ -88,7 +88,7 @@ type runtime struct { } func blockCacheTTLRequired(c *Config) (ttl int32) { - var billingRequiredTTL = 2 * c.BillingPeriods + var billingRequiredTTL = int32(2 * c.UpdatePeriod) ttl = c.BlockCacheTTL if ttl < minBlockCacheTTL { ttl = minBlockCacheTTL diff --git a/sqlchain/runtime_test.go b/sqlchain/runtime_test.go index 040dc6785..bd59a0d25 100644 --- a/sqlchain/runtime_test.go +++ b/sqlchain/runtime_test.go @@ -30,22 +30,22 @@ func TestBlockCacheTTL(t *testing.T) { }{ { config: &Config{ - BlockCacheTTL: 0, - BillingPeriods: 0, + BlockCacheTTL: 0, + UpdatePeriod: 0, }, expect: minBlockCacheTTL, }, { config: &Config{ - BlockCacheTTL: minBlockCacheTTL + 1, - BillingPeriods: 0, + BlockCacheTTL: minBlockCacheTTL + 1, + UpdatePeriod: 0, }, expect: minBlockCacheTTL + 1, }, { config: &Config{ - BlockCacheTTL: 0, - BillingPeriods: minBlockCacheTTL + 1, + BlockCacheTTL: 0, + UpdatePeriod: uint64(minBlockCacheTTL + 1), }, expect: 2 * (minBlockCacheTTL + 1), }, From 5739ec5bc3974f07da4ff5dd2f66151030398dcb Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Wed, 6 Mar 2019 17:50:01 +0800 Subject: [PATCH 073/244] Skip compatibility test for beta branch --- .gitlab-ci.yml | 9 
+++++++++ 1 file changed, 9 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 4865840e5..5b9abed7c 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -69,6 +69,9 @@ old-client-compatibility: - set -o errexit - set -o pipefail - set -x + - commit=$(git rev-parse --short HEAD) + - branch=$(git branch -rv |grep $commit | awk '{print $1}') + - if [[ $branch =~ "/beta_" ]]; then exit 0; fi - make clean - cp ${BIN_CACHE}/* bin/ - ./test/compatibility/specific_old.sh client @@ -79,6 +82,9 @@ old-bp-compatibility: - set -o errexit - set -o pipefail - set -x + - commit=$(git rev-parse --short HEAD) + - branch=$(git branch -rv |grep $commit | awk '{print $1}') + - if [[ $branch =~ "/beta_" ]]; then exit 0; fi - make clean - cp ${BIN_CACHE}/* bin/ - ./test/compatibility/specific_old.sh bp @@ -89,6 +95,9 @@ old-miner-compatibility: - set -o errexit - set -o pipefail - set -x + - commit=$(git rev-parse --short HEAD) + - branch=$(git branch -rv |grep $commit | awk '{print $1}') + - if [[ $branch =~ "/beta_" ]]; then exit 0; fi - make clean - cp ${BIN_CACHE}/* bin/ - ./test/compatibility/specific_old.sh miner From 7fc4d96be2d7bbd253ad31276ef5123edc4a0bad Mon Sep 17 00:00:00 2001 From: auxten Date: Wed, 6 Mar 2019 18:06:51 +0800 Subject: [PATCH 074/244] Use host default name server in docker --- bin/docker-entry.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/docker-entry.sh b/bin/docker-entry.sh index f1da213d6..d5040bfe6 100755 --- a/bin/docker-entry.sh +++ b/bin/docker-entry.sh @@ -1,6 +1,6 @@ #!/bin/sh -echo nameserver 114.114.114.114 > /etc/resolv.conf +# echo nameserver 114.114.114.114 > /etc/resolv.conf [ -s "${COVENANT_ALERT}" ] && [ -x "${COVENANT_ALERT}" ] && (eval "${COVENANT_ALERT}") From de0c00a146bf5f5c2db8257c7967381db8021aa7 Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Thu, 7 Mar 2019 11:45:47 +0800 Subject: [PATCH 075/244] Fix bug on chain startup --- sqlchain/chain.go | 5 +++++ 1 file changed, 5 insertions(+) diff 
--git a/sqlchain/chain.go b/sqlchain/chain.go index 01f5a542e..897fb1e96 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -637,6 +637,11 @@ func (c *Chain) sync() { } for c.rt.getNextTurn() <= height { c.syncHead() + c.stat() + c.pruneBlockCache() + c.rt.setNextTurn() + c.ai.advance(c.rt.getMinValidHeight()) + c.heights <- c.rt.getHead().Height } } } From a8c8c9f8b2caca1806556deda232bde456ad5ef1 Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Thu, 7 Mar 2019 12:19:06 +0800 Subject: [PATCH 076/244] Fix bug: billing on empty blocks --- sqlchain/chain.go | 98 +++++++++++++++++++++++++---------------------- 1 file changed, 52 insertions(+), 46 deletions(-) diff --git a/sqlchain/chain.go b/sqlchain/chain.go index 897fb1e96..30560ad93 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -625,6 +625,11 @@ func (c *Chain) mainCycle(ctx context.Context) { func (c *Chain) sync() { le := c.logEntry() le.Debug("synchronizing chain state") + defer func() { + c.stat() + c.pruneBlockCache() + c.ai.advance(c.rt.getMinValidHeight()) + }() for { now := c.rt.now() height := c.rt.getHeightFromTime(now) @@ -637,11 +642,7 @@ func (c *Chain) sync() { } for c.rt.getNextTurn() <= height { c.syncHead() - c.stat() - c.pruneBlockCache() c.rt.setNextTurn() - c.ai.advance(c.rt.getMinValidHeight()) - c.heights <- c.rt.getHead().Height } } } @@ -673,8 +674,38 @@ func (c *Chain) processBlocks(ctx context.Context) { var stash []*types.Block for { + le := c.logEntryWithHeadState() select { case h := <-c.heights: + // Trigger billing + head := c.rt.getHead() + if uint64(h)%c.updatePeriod == 0 { + ub, err := c.billing(h, head.node) + if err != nil { + le.WithError(err).Error("billing failed") + } + // allocate nonce + nonceReq := &types.NextAccountNonceReq{} + nonceResp := &types.NextAccountNonceResp{} + nonceReq.Addr = *c.addr + if err = rpc.RequestBP(route.MCCNextAccountNonce.String(), nonceReq, nonceResp); err != nil { + // allocate nonce failed + le.WithError(err).Warning("allocate 
nonce for transaction failed") + } + ub.Nonce = nonceResp.Nonce + if err = ub.Sign(c.pk); err != nil { + le.WithError(err).Warning("sign tx failed") + } + + addTxReq := &types.AddTxReq{TTL: 1} + addTxResp := &types.AddTxResp{} + addTxReq.Tx = ub + le.Debugf("nonce in processBlocks: %d, addr: %s", + addTxReq.Tx.GetAccountNonce(), addTxReq.Tx.GetAccountAddress()) + if err = rpc.RequestBP(route.MCCAddTx.String(), addTxReq, addTxResp); err != nil { + le.WithError(err).Warning("send tx failed") + } + } // Return all stashed blocks to pending channel c.logEntryWithHeadState().WithFields(log.Fields{ "height": h, @@ -687,11 +718,10 @@ func (c *Chain) processBlocks(ctx context.Context) { } case block := <-c.blocks: height := c.rt.getHeightFromTime(block.Timestamp()) - le := c.logEntryWithHeadState().WithFields(log.Fields{ + le.WithFields(log.Fields{ "block_height": height, "block_hash": block.BlockHash().String(), - }) - le.Debug("processing new block") + }).Debug("processing new block") if height > c.rt.getNextTurn()-1 { // Stash newer blocks for later check @@ -704,35 +734,6 @@ func (c *Chain) processBlocks(ctx context.Context) { if err := c.CheckAndPushNewBlock(block); err != nil { le.WithError(err).Error("failed to check and push new block") } else { - head := c.rt.getHead() - currentCount := uint64(head.node.count) - if currentCount%c.updatePeriod == 0 { - ub, err := c.billing(head.node) - if err != nil { - le.WithError(err).Error("billing failed") - } - // allocate nonce - nonceReq := &types.NextAccountNonceReq{} - nonceResp := &types.NextAccountNonceResp{} - nonceReq.Addr = *c.addr - if err = rpc.RequestBP(route.MCCNextAccountNonce.String(), nonceReq, nonceResp); err != nil { - // allocate nonce failed - le.WithError(err).Warning("allocate nonce for transaction failed") - } - ub.Nonce = nonceResp.Nonce - if err = ub.Sign(c.pk); err != nil { - le.WithError(err).Warning("sign tx failed") - } - - addTxReq := &types.AddTxReq{TTL: 1} - addTxResp := &types.AddTxResp{} - 
addTxReq.Tx = ub - le.Debugf("nonce in processBlocks: %d, addr: %s", - addTxReq.Tx.GetAccountNonce(), addTxReq.Tx.GetAccountAddress()) - if err = rpc.RequestBP(route.MCCAddTx.String(), addTxReq, addTxResp); err != nil { - le.WithError(err).Warning("send tx failed") - } - } } } } @@ -983,28 +984,33 @@ func (c *Chain) stat() { c.st.Stat(c.databaseID) } -func (c *Chain) billing(node *blockNode) (ub *types.UpdateBilling, err error) { - log.WithField("db", c.databaseID).Debugf("begin to billing from count %d", node.count) +func (c *Chain) billing(h int32, node *blockNode) (ub *types.UpdateBilling, err error) { + le := c.logEntryWithHeadState() + le.WithFields(log.Fields{"given_height": h}).Debug("begin to billing") var ( i, j uint64 + iter *blockNode minerAddr proto.AccountAddress userAddr proto.AccountAddress + minHeight = h - int32(c.updatePeriod) usersMap = make(map[proto.AccountAddress]uint64) minersMap = make(map[proto.AccountAddress]map[proto.AccountAddress]uint64) ) - for i = 0; i < c.updatePeriod && node != nil; i++ { - var block = node.block + for iter = node; iter != nil && iter.height > h; iter = iter.parent { + } + for i = 0; i < c.updatePeriod && iter != nil && iter.height > minHeight; i++ { + var block = iter.block // Not cached, recover from storage if block == nil { - if block, err = c.FetchBlock(node.height); err != nil { + if block, err = c.FetchBlock(iter.height); err != nil { return } } for _, tx := range block.QueryTxs { minerAddr = tx.Response.ResponseAccount if userAddr, err = crypto.PubKeyHash(tx.Request.Header.Signee); err != nil { - log.WithError(err).WithField("db", c.databaseID).Warning("billing fail: miner addr") + le.WithError(err).Warning("billing fail: miner addr") return } @@ -1022,11 +1028,11 @@ func (c *Chain) billing(node *blockNode) (ub *types.UpdateBilling, err error) { for _, req := range block.FailedReqs { if minerAddr, err = crypto.PubKeyHash(block.Signee()); err != nil { - log.WithError(err).WithField("db", 
c.databaseID).Warning("billing fail: miner addr") + le.WithError(err).Warning("billing fail: miner addr") return } if userAddr, err = crypto.PubKeyHash(req.Header.Signee); err != nil { - log.WithError(err).WithField("db", c.databaseID).Warning("billing fail: user addr") + le.WithError(err).Warning("billing fail: user addr") return } if _, ok := minersMap[userAddr][minerAddr]; !ok { @@ -1036,7 +1042,7 @@ func (c *Chain) billing(node *blockNode) (ub *types.UpdateBilling, err error) { minersMap[userAddr][minerAddr] += uint64(len(req.Payload.Queries)) usersMap[userAddr] += uint64(len(req.Payload.Queries)) } - node = node.parent + iter = iter.parent } ub = types.NewUpdateBilling(&types.UpdateBillingHeader{ @@ -1046,7 +1052,7 @@ func (c *Chain) billing(node *blockNode) (ub *types.UpdateBilling, err error) { i = 0 j = 0 for userAddr, cost := range usersMap { - log.WithField("db", c.databaseID).Debugf("user %s, cost %d", userAddr.String(), cost) + le.Debugf("user %s, cost %d", userAddr.String(), cost) ub.Users[i] = &types.UserCost{ User: userAddr, Cost: cost, From 95a864d0c247826da42affc30138b64ac056e7ff Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Thu, 7 Mar 2019 12:45:51 +0800 Subject: [PATCH 077/244] Minor fix --- alltest.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/alltest.sh b/alltest.sh index 31421cda0..4e82157da 100755 --- a/alltest.sh +++ b/alltest.sh @@ -5,17 +5,17 @@ set -o pipefail set -o nounset main() { - go test -tags "$UNITTESTTAGS" -race -failfast -parallel 16 -cpu 16 -coverprofile main.cover.out $(go list ./... | grep -v CovenantSQL/api) - go test -tags "$UNITTESTTAGS" -race -failfast -parallel 16 -cpu 16 -coverpkg ./api/...,./rpc/jsonrpc -coverprofile api.cover.out ./api/... + go test -tags "${UNITTESTTAGS:-}" -race -failfast -parallel 16 -cpu 16 -coverprofile main.cover.out $(go list ./... 
| grep -v CovenantSQL/api) + go test -tags "${UNITTESTTAGS:-}" -race -failfast -parallel 16 -cpu 16 -coverpkg ./api/...,./rpc/jsonrpc -coverprofile api.cover.out ./api/... set -x gocovmerge main.cover.out api.cover.out $(find cmd -name "*.cover.out") | grep -F -v '_gen.go' > coverage.txt && rm -f *.cover.out bash <(curl -s https://codecov.io/bash) # some benchmarks - go test -tags "$UNITTESTTAGS" -bench=^BenchmarkPersistentCaller_Call$ -run ^$ ./rpc/ + go test -tags "${UNITTESTTAGS:-}" -bench=^BenchmarkPersistentCaller_Call$ -run ^$ ./rpc/ bash cleanupDB.sh || true - go test -tags "$UNITTESTTAGS" -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ + go test -tags "${UNITTESTTAGS:-}" -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ bash cleanupDB.sh || true } From 5a98458245637d01121f102250d8c45f69974a90 Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Thu, 7 Mar 2019 14:39:20 +0800 Subject: [PATCH 078/244] Add select statement for channel operator --- sqlchain/chain.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/sqlchain/chain.go b/sqlchain/chain.go index 30560ad93..8493373e9 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -577,6 +577,10 @@ func (c *Chain) syncHead() { // runCurrentTurn does the check and runs block producing if its my turn. func (c *Chain) runCurrentTurn(now time.Time) { + le := c.logEntryWithHeadState().WithFields(log.Fields{ + "using_timestamp": now.Format(time.RFC3339Nano), + }) + defer func() { c.stat() c.pruneBlockCache() @@ -584,13 +588,13 @@ func (c *Chain) runCurrentTurn(now time.Time) { c.ai.advance(c.rt.getMinValidHeight()) // Info the block processing goroutine that the chain height has grown, so please return // any stashed blocks for further check. 
- c.heights <- c.rt.getHead().Height + select { + case c.heights <- c.rt.getHead().Height: + case <-c.rt.ctx.Done(): + le.Debug("abort publishing height") + } }() - le := c.logEntryWithHeadState().WithFields(log.Fields{ - "using_timestamp": now.Format(time.RFC3339Nano), - }) - le.Debug("run current turn") if c.rt.getHead().Height < c.rt.getNextTurn()-1 { le.Error("a block will be skipped") From 05b24c4ca16276df781ea11c2fbc81521475ce0d Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Thu, 7 Mar 2019 15:39:35 +0800 Subject: [PATCH 079/244] Minor fix --- cmd/cql-minerd/integration_test.go | 2 +- sqlchain/chain.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index ff818f234..9d8202538 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -770,7 +770,7 @@ func benchMiner(b *testing.B, minerCount uint16, bypassSign bool, useEventualCon // create meta := client.ResourceMeta{ ResourceMeta: types.ResourceMeta{ - Node: minerCount, + Node: minerCount, UseEventualConsistency: useEventualConsistency, }, } diff --git a/sqlchain/chain.go b/sqlchain/chain.go index 8493373e9..30c28d0bb 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -990,7 +990,7 @@ func (c *Chain) stat() { func (c *Chain) billing(h int32, node *blockNode) (ub *types.UpdateBilling, err error) { le := c.logEntryWithHeadState() - le.WithFields(log.Fields{"given_height": h}).Debug("begin to billing") + le.WithFields(log.Fields{"given_height": h}).Info("begin to billing") var ( i, j uint64 iter *blockNode @@ -1003,7 +1003,7 @@ func (c *Chain) billing(h int32, node *blockNode) (ub *types.UpdateBilling, err for iter = node; iter != nil && iter.height > h; iter = iter.parent { } - for i = 0; i < c.updatePeriod && iter != nil && iter.height > minHeight; i++ { + for iter != nil && iter.height > minHeight { var block = iter.block // Not cached, recover from storage if block == nil 
{ From e99f434b69d786a944e2ce567e2ec6f7c03ca49f Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Thu, 7 Mar 2019 16:13:22 +0800 Subject: [PATCH 080/244] Remove obsolete billing protocol --- blockproducer/metastate.go | 17 -- blockproducer/metastate_test.go | 51 ++--- types/billing.go | 98 --------- types/billing_gen.go | 95 --------- types/billing_gen_test.go | 84 -------- types/billing_req.go | 33 --- types/billing_request.go | 155 -------------- types/billing_request_gen.go | 123 ----------- types/billing_request_gen_test.go | 84 -------- types/billing_request_test.go | 339 ------------------------------ types/billing_test.go | 141 ------------- types/bprpc.go | 11 - types/xxx_test.go | 98 --------- 13 files changed, 15 insertions(+), 1314 deletions(-) delete mode 100644 types/billing.go delete mode 100644 types/billing_gen.go delete mode 100644 types/billing_gen_test.go delete mode 100644 types/billing_req.go delete mode 100644 types/billing_request.go delete mode 100644 types/billing_request_gen.go delete mode 100644 types/billing_request_gen_test.go delete mode 100644 types/billing_request_test.go delete mode 100644 types/billing_test.go diff --git a/blockproducer/metastate.go b/blockproducer/metastate.go index 2473d899b..59d13687b 100644 --- a/blockproducer/metastate.go +++ b/blockproducer/metastate.go @@ -509,21 +509,6 @@ func (s *metaState) increaseNonce(addr proto.AccountAddress) (err error) { return } -func (s *metaState) applyBilling(tx *types.Billing) (err error) { - for i, v := range tx.Receivers { - // Create empty receiver account if not found - s.loadOrStoreAccountObject(*v, &types.Account{Address: *v}) - - if err = s.increaseAccountCovenantBalance(*v, tx.Fees[i]); err != nil { - return - } - if err = s.increaseAccountStableBalance(*v, tx.Rewards[i]); err != nil { - return - } - } - return -} - func (s *metaState) updateProviderList(tx *types.ProvideService) (err error) { sender, err := crypto.PubKeyHash(tx.Signee) if err != nil { @@ -1135,8 +1120,6 
@@ func (s *metaState) applyTransaction(tx pi.Transaction) (err error) { err = s.transferAccountToken(t) } return - case *types.Billing: - err = s.applyBilling(t) case *types.BaseAccount: err = s.storeBaseAccount(t.Address, &t.Account) case *types.ProvideService: diff --git a/blockproducer/metastate_test.go b/blockproducer/metastate_test.go index 80620e5a0..7f1d5a4cb 100644 --- a/blockproducer/metastate_test.go +++ b/blockproducer/metastate_test.go @@ -489,13 +489,12 @@ func TestMetaState(t *testing.T) { Amount: 0, }, ) - t2 = types.NewBilling( - &types.BillingHeader{ - Nonce: 2, - Producer: addr1, - Receivers: []*proto.AccountAddress{&addr2}, - Fees: []uint64{1}, - Rewards: []uint64{1}, + t2 = types.NewTransfer( + &types.TransferHeader{ + Sender: addr1, + Receiver: addr2, + Nonce: 2, + Amount: 0, }, ) ) @@ -557,29 +556,11 @@ func TestMetaState(t *testing.T) { Amount: 10, }, ), - types.NewBilling( - &types.BillingHeader{ - Nonce: 2, - Producer: addr1, - Receivers: []*proto.AccountAddress{&addr2}, - Fees: []uint64{1}, - Rewards: []uint64{1}, - }, - ), - types.NewBilling( - &types.BillingHeader{ - Nonce: 1, - Producer: addr2, - Receivers: []*proto.AccountAddress{&addr1}, - Fees: []uint64{1}, - Rewards: []uint64{1}, - }, - ), types.NewTransfer( &types.TransferHeader{ Sender: addr2, Receiver: addr1, - Nonce: 2, + Nonce: 1, Amount: 1, }, ), @@ -587,7 +568,7 @@ func TestMetaState(t *testing.T) { &types.TransferHeader{ Sender: addr1, Receiver: addr2, - Nonce: 3, + Nonce: 2, Amount: 10, }, ), @@ -595,7 +576,7 @@ func TestMetaState(t *testing.T) { &types.TransferHeader{ Sender: addr2, Receiver: addr1, - Nonce: 3, + Nonce: 2, Amount: 1, }, ), @@ -603,7 +584,7 @@ func TestMetaState(t *testing.T) { &types.TransferHeader{ Sender: addr2, Receiver: addr1, - Nonce: 4, + Nonce: 3, Amount: 1, }, ), @@ -612,12 +593,10 @@ func TestMetaState(t *testing.T) { txs[0].Sign(privKey1) txs[1].Sign(privKey2) txs[2].Sign(privKey1) - txs[3].Sign(privKey1) - txs[4].Sign(privKey2) + 
txs[3].Sign(privKey2) + txs[4].Sign(privKey1) txs[5].Sign(privKey2) - txs[6].Sign(privKey1) - txs[7].Sign(privKey2) - txs[8].Sign(privKey2) + txs[6].Sign(privKey2) for _, tx := range txs { err = ms.apply(tx) So(err, ShouldBeNil) @@ -626,10 +605,10 @@ func TestMetaState(t *testing.T) { Convey("The state should match the update result", func() { bl, loaded = ms.loadAccountTokenBalance(addr1, types.Particle) So(loaded, ShouldBeTrue) - So(bl, ShouldEqual, 84) + So(bl, ShouldEqual, 83) bl, loaded = ms.loadAccountTokenBalance(addr2, types.Particle) So(loaded, ShouldBeTrue) - So(bl, ShouldEqual, 118) + So(bl, ShouldEqual, 117) }) }) Convey("When SQLChain are created", func() { diff --git a/types/billing.go b/types/billing.go deleted file mode 100644 index a57ae9184..000000000 --- a/types/billing.go +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package types - -import ( - pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/verifier" - "github.com/CovenantSQL/CovenantSQL/proto" -) - -//go:generate hsp - -// BillingHeader defines the customer's billing and block rewards in transaction. 
-type BillingHeader struct { - // Transaction nonce - Nonce pi.AccountNonce - BillingRequest BillingRequest - // Bill producer - Producer proto.AccountAddress - // Bill receivers - Receivers []*proto.AccountAddress - // Fee paid by stable coin - Fees []uint64 - // Reward is share coin - Rewards []uint64 -} - -// NewBillingHeader generates new BillingHeader. -func NewBillingHeader(nonce pi.AccountNonce, bReq *BillingRequest, producer proto.AccountAddress, receivers []*proto.AccountAddress, - fees []uint64, rewards []uint64) *BillingHeader { - return &BillingHeader{ - Nonce: nonce, - BillingRequest: *bReq, - Producer: producer, - Receivers: receivers, - Fees: fees, - Rewards: rewards, - } -} - -// Billing is a type of tx, that is used to record sql chain billing and block rewards. -type Billing struct { - BillingHeader - pi.TransactionTypeMixin - verifier.DefaultHashSignVerifierImpl -} - -// NewBilling generates a new Billing. -func NewBilling(header *BillingHeader) *Billing { - return &Billing{ - BillingHeader: *header, - TransactionTypeMixin: *pi.NewTransactionTypeMixin(pi.TransactionTypeBilling), - } -} - -// Sign implements interfaces/Transaction.Sign. -func (tb *Billing) Sign(signer *asymmetric.PrivateKey) (err error) { - return tb.DefaultHashSignVerifierImpl.Sign(&tb.BillingHeader, signer) -} - -// Verify implements interfaces/Transaction.Verify. -func (tb *Billing) Verify() error { - return tb.DefaultHashSignVerifierImpl.Verify(&tb.BillingHeader) -} - -// GetAccountAddress implements interfaces/Transaction.GetAccountAddress. -func (tb *Billing) GetAccountAddress() proto.AccountAddress { - return tb.Producer -} - -// GetAccountNonce implements interfaces/Transaction.GetAccountNonce. -func (tb *Billing) GetAccountNonce() pi.AccountNonce { - return tb.Nonce -} - -// GetDatabaseID gets the database ID. 
-func (tb *Billing) GetDatabaseID() proto.DatabaseID { - return tb.BillingRequest.Header.DatabaseID -} - -func init() { - pi.RegisterTransaction(pi.TransactionTypeBilling, (*Billing)(nil)) -} diff --git a/types/billing_gen.go b/types/billing_gen.go deleted file mode 100644 index 6d5d57f6b..000000000 --- a/types/billing_gen.go +++ /dev/null @@ -1,95 +0,0 @@ -package types - -// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. - -import ( - hsp "github.com/CovenantSQL/HashStablePack/marshalhash" -) - -// MarshalHash marshals for hash -func (z *Billing) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 3 - o = append(o, 0x83) - if oTemp, err := z.BillingHeader.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - if oTemp, err := z.DefaultHashSignVerifierImpl.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - if oTemp, err := z.TransactionTypeMixin.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *Billing) Msgsize() (s int) { - s = 1 + 14 + z.BillingHeader.Msgsize() + 28 + z.DefaultHashSignVerifierImpl.Msgsize() + 21 + z.TransactionTypeMixin.Msgsize() - return -} - -// MarshalHash marshals for hash -func (z *BillingHeader) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 6 - o = append(o, 0x86) - if oTemp, err := z.BillingRequest.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = hsp.AppendArrayHeader(o, uint32(len(z.Fees))) - for za0002 := range z.Fees { - o = hsp.AppendUint64(o, z.Fees[za0002]) - } - if oTemp, err := z.Nonce.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - if oTemp, err 
:= z.Producer.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = hsp.AppendArrayHeader(o, uint32(len(z.Receivers))) - for za0001 := range z.Receivers { - if z.Receivers[za0001] == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Receivers[za0001].MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - } - o = hsp.AppendArrayHeader(o, uint32(len(z.Rewards))) - for za0003 := range z.Rewards { - o = hsp.AppendUint64(o, z.Rewards[za0003]) - } - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *BillingHeader) Msgsize() (s int) { - s = 1 + 15 + z.BillingRequest.Msgsize() + 5 + hsp.ArrayHeaderSize + (len(z.Fees) * (hsp.Uint64Size)) + 6 + z.Nonce.Msgsize() + 9 + z.Producer.Msgsize() + 10 + hsp.ArrayHeaderSize - for za0001 := range z.Receivers { - if z.Receivers[za0001] == nil { - s += hsp.NilSize - } else { - s += z.Receivers[za0001].Msgsize() - } - } - s += 8 + hsp.ArrayHeaderSize + (len(z.Rewards) * (hsp.Uint64Size)) - return -} diff --git a/types/billing_gen_test.go b/types/billing_gen_test.go deleted file mode 100644 index 845a15213..000000000 --- a/types/billing_gen_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package types - -// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
- -import ( - "bytes" - "crypto/rand" - "encoding/binary" - "testing" -) - -func TestMarshalHashBilling(t *testing.T) { - v := Billing{} - binary.Read(rand.Reader, binary.BigEndian, &v) - bts1, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - bts2, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(bts1, bts2) { - t.Fatal("hash not stable") - } -} - -func BenchmarkMarshalHashBilling(b *testing.B) { - v := Billing{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalHash() - } -} - -func BenchmarkAppendMsgBilling(b *testing.B) { - v := Billing{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalHash() - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalHash() - } -} - -func TestMarshalHashBillingHeader(t *testing.T) { - v := BillingHeader{} - binary.Read(rand.Reader, binary.BigEndian, &v) - bts1, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - bts2, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(bts1, bts2) { - t.Fatal("hash not stable") - } -} - -func BenchmarkMarshalHashBillingHeader(b *testing.B) { - v := BillingHeader{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalHash() - } -} - -func BenchmarkAppendMsgBillingHeader(b *testing.B) { - v := BillingHeader{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalHash() - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalHash() - } -} diff --git a/types/billing_req.go b/types/billing_req.go deleted file mode 100644 index d3a50a210..000000000 --- a/types/billing_req.go +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package types - -import ( - "github.com/CovenantSQL/CovenantSQL/proto" -) - -// AdviseBillingReq defines a request of the AdviseBillingRequest RPC method. -type AdviseBillingReq struct { - proto.Envelope - Req *BillingRequest -} - -// AdviseBillingResp defines a request of the AdviseBillingRequest RPC method. -type AdviseBillingResp struct { - proto.Envelope - Resp *BillingRequest -} diff --git a/types/billing_request.go b/types/billing_request.go deleted file mode 100644 index 14d96c1b9..000000000 --- a/types/billing_request.go +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package types - -import ( - "reflect" - - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/proto" -) - -//go:generate hsp - -// BillingRequestHeader includes contents that need to be signed. Billing blocks should be within -// height range [low, high] (inclusive). 
-type BillingRequestHeader struct { - DatabaseID proto.DatabaseID - // sqlchain block hash and its height - LowBlock hash.Hash - LowHeight int32 - HighBlock hash.Hash - HighHeight int32 - GasAmounts []*proto.AddrAndGas -} - -// BillingRequest defines periodically Billing sync. -type BillingRequest struct { - Header BillingRequestHeader - RequestHash hash.Hash - Signees []*asymmetric.PublicKey - Signatures []*asymmetric.Signature -} - -// PackRequestHeader computes the hash of header. -func (br *BillingRequest) PackRequestHeader() (h *hash.Hash, err error) { - var enc []byte - if enc, err = br.Header.MarshalHash(); err != nil { - return - } - - br.RequestHash = hash.THashH(enc) - h = &br.RequestHash - return -} - -// SignRequestHeader first computes the hash of BillingRequestHeader, then signs the request. -func (br *BillingRequest) SignRequestHeader(signer *asymmetric.PrivateKey, calcHash bool) ( - signee *asymmetric.PublicKey, signature *asymmetric.Signature, err error) { - if calcHash { - if _, err = br.PackRequestHeader(); err != nil { - return - } - } - - if signature, err = signer.Sign(br.RequestHash[:]); err == nil { - // append to current signatures - signee = signer.PubKey() - br.Signees = append(br.Signees, signee) - br.Signatures = append(br.Signatures, signature) - } - - return -} - -// AddSignature add existing signature to BillingRequest, requires the structure to be packed first. -func (br *BillingRequest) AddSignature( - signee *asymmetric.PublicKey, signature *asymmetric.Signature, calcHash bool) (err error) { - if calcHash { - if _, err = br.PackRequestHeader(); err != nil { - return - } - } - - if !signature.Verify(br.RequestHash[:], signee) { - err = ErrSignVerification - return - } - - // append - br.Signees = append(br.Signees, signee) - br.Signatures = append(br.Signatures, signature) - - return -} - -// VerifySignatures verify existing signatures. 
-func (br *BillingRequest) VerifySignatures() (err error) { - if len(br.Signees) != len(br.Signatures) { - return ErrSignVerification - } - - var enc []byte - if enc, err = br.Header.MarshalHash(); err != nil { - return - } - - h := hash.THashH(enc) - if !br.RequestHash.IsEqual(&h) { - return ErrSignVerification - } - - if len(br.Signees) == 0 { - return - } - - for idx, signee := range br.Signees { - if !br.Signatures[idx].Verify(br.RequestHash[:], signee) { - return ErrSignVerification - } - } - - return -} - -// Compare returns if two billing records are identical. -func (br *BillingRequest) Compare(r *BillingRequest) (err error) { - if !br.Header.LowBlock.IsEqual(&r.Header.LowBlock) || - !br.Header.HighBlock.IsEqual(&br.Header.HighBlock) { - err = ErrBillingNotMatch - return - } - - reqMap := make(map[proto.AccountAddress]*proto.AddrAndGas) - locMap := make(map[proto.AccountAddress]*proto.AddrAndGas) - - for _, v := range br.Header.GasAmounts { - reqMap[v.AccountAddress] = v - } - - for _, v := range r.Header.GasAmounts { - locMap[v.AccountAddress] = v - } - - if !reflect.DeepEqual(reqMap, locMap) { - err = ErrBillingNotMatch - return - } - - return -} diff --git a/types/billing_request_gen.go b/types/billing_request_gen.go deleted file mode 100644 index 9a631c25f..000000000 --- a/types/billing_request_gen.go +++ /dev/null @@ -1,123 +0,0 @@ -package types - -// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
- -import ( - hsp "github.com/CovenantSQL/HashStablePack/marshalhash" -) - -// MarshalHash marshals for hash -func (z *BillingRequest) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 4 - o = append(o, 0x84) - if oTemp, err := z.Header.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - if oTemp, err := z.RequestHash.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = hsp.AppendArrayHeader(o, uint32(len(z.Signatures))) - for za0002 := range z.Signatures { - if z.Signatures[za0002] == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Signatures[za0002].MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - } - o = hsp.AppendArrayHeader(o, uint32(len(z.Signees))) - for za0001 := range z.Signees { - if z.Signees[za0001] == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.Signees[za0001].MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - } - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *BillingRequest) Msgsize() (s int) { - s = 1 + 7 + z.Header.Msgsize() + 12 + z.RequestHash.Msgsize() + 11 + hsp.ArrayHeaderSize - for za0002 := range z.Signatures { - if z.Signatures[za0002] == nil { - s += hsp.NilSize - } else { - s += z.Signatures[za0002].Msgsize() - } - } - s += 8 + hsp.ArrayHeaderSize - for za0001 := range z.Signees { - if z.Signees[za0001] == nil { - s += hsp.NilSize - } else { - s += z.Signees[za0001].Msgsize() - } - } - return -} - -// MarshalHash marshals for hash -func (z *BillingRequestHeader) MarshalHash() (o []byte, err error) { - var b []byte - o = hsp.Require(b, z.Msgsize()) - // map header, size 6 - o = append(o, 0x86) - if oTemp, err := z.DatabaseID.MarshalHash(); err != nil { - return nil, err - } else { - o = 
hsp.AppendBytes(o, oTemp) - } - o = hsp.AppendArrayHeader(o, uint32(len(z.GasAmounts))) - for za0001 := range z.GasAmounts { - if z.GasAmounts[za0001] == nil { - o = hsp.AppendNil(o) - } else { - if oTemp, err := z.GasAmounts[za0001].MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - } - } - if oTemp, err := z.HighBlock.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = hsp.AppendInt32(o, z.HighHeight) - if oTemp, err := z.LowBlock.MarshalHash(); err != nil { - return nil, err - } else { - o = hsp.AppendBytes(o, oTemp) - } - o = hsp.AppendInt32(o, z.LowHeight) - return -} - -// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message -func (z *BillingRequestHeader) Msgsize() (s int) { - s = 1 + 11 + z.DatabaseID.Msgsize() + 11 + hsp.ArrayHeaderSize - for za0001 := range z.GasAmounts { - if z.GasAmounts[za0001] == nil { - s += hsp.NilSize - } else { - s += z.GasAmounts[za0001].Msgsize() - } - } - s += 10 + z.HighBlock.Msgsize() + 11 + hsp.Int32Size + 9 + z.LowBlock.Msgsize() + 10 + hsp.Int32Size - return -} diff --git a/types/billing_request_gen_test.go b/types/billing_request_gen_test.go deleted file mode 100644 index d46613c46..000000000 --- a/types/billing_request_gen_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package types - -// Code generated by github.com/CovenantSQL/HashStablePack DO NOT EDIT. 
- -import ( - "bytes" - "crypto/rand" - "encoding/binary" - "testing" -) - -func TestMarshalHashBillingRequest(t *testing.T) { - v := BillingRequest{} - binary.Read(rand.Reader, binary.BigEndian, &v) - bts1, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - bts2, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(bts1, bts2) { - t.Fatal("hash not stable") - } -} - -func BenchmarkMarshalHashBillingRequest(b *testing.B) { - v := BillingRequest{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalHash() - } -} - -func BenchmarkAppendMsgBillingRequest(b *testing.B) { - v := BillingRequest{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalHash() - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalHash() - } -} - -func TestMarshalHashBillingRequestHeader(t *testing.T) { - v := BillingRequestHeader{} - binary.Read(rand.Reader, binary.BigEndian, &v) - bts1, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - bts2, err := v.MarshalHash() - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(bts1, bts2) { - t.Fatal("hash not stable") - } -} - -func BenchmarkMarshalHashBillingRequestHeader(b *testing.B) { - v := BillingRequestHeader{} - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - v.MarshalHash() - } -} - -func BenchmarkAppendMsgBillingRequestHeader(b *testing.B) { - v := BillingRequestHeader{} - bts := make([]byte, 0, v.Msgsize()) - bts, _ = v.MarshalHash() - b.SetBytes(int64(len(bts))) - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - bts, _ = v.MarshalHash() - } -} diff --git a/types/billing_request_test.go b/types/billing_request_test.go deleted file mode 100644 index c6d7ee998..000000000 --- a/types/billing_request_test.go +++ /dev/null @@ -1,339 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package types - -import ( - "reflect" - "testing" - - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/utils" - "github.com/CovenantSQL/CovenantSQL/utils/log" -) - -var ( - peerNum = 32 -) - -func TestBillingRequestHeader_MarshalUnmarshalBinary(t *testing.T) { - reqHeader := generateRandomBillingRequestHeader() - b, err := utils.EncodeMsgPack(reqHeader) - if err != nil { - t.Fatalf("unexpect error when marshal request header: %v", err) - } - - newReqHeader := &BillingRequestHeader{} - err = utils.DecodeMsgPack(b.Bytes(), newReqHeader) - if err != nil { - t.Fatalf("unexpect error when unmashll request header: %v", err) - } - - if !reflect.DeepEqual(reqHeader, newReqHeader) { - t.Fatalf("values not match:\n\tv0=%+v\n\tv1=%+v", reqHeader, newReqHeader) - } -} - -func TestBillingRequest_MarshalUnmarshalBinary(t *testing.T) { - req, err := generateRandomBillingRequest() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - enc, err := utils.EncodeMsgPack(req) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - dec := &BillingRequest{} - err = utils.DecodeMsgPack(enc.Bytes(), dec) - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - if !reflect.DeepEqual(req, dec) { - log.Debug(req) - log.Debug(dec) - t.Fatal("values not match") - } 
-} - -func TestBillingRequest_PackRequestHeader(t *testing.T) { - req, err := generateRandomBillingRequest() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - enc, err := req.Header.MarshalHash() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - h := hash.THashH(enc) - if !h.IsEqual(&req.RequestHash) { - t.Fatalf("hash not matched: \n\tv1=%v\n\tv2=%v", req.RequestHash, h) - } -} - -func TestBillingRequest_SignRequestHeader(t *testing.T) { - req, err := generateRandomBillingRequest() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - enc, err := req.Header.MarshalHash() - if err != nil { - t.Fatalf("unexpected error: %v", err) - } - - h := hash.THashH(enc) - if !h.IsEqual(&req.RequestHash) { - t.Fatalf("hash not matched: \n\tv1=%v\n\tv2=%v", req.RequestHash, h) - } - - for i, sign := range req.Signatures { - if !sign.Verify(req.RequestHash[:], req.Signees[i]) { - - t.Fatalf("signature cannot match the hash and public key: %v", req) - } - } - - priv, pub, err := asymmetric.GenSecp256k1KeyPair() - _, sign, err := req.SignRequestHeader(priv, false) - if err != nil || !sign.Verify(req.RequestHash[:], pub) { - t.Fatalf("signature cannot match the hash and public key: %v", req) - } -} - -func TestBillingRequest_SignRequestHeader2(t *testing.T) { - header := generateRandomBillingRequestHeader() - req := &BillingRequest{ - Header: *header, - } - - priv, _, err := asymmetric.GenSecp256k1KeyPair() - signee, sign, err := req.SignRequestHeader(priv, true) - if err != nil || !sign.Verify(req.RequestHash[:], signee) { - t.Fatalf("signature cannot match the hash and public key: %v", req) - } -} - -func TestBillingRequest_AddSignature(t *testing.T) { - header := generateRandomBillingRequestHeader() - req := &BillingRequest{ - Header: *header, - } - - priv, _, err := asymmetric.GenSecp256k1KeyPair() - signee, sign, err := req.SignRequestHeader(priv, true) - if err != nil || !sign.Verify(req.RequestHash[:], signee) { - 
t.Fatalf("signature cannot match the hash and public key, req: %v, err: %v", req, err) - } - - // clear previous signees and signatures - req.Signees = req.Signees[:0] - req.Signatures = req.Signatures[:0] - - if err := req.AddSignature(signee, sign, false); err != nil { - t.Fatalf("add signature failed, req: %v, err: %v", req, err) - } -} - -func TestBillingRequest_AddSignature2(t *testing.T) { - header := generateRandomBillingRequestHeader() - req := &BillingRequest{ - Header: *header, - } - - priv, _, err := asymmetric.GenSecp256k1KeyPair() - signee, sign, err := req.SignRequestHeader(priv, true) - if err != nil || !sign.Verify(req.RequestHash[:], signee) { - t.Fatalf("signature cannot match the hash and public key, req: %v, err: %v", req, err) - } - - // clear previous signees and signatures - req.RequestHash = hash.Hash{} - req.Signees = req.Signees[:0] - req.Signatures = req.Signatures[:0] - - if err := req.AddSignature(signee, sign, true); err != nil { - t.Fatalf("add signature failed, req: %v, err: %v", req, err) - } -} - -func TestBillingRequest_AddSignature3(t *testing.T) { - header := generateRandomBillingRequestHeader() - req := &BillingRequest{ - Header: *header, - } - - priv, _, err := asymmetric.GenSecp256k1KeyPair() - signee, sign, err := req.SignRequestHeader(priv, true) - if err != nil || !sign.Verify(req.RequestHash[:], signee) { - t.Fatalf("signature cannot match the hash and public key, req: %v, err: %v", req, err) - } - - // clear previous signees and signatures - req.RequestHash = hash.Hash{} - req.Signees = req.Signees[:0] - req.Signatures = req.Signatures[:0] - - _, signee, _ = asymmetric.GenSecp256k1KeyPair() - if err := req.AddSignature(signee, sign, true); err != ErrSignVerification { - t.Fatalf("add signature should failed, req: %v, err: %v", req, err) - } -} - -func TestBillingRequest_VerifySignatures(t *testing.T) { - header := generateRandomBillingRequestHeader() - req := &BillingRequest{ - Header: *header, - } - - addSignature := 
func(calcHash bool) { - priv, _, err := asymmetric.GenSecp256k1KeyPair() - _, _, err = req.SignRequestHeader(priv, calcHash) - if err != nil { - t.Fatalf("sign request failed, req: %v, err: %v", req, err) - } - } - - // add 3 signatures - addSignature(true) - addSignature(false) - addSignature(false) - - if err := req.VerifySignatures(); err != nil { - t.Fatalf("verify signature failed, req: %v, err: %v", req, err) - } -} - -func TestBillingRequest_VerifySignatures2(t *testing.T) { - header := generateRandomBillingRequestHeader() - req := &BillingRequest{ - Header: *header, - } - - addSignature := func(calcHash bool) { - priv, _, err := asymmetric.GenSecp256k1KeyPair() - _, _, err = req.SignRequestHeader(priv, calcHash) - if err != nil { - t.Fatalf("sign request failed, req: %v, err: %v", req, err) - } - } - - // add 3 signatures - addSignature(true) - addSignature(false) - addSignature(false) - - // length invalidation - req.Signees = req.Signees[:0] - - if err := req.VerifySignatures(); err != ErrSignVerification { - t.Fatalf("verify should be failed, req: %v, err: %v", req, err) - } -} - -func TestBillingRequest_VerifySignatures3(t *testing.T) { - header := generateRandomBillingRequestHeader() - req := &BillingRequest{ - Header: *header, - } - - addSignature := func(calcHash bool) { - priv, _, err := asymmetric.GenSecp256k1KeyPair() - _, _, err = req.SignRequestHeader(priv, calcHash) - if err != nil { - t.Fatalf("sign request failed, req: %v, err: %v", req, err) - } - } - - // add 3 signatures - addSignature(true) - addSignature(false) - addSignature(false) - - // length invalidation - req.RequestHash = hash.Hash{} - - if err := req.VerifySignatures(); err != ErrSignVerification { - t.Fatalf("verify should be failed, req: %v, err: %v", req, err) - } -} - -func TestBillingRequest_VerifySignatures4(t *testing.T) { - header := generateRandomBillingRequestHeader() - req := &BillingRequest{ - Header: *header, - } - - addSignature := func(calcHash bool) { - priv, _, 
err := asymmetric.GenSecp256k1KeyPair() - _, _, err = req.SignRequestHeader(priv, calcHash) - if err != nil { - t.Fatalf("sign request failed, req: %v, err: %v", req, err) - } - } - - // add 3 signatures - addSignature(true) - addSignature(false) - addSignature(false) - - // length invalidation - _, req.Signees[0], _ = asymmetric.GenSecp256k1KeyPair() - - if err := req.VerifySignatures(); err == nil || err != ErrSignVerification { - t.Fatalf("verify should be failed, req: %v, err: %v", req, err) - } -} - -func TestBillingRequest_Compare(t *testing.T) { - req, _ := generateRandomBillingRequest() - - if err := req.Compare(req); err != nil { - t.Fatalf("compare failed, req: %v, err: %v", req, err) - } - - var req2 BillingRequest - req2 = *req - - req2.Header.LowBlock = hash.Hash{} - - if err := req.Compare(&req2); err != ErrBillingNotMatch { - t.Fatalf("compare should be failed, req: %v, req2: %v, err: %v", req, req2, err) - } -} - -func TestBillingRequest_Compare2(t *testing.T) { - req, _ := generateRandomBillingRequest() - var req2 BillingRequest - req2 = *req - - var gasAmount proto.AddrAndGas - gasAmount = *req.Header.GasAmounts[0] - gasAmount.GasAmount += 10 - req2.Header.GasAmounts = nil - req2.Header.GasAmounts = append(req2.Header.GasAmounts, &gasAmount) - req2.Header.GasAmounts = append(req2.Header.GasAmounts, req.Header.GasAmounts[1:]...) - - if err := req.Compare(&req2); err != ErrBillingNotMatch { - t.Fatalf("compare should be failed, req: %v, req2: %v, err: %v", req, req2, err) - } -} diff --git a/types/billing_test.go b/types/billing_test.go deleted file mode 100644 index 71ba70ec4..000000000 --- a/types/billing_test.go +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package types - -import ( - "reflect" - "testing" - - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/utils" -) - -func TestBillingHeader_MarshalUnmarshalBinary(t *testing.T) { - tc, err := generateRandomBillingHeader() - if err != nil { - t.Fatalf("unexpeted error: %v", err) - } - - enc, err := utils.EncodeMsgPack(tc) - if err != nil { - t.Fatalf("unexpeted error: %v", err) - } - - dec := &BillingHeader{} - err = utils.DecodeMsgPack(enc.Bytes(), dec) - if err != nil { - t.Fatalf("unexpeted error: %v", err) - } - - if tc.Nonce != dec.Nonce { - t.Fatalf("value not match: \n\tv1=%v\n\tv2=%v", tc.Nonce, tc.Nonce) - } - if tc.BillingRequest.RequestHash != dec.BillingRequest.RequestHash { - t.Fatalf("value not match: \n\tv1=%v\n\tv2=%v", tc.BillingRequest.RequestHash, tc.BillingRequest.RequestHash) - } - if !tc.BillingRequest.Signatures[0].IsEqual(dec.BillingRequest.Signatures[0]) { - t.Fatalf("value not match: \n\tv1=%v\n\tv2=%v", tc.BillingRequest.Signatures[0], dec.BillingRequest.Signatures[0]) - } - for i := range tc.Receivers { - if !reflect.DeepEqual(tc.Receivers[i], dec.Receivers[i]) { - t.Fatalf("value not match: \n\ttc.Receivers[%d]=%v\n\tReceive[%d]=%v", i, i, tc.Receivers[i], tc.Receivers[0]) - } - if tc.Rewards[i] != dec.Rewards[i] { - t.Fatalf("value not match: \n\ttc.Rewards[%d]=%v\n\tRewards[%d]=%v", i, i, tc.Rewards[i], tc.Rewards[0]) - } - if tc.Fees[i] != dec.Fees[i] { - t.Fatalf("value not match: \n\ttc.Fees[%d]=%v\n\tFees[%d]=%v", 
i, i, tc.Fees[i], tc.Fees[0]) - } - } -} - -func TestBilling_SerializeDeserialize(t *testing.T) { - tb, err := generateRandomBilling() - if err != nil { - t.Fatalf("unexpeted error: %v", err) - } - - enc, err := utils.EncodeMsgPack(tb) - if err != nil { - t.Fatalf("unexpeted error: %v", err) - } - - dec := Billing{} - err = utils.DecodeMsgPack(enc.Bytes(), &dec) - if err != nil { - t.Fatalf("unexpeted error: %v", err) - } - - if !tb.Signature.IsEqual(dec.Signature) { - t.Fatalf("value not match: \n\tv1=%v\n\tv2=%v", tb.Signature, dec.Signature) - } - if !tb.Signee.IsEqual(dec.Signee) { - t.Fatalf("value not match: \n\tv1=%v\n\tv2=%v", tb.Signee, dec.Signee) - } - if tb.Hash() != dec.Hash() { - t.Fatalf("value not match: \n\tv1=%v\n\tv2=%v", tb.Hash(), dec.Hash()) - } -} - -func TestBilling_PackAndSignTx(t *testing.T) { - tb, err := generateRandomBilling() - if err != nil { - t.Fatalf("unexpeted error: %v", err) - } - - priv, _, err := asymmetric.GenSecp256k1KeyPair() - if err != nil { - t.Fatalf("unexpeted error: %v", err) - } - tb.Sign(priv) - enc, err := tb.BillingHeader.MarshalHash() - if err != nil { - t.Fatalf("unexpeted error: %v", err) - } - h := hash.THashH(enc[:]) - sign, err := priv.Sign(h[:]) - if err != nil { - t.Fatalf("unexpeted error: %v", err) - } - if !sign.IsEqual(tb.Signature) { - t.Fatalf("value not match: \n\tv1=%v\n\tv2=%v", sign, tb.Signature) - } - - err = tb.Verify() - if err != nil { - t.Fatalf("verify signature failed: %v", err) - } - - // get - addr := hash.Hash(tb.GetAccountAddress()) - if addr.IsEqual(&hash.Hash{}) { - t.Fatal("get hash failed") - } - - tb.GetAccountNonce() - - if len(tb.GetDatabaseID()) == 0 { - t.Fatal("get empty DatabaseID") - } - - tb.Signature = nil - err = tb.Verify() - if err == nil { - t.Fatal("verify signature should failed") - } -} diff --git a/types/bprpc.go b/types/bprpc.go index a68025267..73a74593c 100644 --- a/types/bprpc.go +++ b/types/bprpc.go @@ -34,17 +34,6 @@ type AdviseNewBlockResp struct { 
proto.Envelope } -// AdviseTxBillingReq defines a request of the AdviseTxBilling RPC method. -type AdviseTxBillingReq struct { - proto.Envelope - TxBilling *Billing -} - -// AdviseTxBillingResp defines a response of the AdviseTxBilling RPC method. -type AdviseTxBillingResp struct { - proto.Envelope -} - // FetchBlockReq defines a request of the FetchBlock RPC method. type FetchBlockReq struct { proto.Envelope diff --git a/types/xxx_test.go b/types/xxx_test.go index a15dd8666..521fceba0 100644 --- a/types/xxx_test.go +++ b/types/xxx_test.go @@ -23,7 +23,6 @@ import ( "testing" "time" - pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" @@ -97,103 +96,6 @@ func generateRandomBlock(parent hash.Hash, isGenesis bool) (b *BPBlock, err erro return } -func generateRandomBillingRequestHeader() *BillingRequestHeader { - return &BillingRequestHeader{ - DatabaseID: generateRandomDatabaseID(), - LowBlock: generateRandomHash(), - LowHeight: rand.Int31(), - HighBlock: generateRandomHash(), - HighHeight: rand.Int31(), - GasAmounts: generateRandomGasAmount(peerNum), - } -} - -func generateRandomBillingRequest() (req *BillingRequest, err error) { - reqHeader := generateRandomBillingRequestHeader() - req = &BillingRequest{ - Header: *reqHeader, - } - if _, err = req.PackRequestHeader(); err != nil { - return nil, err - } - - for i := 0; i < peerNum; i++ { - // Generate key pair - var priv *asymmetric.PrivateKey - - if priv, _, err = asymmetric.GenSecp256k1KeyPair(); err != nil { - return - } - - if _, _, err = req.SignRequestHeader(priv, false); err != nil { - return - } - } - - return -} - -func generateRandomBillingHeader() (tc *BillingHeader, err error) { - var req *BillingRequest - if req, err = generateRandomBillingRequest(); err != nil { - return - } - - var priv *asymmetric.PrivateKey - if priv, _, err = 
asymmetric.GenSecp256k1KeyPair(); err != nil { - return - } - - if _, _, err = req.SignRequestHeader(priv, false); err != nil { - return - } - - receivers := make([]*proto.AccountAddress, peerNum) - fees := make([]uint64, peerNum) - rewards := make([]uint64, peerNum) - for i := range fees { - h := generateRandomHash() - accountAddress := proto.AccountAddress(h) - receivers[i] = &accountAddress - fees[i] = rand.Uint64() - rewards[i] = rand.Uint64() - } - - producer := proto.AccountAddress(generateRandomHash()) - tc = NewBillingHeader(pi.AccountNonce(rand.Uint32()), req, producer, receivers, fees, rewards) - return tc, nil -} - -func generateRandomBilling() (*Billing, error) { - header, err := generateRandomBillingHeader() - if err != nil { - return nil, err - } - priv, _, err := asymmetric.GenSecp256k1KeyPair() - if err != nil { - return nil, err - } - txBilling := NewBilling(header) - if err := txBilling.Sign(priv); err != nil { - return nil, err - } - return txBilling, nil -} - -func generateRandomGasAmount(n int) []*proto.AddrAndGas { - gasAmount := make([]*proto.AddrAndGas, n) - - for i := range gasAmount { - gasAmount[i] = &proto.AddrAndGas{ - AccountAddress: proto.AccountAddress(generateRandomHash()), - RawNodeID: proto.RawNodeID{Hash: generateRandomHash()}, - GasAmount: rand.Uint64(), - } - } - - return gasAmount -} - func randBytes(n int) (b []byte) { b = make([]byte, n) rand.Read(b) From e2c1a7ab6004c8b0ce0709b8c803728883dd20ce Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Thu, 7 Mar 2019 18:50:15 +0800 Subject: [PATCH 081/244] Add billing range --- blockproducer/errors.go | 2 + blockproducer/interfaces/transaction.go | 6 +-- blockproducer/interfaces/transaction_test.go | 2 +- .../interfaces/transaction_wrapper_test.go | 30 +++++++-------- blockproducer/metastate.go | 7 ++++ blockproducer/metastate_test.go | 16 ++++++++ sqlchain/chain.go | 17 ++++++--- sqlchain/config.go | 10 ++--- sqlchain/runtime.go | 25 +++++++++--- types/bp_block_test.go | 2 +- 
types/msgpack_test.go | 3 -- types/updatebilling.go | 6 +++ types/updatebilling_gen.go | 27 +++++++++++-- types/updatebilling_gen_test.go | 37 ++++++++++++++++++ types/xxx_test.go | 29 +++++++++++++- worker/db.go | 13 +++---- worker/db_config.go | 1 + worker/dbms.go | 38 +++++++++++++++++++ 18 files changed, 219 insertions(+), 52 deletions(-) diff --git a/blockproducer/errors.go b/blockproducer/errors.go index 2a6cc74cb..f8a92e0d8 100644 --- a/blockproducer/errors.go +++ b/blockproducer/errors.go @@ -54,6 +54,8 @@ var ( ErrUnknownTransactionType = errors.New("unknown transaction type") // ErrInvalidSender indicates that tx.Signee != tx.Sender. ErrInvalidSender = errors.New("invalid sender") + // ErrInvalidRange indicates that the billing range is invalid. + ErrInvalidRange = errors.New("invalid billing range") // ErrNoSuchMiner indicates that this miner does not exist or register. ErrNoSuchMiner = errors.New("no such miner") // ErrNoEnoughMiner indicates that there is not enough miners diff --git a/blockproducer/interfaces/transaction.go b/blockproducer/interfaces/transaction.go index 085a1d74b..344c1467c 100644 --- a/blockproducer/interfaces/transaction.go +++ b/blockproducer/interfaces/transaction.go @@ -46,10 +46,8 @@ func FromBytes(b []byte) TransactionType { } const ( - // TransactionTypeBilling defines billing transaction type. - TransactionTypeBilling TransactionType = iota // TransactionTypeTransfer defines transfer transaction type. - TransactionTypeTransfer + TransactionTypeTransfer TransactionType = iota // TransactionTypeCreateAccount defines account creation transaction type. TransactionTypeCreateAccount // TransactionTypeDeleteAccount defines account deletion transaction type. 
@@ -78,8 +76,6 @@ const ( func (t TransactionType) String() string { switch t { - case TransactionTypeBilling: - return "Billing" case TransactionTypeTransfer: return "Transfer" case TransactionTypeCreateAccount: diff --git a/blockproducer/interfaces/transaction_test.go b/blockproducer/interfaces/transaction_test.go index 7a80ac53b..c7c436e43 100644 --- a/blockproducer/interfaces/transaction_test.go +++ b/blockproducer/interfaces/transaction_test.go @@ -55,7 +55,7 @@ func TestTypes(t *testing.T) { } }) Convey("test string", t, func() { - for i := TransactionTypeBilling; i != TransactionTypeNumber+1; i++ { + for i := TransactionTypeTransfer; i != TransactionTypeNumber+1; i++ { So(i.String(), ShouldNotBeEmpty) } }) diff --git a/blockproducer/interfaces/transaction_wrapper_test.go b/blockproducer/interfaces/transaction_wrapper_test.go index 8084c4453..3cf018e33 100644 --- a/blockproducer/interfaces/transaction_wrapper_test.go +++ b/blockproducer/interfaces/transaction_wrapper_test.go @@ -63,7 +63,7 @@ func (e *TestTransactionEncode) Msgsize() int { } func init() { - pi.RegisterTransaction(pi.TransactionTypeBilling, (*TestTransactionEncode)(nil)) + pi.RegisterTransaction(pi.TransactionTypeTransfer, (*TestTransactionEncode)(nil)) } func TestTransactionWrapper(t *testing.T) { @@ -81,13 +81,13 @@ func TestTransactionWrapper(t *testing.T) { // encode test e := &TestTransactionEncode{} - e.SetTransactionType(pi.TransactionTypeBilling) + e.SetTransactionType(pi.TransactionTypeTransfer) buf, err = utils.EncodeMsgPack(e) So(err, ShouldBeNil) var v2 pi.Transaction err = utils.DecodeMsgPack(buf.Bytes(), &v2) So(err, ShouldBeNil) - So(v2.GetTransactionType(), ShouldEqual, pi.TransactionTypeBilling) + So(v2.GetTransactionType(), ShouldEqual, pi.TransactionTypeTransfer) // encode with wrapper test e2 := pi.WrapTransaction(e) @@ -96,14 +96,14 @@ func TestTransactionWrapper(t *testing.T) { var v3 pi.Transaction err = utils.DecodeMsgPack(buf.Bytes(), &v3) So(err, ShouldBeNil) - 
So(v3.GetTransactionType(), ShouldEqual, pi.TransactionTypeBilling) + So(v3.GetTransactionType(), ShouldEqual, pi.TransactionTypeTransfer) tw, ok := v3.(*pi.TransactionWrapper) So(ok, ShouldBeTrue) - So(tw.Unwrap().GetTransactionType(), ShouldEqual, pi.TransactionTypeBilling) + So(tw.Unwrap().GetTransactionType(), ShouldEqual, pi.TransactionTypeTransfer) // test encode non-existence type e3 := &TestTransactionEncode{} - e3.SetTransactionType(pi.TransactionTypeTransfer) + e3.SetTransactionType(pi.TransactionTypeCreateAccount) buf, err = utils.EncodeMsgPack(e3) So(err, ShouldBeNil) var v4 pi.Transaction @@ -132,21 +132,21 @@ func TestTransactionWrapper(t *testing.T) { So(err, ShouldNotBeNil) // test invalid decode, nil payload - buf, err = utils.EncodeMsgPack([]interface{}{pi.TransactionTypeBilling, nil}) + buf, err = utils.EncodeMsgPack([]interface{}{pi.TransactionTypeTransfer, nil}) So(err, ShouldBeNil) var v8 pi.Transaction err = utils.DecodeMsgPack(buf.Bytes(), &v8) So(err, ShouldNotBeNil) // test invalid decode, invalid payload container type - buf, err = utils.EncodeMsgPack([]interface{}{pi.TransactionTypeBilling, []uint64{}}) + buf, err = utils.EncodeMsgPack([]interface{}{pi.TransactionTypeTransfer, []uint64{}}) So(err, ShouldBeNil) var v9 pi.Transaction err = utils.DecodeMsgPack(buf.Bytes(), &v9) So(err, ShouldNotBeNil) // extra payload - buf, err = utils.EncodeMsgPack([]interface{}{pi.TransactionTypeBilling, e, 1, 2}) + buf, err = utils.EncodeMsgPack([]interface{}{pi.TransactionTypeTransfer, e, 1, 2}) So(err, ShouldBeNil) var v10 pi.Transaction err = utils.DecodeMsgPack(buf.Bytes(), &v10) @@ -174,14 +174,14 @@ func TestTransactionWrapper(t *testing.T) { So(err, ShouldNotBeNil) // test tx data - buf, err = utils.EncodeMsgPack(map[string]interface{}{"TxType": pi.TransactionTypeBilling, "TestField": 1}) + buf, err = utils.EncodeMsgPack(map[string]interface{}{"TxType": pi.TransactionTypeTransfer, "TestField": 1}) So(err, ShouldBeNil) var v14 pi.Transaction err = 
utils.DecodeMsgPack(buf.Bytes(), &v14) So(err, ShouldBeNil) // test invalid tx data - buf, err = utils.EncodeMsgPack(map[string]interface{}{"TxType": pi.TransactionTypeBilling, "TestField": "happy"}) + buf, err = utils.EncodeMsgPack(map[string]interface{}{"TxType": pi.TransactionTypeTransfer, "TestField": "happy"}) So(err, ShouldBeNil) var v15 pi.Transaction err = utils.DecodeMsgPack(buf.Bytes(), &v15) @@ -189,7 +189,7 @@ func TestTransactionWrapper(t *testing.T) { // test json marshal and unmarshal v16 := &TestTransactionEncode{TestField: 10} - v16.SetTransactionType(pi.TransactionTypeBilling) + v16.SetTransactionType(pi.TransactionTypeTransfer) var v17 pi.Transaction = v16 var jsonData []byte jsonData, err = json.Marshal(v17) @@ -200,7 +200,7 @@ func TestTransactionWrapper(t *testing.T) { err = json.Unmarshal(jsonData, &v18) So(err, ShouldBeNil) So(v18.(*pi.TransactionWrapper).Unwrap(), ShouldNotBeNil) - So(v18.GetTransactionType(), ShouldEqual, pi.TransactionTypeBilling) + So(v18.GetTransactionType(), ShouldEqual, pi.TransactionTypeTransfer) So(v18.(*pi.TransactionWrapper).Unwrap().(*TestTransactionEncode).TestField, ShouldEqual, 10) jsonData, err = json.Marshal(v18) @@ -210,7 +210,7 @@ func TestTransactionWrapper(t *testing.T) { jsonData = []byte(`{"TxType": 0, "TestField": 11}`) err = json.Unmarshal(jsonData, &v18) So(err, ShouldBeNil) - So(v18.GetTransactionType(), ShouldEqual, pi.TransactionTypeBilling) + So(v18.GetTransactionType(), ShouldEqual, pi.TransactionTypeTransfer) So(v18.(*pi.TransactionWrapper).Unwrap().(*TestTransactionEncode).TestField, ShouldEqual, 11) // unmarshal fail cases @@ -225,7 +225,7 @@ func TestTransactionWrapper(t *testing.T) { So(err, ShouldNotBeNil) v18.(*pi.TransactionWrapper).Transaction = nil - jsonData = []byte(fmt.Sprintf(`{"TxType": %d, "TestField": 11}`, pi.TransactionTypeTransfer)) + jsonData = []byte(fmt.Sprintf(`{"TxType": %d, "TestField": 11}`, pi.TransactionTypeCreateAccount)) err = json.Unmarshal(jsonData, &v18) 
So(err, ShouldNotBeNil) }) diff --git a/blockproducer/metastate.go b/blockproducer/metastate.go index 59d13687b..11070fb10 100644 --- a/blockproducer/metastate.go +++ b/blockproducer/metastate.go @@ -907,6 +907,12 @@ func (s *metaState) updateBilling(tx *types.UpdateBilling) (err error) { err = errors.Wrap(ErrDatabaseNotFound, "update billing failed") return } + if tx.Range.From >= tx.Range.To || newProfile.LastUpdatedHeight != tx.Range.From { + err = errors.Wrapf(ErrInvalidRange, + "update billing within range %d:(%d, %d]", + newProfile.LastUpdatedHeight, tx.Range.From, tx.Range.To) + return + } log.Debugf("update billing addr: %s, user: %d, tx: %v", tx.GetAccountAddress(), len(tx.Users), tx) if newProfile.GasPrice == 0 { @@ -980,6 +986,7 @@ func (s *metaState) updateBilling(tx *types.UpdateBilling) (err error) { } } } + newProfile.LastUpdatedHeight = tx.Range.To s.dirty.databases[tx.Receiver.DatabaseID()] = newProfile return } diff --git a/blockproducer/metastate_test.go b/blockproducer/metastate_test.go index 7f1d5a4cb..8f4be6647 100644 --- a/blockproducer/metastate_test.go +++ b/blockproducer/metastate_test.go @@ -1163,6 +1163,10 @@ func TestMetaState(t *testing.T) { }, }, }, + Range: types.Range{ + From: 0, + To: 10, + }, }) nonce, err = ms.nextNonce(addr2) So(err, ShouldBeNil) @@ -1341,6 +1345,10 @@ func TestMetaState(t *testing.T) { UpdateBillingHeader: types.UpdateBillingHeader{ Receiver: addr1, Nonce: up.Nonce, + Range: types.Range{ + From: 0, + To: 10, + }, }, } err = ub1.Sign(privKey1) @@ -1427,6 +1435,10 @@ func TestMetaState(t *testing.T) { Receiver: dbAccount, Users: users[:], Nonce: 2, + Range: types.Range{ + From: 0, + To: 10, + }, }, } err = ub2.Sign(privKey2) @@ -1474,6 +1486,10 @@ func TestMetaState(t *testing.T) { Receiver: dbAccount, Users: users[:], Nonce: 3, + Range: types.Range{ + From: 10, + To: 20, + }, }, } err = ub3.Sign(privKey2) diff --git a/sqlchain/chain.go b/sqlchain/chain.go index 30c28d0bb..22870e041 100644 --- a/sqlchain/chain.go 
+++ b/sqlchain/chain.go @@ -577,6 +577,7 @@ func (c *Chain) syncHead() { // runCurrentTurn does the check and runs block producing if its my turn. func (c *Chain) runCurrentTurn(now time.Time) { + h := c.rt.getNextTurn() le := c.logEntryWithHeadState().WithFields(log.Fields{ "using_timestamp": now.Format(time.RFC3339Nano), }) @@ -589,7 +590,7 @@ func (c *Chain) runCurrentTurn(now time.Time) { // Info the block processing goroutine that the chain height has grown, so please return // any stashed blocks for further check. select { - case c.heights <- c.rt.getHead().Height: + case c.heights <- h: case <-c.rt.ctx.Done(): le.Debug("abort publishing height") } @@ -682,9 +683,8 @@ func (c *Chain) processBlocks(ctx context.Context) { select { case h := <-c.heights: // Trigger billing - head := c.rt.getHead() if uint64(h)%c.updatePeriod == 0 { - ub, err := c.billing(h, head.node) + ub, err := c.billing(h, c.rt.getHead().node) if err != nil { le.WithError(err).Error("billing failed") } @@ -737,7 +737,6 @@ func (c *Chain) processBlocks(ctx context.Context) { } else { if err := c.CheckAndPushNewBlock(block); err != nil { le.WithError(err).Error("failed to check and push new block") - } else { } } } @@ -996,7 +995,7 @@ func (c *Chain) billing(h int32, node *blockNode) (ub *types.UpdateBilling, err iter *blockNode minerAddr proto.AccountAddress userAddr proto.AccountAddress - minHeight = h - int32(c.updatePeriod) + minHeight = c.rt.getLastBillingHeight() usersMap = make(map[proto.AccountAddress]uint64) minersMap = make(map[proto.AccountAddress]map[proto.AccountAddress]uint64) ) @@ -1075,9 +1074,17 @@ func (c *Chain) billing(h int32, node *blockNode) (ub *types.UpdateBilling, err i++ } ub.Receiver, err = c.databaseID.AccountAddress() + ub.Range.From = uint32(minHeight) + ub.Range.To = uint32(h) return } +func (c *Chain) SetLastBillingHeight(h int32) { + c.logEntryWithHeadState().WithFields( + log.Fields{"new_height": h}).Debug("set last billing height") + 
c.rt.setLastBillingHeight(h) +} + func (c *Chain) logEntry() *log.Entry { return log.WithFields(log.Fields{ "db": c.databaseID, diff --git a/sqlchain/config.go b/sqlchain/config.go index b43e5a3c7..769ceb8e4 100644 --- a/sqlchain/config.go +++ b/sqlchain/config.go @@ -42,9 +42,9 @@ type Config struct { BlockCacheTTL int32 // DBAccount info - TokenType types.TokenType - GasPrice uint64 - UpdatePeriod uint64 - - IsolationLevel int + TokenType types.TokenType + GasPrice uint64 + UpdatePeriod uint64 + LastBillingHeight int32 + IsolationLevel int } diff --git a/sqlchain/runtime.go b/sqlchain/runtime.go index 15bc069eb..9cb7dc441 100644 --- a/sqlchain/runtime.go +++ b/sqlchain/runtime.go @@ -76,8 +76,8 @@ type runtime struct { nextTurn int32 // head is the current head of the best chain. head *state - // forks is the alternative head of the sql-chain. - forks []*state + // lastBillingHeight is the last success billing height of the current database. + lastBillingHeight int32 // timeMutex protects following time-relative fields. 
timeMutex sync.Mutex @@ -126,10 +126,11 @@ func newRunTime(ctx context.Context, c *Config) (r *runtime) { return -1 }(), - total: int32(len(c.Peers.Servers)), - nextTurn: 1, - head: &state{}, - offset: time.Duration(0), + total: int32(len(c.Peers.Servers)), + nextTurn: 1, + head: &state{}, + lastBillingHeight: c.LastBillingHeight, + offset: time.Duration(0), } if c.Genesis != nil { @@ -318,6 +319,18 @@ func (r *runtime) getPeers() *proto.Peers { return &peers } +func (r *runtime) getLastBillingHeight() int32 { + r.stateMutex.Lock() + defer r.stateMutex.Unlock() + return r.lastBillingHeight +} + +func (r *runtime) setLastBillingHeight(h int32) { + r.stateMutex.Lock() + defer r.stateMutex.Unlock() + r.lastBillingHeight = h +} + func (r *runtime) getHead() *state { r.stateMutex.Lock() defer r.stateMutex.Unlock() diff --git a/types/bp_block_test.go b/types/bp_block_test.go index 14d6d6d96..24025381d 100644 --- a/types/bp_block_test.go +++ b/types/bp_block_test.go @@ -164,7 +164,7 @@ func TestBlock_PackAndSignBlock(t *testing.T) { t.Fatalf("unexpected error: %v", err) } - tb, err := generateRandomBilling() + tb, err := generateRandomTransfer() if err != nil { t.Fatalf("unexpected error: %v", err) } diff --git a/types/msgpack_test.go b/types/msgpack_test.go index 248a889b9..0698b9bc9 100644 --- a/types/msgpack_test.go +++ b/types/msgpack_test.go @@ -50,7 +50,6 @@ func TestEncodeDecodeTransactions(t *testing.T) { var t []pi.Transaction t = append(t, NewBaseAccount(&Account{})) t = append(t, NewTransfer(&TransferHeader{})) - t = append(t, NewBilling(&BillingHeader{})) t = append(t, NewCreateDatabase(&CreateDatabaseHeader{})) buf, err := utils.EncodeMsgPack(t) @@ -73,12 +72,10 @@ func TestEncodeDecodeTransactions(t *testing.T) { t.Tx = NewBaseAccount(&Account{}) t.Txs = append(t.Txs, NewBaseAccount(&Account{})) t.Txs = append(t.Txs, NewTransfer(&TransferHeader{})) - t.Txs = append(t.Txs, NewBilling(&BillingHeader{})) t.Txs = append(t.Txs, 
NewCreateDatabase(&CreateDatabaseHeader{})) t.Maps = make(map[string]pi.Transaction) t.Maps["BaseAccount"] = NewBaseAccount(&Account{}) t.Maps["Transfer"] = NewTransfer(&TransferHeader{}) - t.Maps["Billing"] = NewBilling(&BillingHeader{}) t.Maps["CreateDatabase"] = NewCreateDatabase(&CreateDatabaseHeader{}) buf, err := utils.EncodeMsgPack(t) So(err, ShouldBeNil) diff --git a/types/updatebilling.go b/types/updatebilling.go index 366b59378..9b9b12e4e 100644 --- a/types/updatebilling.go +++ b/types/updatebilling.go @@ -26,6 +26,11 @@ import ( //go:generate hsp +// Range defines a height range (from, to]. +type Range struct { + From, To uint32 +} + // MinerIncome defines the income of miner. type MinerIncome struct { Miner proto.AccountAddress @@ -44,6 +49,7 @@ type UpdateBillingHeader struct { Receiver proto.AccountAddress Nonce pi.AccountNonce Users []*UserCost + Range Range } // UpdateBilling defines the UpdateBilling transaction. diff --git a/types/updatebilling_gen.go b/types/updatebilling_gen.go index 79a626ffb..36e465be5 100644 --- a/types/updatebilling_gen.go +++ b/types/updatebilling_gen.go @@ -27,6 +27,23 @@ func (z *MinerIncome) Msgsize() (s int) { return } +// MarshalHash marshals for hash +func (z Range) MarshalHash() (o []byte, err error) { + var b []byte + o = hsp.Require(b, z.Msgsize()) + // map header, size 2 + o = append(o, 0x82) + o = hsp.AppendUint32(o, z.From) + o = hsp.AppendUint32(o, z.To) + return +} + +// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message +func (z Range) Msgsize() (s int) { + s = 1 + 5 + hsp.Uint32Size + 3 + hsp.Uint32Size + return +} + // MarshalHash marshals for hash func (z *UpdateBilling) MarshalHash() (o []byte, err error) { var b []byte @@ -61,13 +78,17 @@ func (z *UpdateBilling) Msgsize() (s int) { func (z *UpdateBillingHeader) MarshalHash() (o []byte, err error) { var b []byte o = hsp.Require(b, z.Msgsize()) - // map header, size 3 - o = append(o, 0x83) + // map header, 
size 4 + o = append(o, 0x84) if oTemp, err := z.Nonce.MarshalHash(); err != nil { return nil, err } else { o = hsp.AppendBytes(o, oTemp) } + // map header, size 2 + o = append(o, 0x82) + o = hsp.AppendUint32(o, z.Range.From) + o = hsp.AppendUint32(o, z.Range.To) if oTemp, err := z.Receiver.MarshalHash(); err != nil { return nil, err } else { @@ -90,7 +111,7 @@ func (z *UpdateBillingHeader) MarshalHash() (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *UpdateBillingHeader) Msgsize() (s int) { - s = 1 + 6 + z.Nonce.Msgsize() + 9 + z.Receiver.Msgsize() + 6 + hsp.ArrayHeaderSize + s = 1 + 6 + z.Nonce.Msgsize() + 6 + 1 + 5 + hsp.Uint32Size + 3 + hsp.Uint32Size + 9 + z.Receiver.Msgsize() + 6 + hsp.ArrayHeaderSize for za0001 := range z.Users { if z.Users[za0001] == nil { s += hsp.NilSize diff --git a/types/updatebilling_gen_test.go b/types/updatebilling_gen_test.go index d6ab30c03..8881dc0d3 100644 --- a/types/updatebilling_gen_test.go +++ b/types/updatebilling_gen_test.go @@ -46,6 +46,43 @@ func BenchmarkAppendMsgMinerIncome(b *testing.B) { } } +func TestMarshalHashRange(t *testing.T) { + v := Range{} + binary.Read(rand.Reader, binary.BigEndian, &v) + bts1, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + bts2, err := v.MarshalHash() + if err != nil { + t.Fatal(err) + } + if !bytes.Equal(bts1, bts2) { + t.Fatal("hash not stable") + } +} + +func BenchmarkMarshalHashRange(b *testing.B) { + v := Range{} + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.MarshalHash() + } +} + +func BenchmarkAppendMsgRange(b *testing.B) { + v := Range{} + bts := make([]byte, 0, v.Msgsize()) + bts, _ = v.MarshalHash() + b.SetBytes(int64(len(bts))) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + bts, _ = v.MarshalHash() + } +} + func TestMarshalHashUpdateBilling(t *testing.T) { v := UpdateBilling{} binary.Read(rand.Reader, binary.BigEndian, &v) diff --git 
a/types/xxx_test.go b/types/xxx_test.go index 521fceba0..42d719e19 100644 --- a/types/xxx_test.go +++ b/types/xxx_test.go @@ -23,6 +23,7 @@ import ( "testing" "time" + pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/crypto/kms" @@ -59,6 +60,32 @@ func randStringBytes(n int) string { return string(b) } +func generateRandomTransferHeader() (header *TransferHeader, err error) { + header = &TransferHeader{ + Nonce: pi.AccountNonce(rand.Uint64()), + Amount: rand.Uint64(), + TokenType: TokenType(rand.Intn(int(SupportTokenNumber))), + } + return +} + +func generateRandomTransfer() (tx *Transfer, err error) { + header, err := generateRandomTransferHeader() + if err != nil { + return + + } + priv, _, err := asymmetric.GenSecp256k1KeyPair() + if err != nil { + return + } + tx = NewTransfer(header) + if err = tx.Sign(priv); err != nil { + return + } + return +} + func generateRandomBlock(parent hash.Hash, isGenesis bool) (b *BPBlock, err error) { // Generate key pair priv, _, err := asymmetric.GenSecp256k1KeyPair() @@ -82,7 +109,7 @@ func generateRandomBlock(parent hash.Hash, isGenesis bool) (b *BPBlock, err erro } for i, n := 0, rand.Intn(10)+10; i < n; i++ { - tb, err := generateRandomBilling() + tb, err := generateRandomTransfer() if err != nil { return nil, err diff --git a/worker/db.go b/worker/db.go index 4e27c2148..fee900fc9 100644 --- a/worker/db.go +++ b/worker/db.go @@ -166,13 +166,12 @@ func NewDatabase(cfg *DBConfig, peers *proto.Peers, MuxService: cfg.ChainMux, Server: db.nodeID, - Period: conf.GConf.SQLChainPeriod, - Tick: conf.GConf.SQLChainTick, - QueryTTL: conf.GConf.SQLChainTTL, - - UpdatePeriod: cfg.UpdateBlockCount, - - IsolationLevel: cfg.IsolationLevel, + Period: conf.GConf.SQLChainPeriod, + Tick: conf.GConf.SQLChainTick, + QueryTTL: conf.GConf.SQLChainTTL, + LastBillingHeight: cfg.LastBillingHeight, 
+ UpdatePeriod: cfg.UpdateBlockCount, + IsolationLevel: cfg.IsolationLevel, } if db.chain, err = sqlchain.NewChain(chainCfg); err != nil { return diff --git a/worker/db_config.go b/worker/db_config.go index 97270a627..38f840959 100644 --- a/worker/db_config.go +++ b/worker/db_config.go @@ -33,6 +33,7 @@ type DBConfig struct { EncryptionKey string SpaceLimit uint64 UpdateBlockCount uint64 + LastBillingHeight int32 UseEventualConsistency bool ConsistencyLevel float64 IsolationLevel int diff --git a/worker/dbms.go b/worker/dbms.go index e12641249..cec621e35 100644 --- a/worker/dbms.go +++ b/worker/dbms.go @@ -182,11 +182,44 @@ func (dbms *DBMS) Init() (err error) { err = errors.Wrap(err, "init chain bus failed") return } + if err = dbms.busService.Subscribe("/UpdateBilling/", dbms.updateBilling); err != nil { + err = errors.Wrap(err, "init chain bus failed") + return + } dbms.busService.Start() return } +func (dbms *DBMS) updateBilling(itx interfaces.Transaction, count uint32) { + var ( + tx *types.UpdateBilling + ok bool + ) + if tx, ok = itx.(*types.UpdateBilling); !ok { + log.WithFields(log.Fields{ + "type": itx.GetTransactionType(), + }).WithError(ErrInvalidTransactionType).Warn("invalid tx type in update billing") + return + } + // Get profile and database instance + var ( + id = tx.Receiver.DatabaseID() + profile *types.SQLChainProfile + database *Database + ) + le := log.WithFields(log.Fields{ + "id": id, + }) + if database, ok = dbms.getMeta(id); !ok { + le.Warn("cannot find database") + } + if profile, ok = dbms.busService.RequestSQLProfile(id); !ok { + le.Warn("cannot find profile") + } + database.chain.SetLastBillingHeight(int32(profile.LastUpdatedHeight)) +} + func (dbms *DBMS) createDatabase(tx interfaces.Transaction, count uint32) { cd, ok := tx.(*types.CreateDatabase) if !ok { @@ -399,6 +432,11 @@ func (dbms *DBMS) Create(instance *types.ServiceInstance, cleanup bool) (err err SlowQueryTime: DefaultSlowQueryTime, } + // set last billing height + if 
profile, ok := dbms.busService.RequestSQLProfile(dbCfg.DatabaseID); ok { + dbCfg.LastBillingHeight = int32(profile.LastUpdatedHeight) + } + if db, err = NewDatabase(dbCfg, instance.Peers, instance.GenesisBlock); err != nil { return } From d2a48cfff22a94692aad573fe4f450e22e2bbbd3 Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Thu, 7 Mar 2019 19:47:20 +0800 Subject: [PATCH 082/244] Add comment for exported method --- sqlchain/chain.go | 1 + 1 file changed, 1 insertion(+) diff --git a/sqlchain/chain.go b/sqlchain/chain.go index 22870e041..a64e7ad69 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -1079,6 +1079,7 @@ func (c *Chain) billing(h int32, node *blockNode) (ub *types.UpdateBilling, err return } +// SetLastBillingHeight sets the last billing height of this chain instance. func (c *Chain) SetLastBillingHeight(h int32) { c.logEntryWithHeadState().WithFields( log.Fields{"new_height": h}).Debug("set last billing height") From 74c0c847a47e719435780b47b6a6c4a04d01ec24 Mon Sep 17 00:00:00 2001 From: auxten Date: Thu, 7 Mar 2019 23:15:41 +0800 Subject: [PATCH 083/244] Update README.md --- README.md | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 6da536ba9..c9ee7639f 100644 --- a/README.md +++ b/README.md @@ -114,10 +114,8 @@ Watch us or [![follow on Twitter](https://img.shields.io/twitter/url/https/twitt ## Contact +- [YouTube](https://www.youtube.com/channel/UCe9P_TMiexSHW2GGV5qBmZw) +- [Blog](https://medium.com/me/stories/public) - [Mail](mailto:webmaster@covenantsql.io) - [Forum](https://demo.covenantsql.io/forum/) -- - follow on Twitter - -- [![Join the chat at https://gitter.im/CovenantSQL/CovenantSQL](https://badges.gitter.im/CovenantSQL/CovenantSQL.svg)](https://gitter.im/CovenantSQL/CovenantSQL?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +- follow on Twitter From bd3a1cd2c9c1f16362150427fb17ce427a521f84 Mon Sep 17 00:00:00 2001 From: auxten Date: Thu, 7 Mar 2019 
23:17:34 +0800 Subject: [PATCH 084/244] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index c9ee7639f..b90585ac0 100644 --- a/README.md +++ b/README.md @@ -114,8 +114,8 @@ Watch us or [![follow on Twitter](https://img.shields.io/twitter/url/https/twitt ## Contact +- [Blog](https://medium.com/@covenant_labs) - [YouTube](https://www.youtube.com/channel/UCe9P_TMiexSHW2GGV5qBmZw) -- [Blog](https://medium.com/me/stories/public) - [Mail](mailto:webmaster@covenantsql.io) - [Forum](https://demo.covenantsql.io/forum/) - follow on Twitter From 8513c62eb87dc9b1c21d2438571ce8f9b9ad77de Mon Sep 17 00:00:00 2001 From: auxten Date: Thu, 7 Mar 2019 23:50:18 +0800 Subject: [PATCH 085/244] Add video link --- README.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/README.md b/README.md index b90585ac0..2e1a00bfb 100644 --- a/README.md +++ b/README.md @@ -57,6 +57,9 @@ sql.Open("CovenantSQL", dbURI) - Each Database has its own independent distributed engine. - Mainly responsible for: database storage & encryption, query processing & signature, efficient indexing. 
+## How it works +[How CovenantSQL works(video)](https://youtu.be/2Mz5POxxaQM?t=106) + ## Papers Our team members published: From 91b63e55ef6ed0d7ffc6a53d3fcc138bdc9ee08e Mon Sep 17 00:00:00 2001 From: auxten Date: Fri, 8 Mar 2019 16:06:00 +0800 Subject: [PATCH 086/244] Trim release pkg --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 23da32731..5b287e7fd 100644 --- a/.travis.yml +++ b/.travis.yml @@ -37,7 +37,7 @@ before_deploy: - make clean - make use_all_cores - mkdir -p build - - tar czvf build/CovenantSQL-$TRAVIS_TAG.$TRAVIS_OS_NAME-amd64.tar.gz $(ls bin/cql* | grep -v test) + - tar czvf build/CovenantSQL-$TRAVIS_TAG.$TRAVIS_OS_NAME-amd64.tar.gz bin/cql bin/cql-adapter bin/cql-fuse bin/cql-minerd bin/cql-mysql-adapter bin/cql-utils deploy: provider: releases From 40ea24278f56fcd5436b7db3a7a3a38c1c4853a8 Mon Sep 17 00:00:00 2001 From: auxten Date: Fri, 8 Mar 2019 16:39:23 +0800 Subject: [PATCH 087/244] Remove cql-adapter --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 5b287e7fd..3c7646c0a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -37,7 +37,7 @@ before_deploy: - make clean - make use_all_cores - mkdir -p build - - tar czvf build/CovenantSQL-$TRAVIS_TAG.$TRAVIS_OS_NAME-amd64.tar.gz bin/cql bin/cql-adapter bin/cql-fuse bin/cql-minerd bin/cql-mysql-adapter bin/cql-utils + - tar czvf build/CovenantSQL-$TRAVIS_TAG.$TRAVIS_OS_NAME-amd64.tar.gz bin/cql bin/cql-fuse bin/cql-minerd bin/cql-mysql-adapter bin/cql-utils deploy: provider: releases From 109bfea0cd313cf319e4cb1c3e236e90a4ff66a5 Mon Sep 17 00:00:00 2001 From: auxten Date: Sun, 10 Mar 2019 15:50:56 +0800 Subject: [PATCH 088/244] Add more test for symmetric --- crypto/symmetric/aes_test.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/crypto/symmetric/aes_test.go b/crypto/symmetric/aes_test.go index 4c469015b..6329cd9a0 100644 --- a/crypto/symmetric/aes_test.go 
+++ b/crypto/symmetric/aes_test.go @@ -30,6 +30,18 @@ const ( ) func TestEncryptDecryptWithPassword(t *testing.T) { + Convey("encrypt & decrypt 0 length string with aes256", t, func() { + enc, err := EncryptWithPassword([]byte(""), []byte(password), []byte(salt)) + So(enc, ShouldNotBeNil) + So(len(enc), ShouldEqual, 2*aes.BlockSize) + So(err, ShouldBeNil) + + dec, err := DecryptWithPassword(enc, []byte(password), []byte(salt)) + So(dec, ShouldNotBeNil) + So(len(dec), ShouldEqual, 0) + So(err, ShouldBeNil) + }) + Convey("encrypt & decrypt 0 length bytes with aes256", t, func() { enc, err := EncryptWithPassword([]byte(nil), []byte(password), []byte(salt)) So(enc, ShouldNotBeNil) From 671c434a3734d3fb7f8d05c08ff185252fbfb296 Mon Sep 17 00:00:00 2001 From: auxten Date: Sun, 10 Mar 2019 15:51:15 +0800 Subject: [PATCH 089/244] Add end to end decrypt encrypt func --- xenomint/sqlite/sqlite.go | 29 +++++++++++++++++++++++++++-- xenomint/sqlite/sqlite_test.go | 29 +++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+), 2 deletions(-) diff --git a/xenomint/sqlite/sqlite.go b/xenomint/sqlite/sqlite.go index c544193b3..f757c062b 100644 --- a/xenomint/sqlite/sqlite.go +++ b/xenomint/sqlite/sqlite.go @@ -22,6 +22,7 @@ import ( sqlite3 "github.com/CovenantSQL/go-sqlite3-encrypt" + "github.com/CovenantSQL/CovenantSQL/crypto/symmetric" "github.com/CovenantSQL/CovenantSQL/storage" "github.com/CovenantSQL/CovenantSQL/utils/log" ) @@ -32,18 +33,42 @@ const ( ) func init() { + encryptFunc := func(in, pass, salt []byte) (out []byte, err error) { + out, err = symmetric.EncryptWithPassword(in, pass, salt) + return + } + + decryptFunc := func(in, pass, salt []byte) (out []byte, err error) { + out, err = symmetric.DecryptWithPassword(in, pass, salt) + return + } + sleepFunc := func(t int64) int64 { log.Info("sqlite func sleep start") time.Sleep(time.Duration(t)) log.Info("sqlite func sleep end") return t } + + regCustomFunc := func(c *sqlite3.SQLiteConn) (err error) { + if err = 
c.RegisterFunc("sleep", sleepFunc, true); err != nil { + return + } + if err = c.RegisterFunc("encrypt", encryptFunc, true); err != nil { + return + } + if err = c.RegisterFunc("decrypt", decryptFunc, true); err != nil { + return + } + return + } + sql.Register(dirtyReadDriver, &sqlite3.SQLiteDriver{ ConnectHook: func(c *sqlite3.SQLiteConn) (err error) { if _, err = c.Exec("PRAGMA read_uncommitted=1", nil); err != nil { return } - if err = c.RegisterFunc("sleep", sleepFunc, true); err != nil { + if err = regCustomFunc(c); err != nil { return } return @@ -51,7 +76,7 @@ func init() { }) sql.Register(serializableDriver, &sqlite3.SQLiteDriver{ ConnectHook: func(c *sqlite3.SQLiteConn) (err error) { - if err = c.RegisterFunc("sleep", sleepFunc, true); err != nil { + if err = regCustomFunc(c); err != nil { return } return diff --git a/xenomint/sqlite/sqlite_test.go b/xenomint/sqlite/sqlite_test.go index cdba9f1b2..b70b6aa91 100644 --- a/xenomint/sqlite/sqlite_test.go +++ b/xenomint/sqlite/sqlite_test.go @@ -22,6 +22,7 @@ import ( "math/rand" "os" "path" + "strings" "sync" "sync/atomic" "testing" @@ -58,6 +59,34 @@ func TestStorage(t *testing.T) { // Create basic table for testing _, err = st.Writer().Exec(`CREATE TABLE "t1" ("k" INT, "v" TEXT, PRIMARY KEY("k"))`) So(err, ShouldBeNil) + + Convey("Test custom encrypt decrypt func", func() { + _, err = st.Writer().Exec(`INSERT INTO "t1" ("k", "v") VALUES (?, encrypt(?, "pass", "salt"))`, 0, "v0enc") + So(err, ShouldBeNil) + _, err = st.Writer().Exec(`INSERT INTO "t1" ("k", "v") VALUES (?, encrypt(?)`, 1, "v0enc") + So(err.Error(), ShouldContainSubstring, "incomplete input") + var destStr string + err = st.Reader().QueryRow(`SELECT "v" FROM "t1" WHERE "k"=?`, 0).Scan(&destStr) + So(err, ShouldBeNil) + So(destStr, ShouldNotContainSubstring, "enc") + err = st.Reader().QueryRow(`SELECT decrypt("v", "pass", "salt") FROM "t1" WHERE "k"=?`, 0).Scan(&destStr) + So(err, ShouldBeNil) + So(destStr, ShouldEqual, "v0enc") + + var 
destSlice []byte + _, err = st.Writer().Exec(`UPDATE "t1" SET v = encrypt(@1, "pass", "salt") WHERE "k"=@2`, "", 0) + So(err, ShouldBeNil) + err = st.Reader().QueryRow(`SELECT decrypt("v", "pass", "salt") FROM "t1" WHERE "k"=?`, 0).Scan(&destSlice) + So(err, ShouldBeNil) + So(len(destSlice), ShouldEqual, 0) + + largeText := strings.Repeat("s", 10000) + _, err = st.Writer().Exec(`UPDATE "t1" SET v = encrypt(:1, "pass", "salt") WHERE "k"=:2`, largeText, 0) + So(err, ShouldBeNil) + err = st.Reader().QueryRow(`SELECT decrypt("v", "pass", "salt") FROM "t1" WHERE "k"=?`, 0).Scan(&destStr) + So(err, ShouldBeNil) + So(destStr, ShouldEqual, largeText) + }) Convey("When storage is closed", func() { err = st.Close() So(err, ShouldBeNil) From 2fcc7b0adf0e5430a2253e13aa2cccfaec38016e Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Tue, 12 Mar 2019 15:29:16 +0800 Subject: [PATCH 090/244] Support content-type header in cors --- cmd/cql/main.go | 2 +- sqlchain/adapter/server.go | 4 +++- sqlchain/observer/api.go | 4 +++- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/cmd/cql/main.go b/cmd/cql/main.go index de31d6109..634051e12 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -601,7 +601,7 @@ func main() { // if web flag is enabled if explorerAddr != "" || adapterAddr != "" { - fmt.Printf("Ctrl + C to stop explorer on %s and adapter on %s", explorerAddr, adapterAddr) + fmt.Printf("Ctrl + C to stop explorer on %s and adapter on %s\n", explorerAddr, adapterAddr) <-utils.WaitForExit() return } diff --git a/sqlchain/adapter/server.go b/sqlchain/adapter/server.go index de1fb533f..4c0611e3a 100644 --- a/sqlchain/adapter/server.go +++ b/sqlchain/adapter/server.go @@ -47,7 +47,9 @@ func NewHTTPAdapter(listenAddr string, configFile string) (adapter *HTTPAdapter, cfg.ListenAddr = listenAddr } // init server - handler := handlers.CORS()(api.GetRouter()) + handler := handlers.CORS( + handlers.AllowedHeaders([]string{"Content-Type"}), + )(api.GetRouter()) adapter.server = 
&http.Server{ TLSConfig: cfg.TLSConfig, diff --git a/sqlchain/observer/api.go b/sqlchain/observer/api.go index cc20e7eb8..24a8ca4b2 100644 --- a/sqlchain/observer/api.go +++ b/sqlchain/observer/api.go @@ -715,7 +715,9 @@ func startAPI(service *Service, listenAddr string, version string) (server *http WriteTimeout: apiTimeout * 10, ReadTimeout: apiTimeout, IdleTimeout: apiTimeout, - Handler: handlers.CORS()(router), + Handler: handlers.CORS( + handlers.AllowedHeaders([]string{"Content-Type"}), + )(router), } go func() { From f321302d80b966124955f8d22fc4bc5861948e1c Mon Sep 17 00:00:00 2001 From: auxten Date: Sat, 16 Mar 2019 16:41:32 +0800 Subject: [PATCH 091/244] Add demos --- README.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/README.md b/README.md index 2e1a00bfb..429c29c06 100644 --- a/README.md +++ b/README.md @@ -60,6 +60,14 @@ sql.Open("CovenantSQL", dbURI) ## How it works [How CovenantSQL works(video)](https://youtu.be/2Mz5POxxaQM?t=106) +## Demos + +- [CovenantForum](https://demo.covenantsql.io/forum/) +- [Twitter iBlockPin](https://twitter.com/iblockpin) +- [Weibo BlockPin](https://weibo.com/BlockPin) +- [Markdown Editor](https://github.com/CovenantSQL/stackedit) +- [Web Admin for CovenantSQL](https://github.com/CovenantSQL/adminer) + ## Papers Our team members published: From e740de2de600e8ba228fa7aa9bde5566b5246111 Mon Sep 17 00:00:00 2001 From: auxten Date: Sat, 16 Mar 2019 16:51:42 +0800 Subject: [PATCH 092/244] Update demos --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 429c29c06..9714b9c9d 100644 --- a/README.md +++ b/README.md @@ -63,9 +63,9 @@ sql.Open("CovenantSQL", dbURI) ## Demos - [CovenantForum](https://demo.covenantsql.io/forum/) -- [Twitter iBlockPin](https://twitter.com/iblockpin) -- [Weibo BlockPin](https://weibo.com/BlockPin) -- [Markdown Editor](https://github.com/CovenantSQL/stackedit) +- [Twitter Bot @iBlockPin](https://twitter.com/iblockpin) +- [Weibo 
Bot @BlockPin](https://weibo.com/BlockPin) +- [Markdown Editor with CovenantSQL sync](https://github.com/CovenantSQL/stackedit) - [Web Admin for CovenantSQL](https://github.com/CovenantSQL/adminer) ## Papers From a78498698aa57c365caf6ccc2e760e621bc75390 Mon Sep 17 00:00:00 2001 From: laodouya Date: Fri, 8 Mar 2019 01:05:25 +0800 Subject: [PATCH 093/244] Remove one useless flag. Reorder some code. --- cmd/cql/main.go | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 634051e12..c419b503a 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -72,7 +72,6 @@ var ( singleTransaction bool showVersion bool variables varsFlag - logLevel string // Shard chain explorer/adapter stuff tmpPath string // background observer and explorer block and log file path @@ -201,7 +200,7 @@ func usqlRegister() { return 0, nil }, Open: func(url *dburl.URL) (handler func(driver string, dsn string) (*sql.DB, error), err error) { - log.Infof("connecting to %#v", url.DSN) + cLog.Infof("connecting to %#v", url.DSN) // wait for database to become ready ctx, cancel := context.WithTimeout(context.Background(), waitTxConfirmationMaxDuration) @@ -234,7 +233,6 @@ func usqlRegister() { } func init() { - flag.StringVar(&logLevel, "log-level", "", "Service log level") flag.StringVar(&dsn, "dsn", "", "Database url") flag.StringVar(&command, "command", "", "Run only single command (SQL or usql internal command) and exit") flag.StringVar(&fileName, "file", "", "Execute commands from file and exit") @@ -248,8 +246,8 @@ func init() { flag.BoolVar(&singleTransaction, "single-transaction", false, "Execute as a single transaction (if non-interactive)") flag.Var(&variables, "variable", "Set variable") - // Explorer - flag.StringVar(&tmpPath, "tmp-path", "", "Explorer temp file path, use os.TempDir for default") + // Explorer/Adapter + flag.StringVar(&tmpPath, "tmp-path", "", "Background service temp file path, use 
os.TempDir for default") flag.StringVar(&bgLogLevel, "bg-log-level", "", "Background service log level") flag.StringVar(&explorerAddr, "web", "", "Address to serve a database chain explorer, e.g. :8546") flag.StringVar(&adapterAddr, "adapter", "", "Address to serve a database chain adapter, e.g. :7784") @@ -270,18 +268,6 @@ func main() { rand.Seed(time.Now().UnixNano()) flag.Parse() - log.SetStringLevel(logLevel, log.InfoLevel) - if tmpPath == "" { - tmpPath = os.TempDir() - } - logPath := filepath.Join(tmpPath, "covenant_service.log") - bgLog, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) - if err != nil { - fmt.Fprintf(os.Stderr, "open log file failed: %s, %v", logPath, err) - os.Exit(-1) - } - log.SetOutput(bgLog) - log.SetStringLevel(bgLogLevel, log.InfoLevel) cLog = logrus.New() @@ -301,6 +287,18 @@ func main() { return } + if tmpPath == "" { + tmpPath = os.TempDir() + } + logPath := filepath.Join(tmpPath, "covenant_service.log") + bgLog, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + fmt.Fprintf(os.Stderr, "open log file failed: %s, %v", logPath, err) + os.Exit(-1) + } + log.SetOutput(bgLog) + log.SetStringLevel(bgLogLevel, log.InfoLevel) + if explorerAddr != "" { service, httpServer, err = observer.StartObserver(explorerAddr, version) if err != nil { @@ -340,8 +338,6 @@ func main() { // duration. 
waitTxConfirmationMaxDuration = 20 * conf.GConf.BPPeriod - usqlRegister() - if getBalance { var stableCoinBalance, covenantCoinBalance uint64 @@ -556,6 +552,8 @@ func main() { return } + usqlRegister() + var ( curUser *user.User available = drivers.Available() From b5a8bfeeb39a4a8497bdf9dab1127777235998e9 Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 14 Mar 2019 12:29:16 +0800 Subject: [PATCH 094/244] Split cql command func into internal package --- cmd/cql/internal/adapter.go | 1 + cmd/cql/internal/balance.go | 1 + cmd/cql/internal/base.go | 1 + cmd/cql/internal/command.go | 184 +++++++++++++++++++++++++++++++++ cmd/cql/internal/create.go | 1 + cmd/cql/internal/drop.go | 1 + cmd/cql/internal/help.go | 9 ++ cmd/cql/internal/permission.go | 1 + cmd/cql/internal/transfer.go | 1 + cmd/cql/internal/web.go | 1 + cmd/cql/main.go | 118 ++------------------- cmd/cql/util.go | 72 ------------- 12 files changed, 209 insertions(+), 182 deletions(-) create mode 100644 cmd/cql/internal/adapter.go create mode 100644 cmd/cql/internal/balance.go create mode 100644 cmd/cql/internal/base.go create mode 100644 cmd/cql/internal/command.go create mode 100644 cmd/cql/internal/create.go create mode 100644 cmd/cql/internal/drop.go create mode 100644 cmd/cql/internal/help.go create mode 100644 cmd/cql/internal/permission.go create mode 100644 cmd/cql/internal/transfer.go create mode 100644 cmd/cql/internal/web.go delete mode 100644 cmd/cql/util.go diff --git a/cmd/cql/internal/adapter.go b/cmd/cql/internal/adapter.go new file mode 100644 index 000000000..5bf0569ce --- /dev/null +++ b/cmd/cql/internal/adapter.go @@ -0,0 +1 @@ +package internal diff --git a/cmd/cql/internal/balance.go b/cmd/cql/internal/balance.go new file mode 100644 index 000000000..5bf0569ce --- /dev/null +++ b/cmd/cql/internal/balance.go @@ -0,0 +1 @@ +package internal diff --git a/cmd/cql/internal/base.go b/cmd/cql/internal/base.go new file mode 100644 index 000000000..5bf0569ce --- /dev/null +++ 
b/cmd/cql/internal/base.go @@ -0,0 +1 @@ +package internal diff --git a/cmd/cql/internal/command.go b/cmd/cql/internal/command.go new file mode 100644 index 000000000..7ba87965e --- /dev/null +++ b/cmd/cql/internal/command.go @@ -0,0 +1,184 @@ +/* + * Copyright 2016-2018 Kenneth Shaw. + * Copyright 2018 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package internal + +import ( + "context" + "database/sql" + "database/sql/driver" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + + sqlite3 "github.com/CovenantSQL/go-sqlite3-encrypt" + "github.com/sirupsen/logrus" + "github.com/xo/dburl" + "github.com/xo/usql/drivers" + "github.com/xo/usql/text" + + "github.com/CovenantSQL/CovenantSQL/client" +) + +// SqTime provides a type that will correctly scan the various timestamps +// values stored by the github.com/mattn/go-sqlite3 driver for time.Time +// values, as well as correctly satisfying the sql/driver/Valuer interface. +type SqTime struct { + time.Time +} + +// Value satisfies the Valuer interface. +func (t SqTime) Value() (driver.Value, error) { + return t.Time, nil +} + +// Scan satisfies the Scanner interface. 
+func (t *SqTime) Scan(v interface{}) error { + switch x := v.(type) { + case time.Time: + t.Time = x + return nil + case []byte: + return t.parse(string(x)) + case string: + return t.parse(x) + } + + return fmt.Errorf("cannot convert type %s to time.Time", reflect.TypeOf(v)) +} + +// parse attempts to parse string s to t. +func (t *SqTime) parse(s string) error { + if s == "" { + return nil + } + + for _, f := range sqlite3.SQLiteTimestampFormats { + z, err := time.Parse(f, s) + if err == nil { + t.Time = z + return nil + } + } + + return errors.New("could not parse time") +} + +// UsqlRegister init xo/usql driver +func UsqlRegister(log *logrus.Logger, waitTxConfirmationMaxDuration time.Duration, dsn string) { + // set command name of usql + text.CommandName = "covenantsql" + + // register SQLite3 database + drivers.Register("sqlite3", drivers.Driver{ + AllowMultilineComments: true, + ForceParams: drivers.ForceQueryParameters([]string{ + "loc", "auto", + }), + Version: func(db drivers.DB) (string, error) { + var ver string + err := db.QueryRow(`SELECT sqlite_version()`).Scan(&ver) + if err != nil { + return "", err + } + return "SQLite3 " + ver, nil + }, + Err: func(err error) (string, string) { + if e, ok := err.(sqlite3.Error); ok { + return strconv.Itoa(int(e.Code)), e.Error() + } + + code, msg := "", err.Error() + if e, ok := err.(sqlite3.ErrNo); ok { + code = strconv.Itoa(int(e)) + } + + return code, msg + }, + ConvertBytes: func(buf []byte, tfmt string) (string, error) { + // attempt to convert buf if it matches a time format, and if it + // does, then return a formatted time string. 
+ s := string(buf) + if s != "" && strings.TrimSpace(s) != "" { + t := new(SqTime) + if err := t.Scan(buf); err == nil { + return t.Format(tfmt), nil + } + } + return s, nil + }, + }) + + // register CovenantSQL database + drivers.Register("covenantsql", drivers.Driver{ + AllowMultilineComments: true, + Version: func(db drivers.DB) (string, error) { + return Version, nil + }, + Err: func(err error) (string, string) { + return "", err.Error() + }, + ConvertBytes: func(buf []byte, tfmt string) (string, error) { + // attempt to convert buf if it matches a time format, and if it + // does, then return a formatted time string. + s := string(buf) + if s != "" && strings.TrimSpace(s) != "" { + t := new(SqTime) + if err := t.Scan(buf); err == nil { + return t.Format(tfmt), nil + } + } + return s, nil + }, + RowsAffected: func(sql.Result) (int64, error) { + return 0, nil + }, + Open: func(url *dburl.URL) (handler func(driverName, dataSourceName string) (*sql.DB, error), err error) { + log.Infof("connecting to %#v", url.DSN) + + // wait for database to become ready + ctx, cancel := context.WithTimeout(context.Background(), waitTxConfirmationMaxDuration) + defer cancel() + if err = client.WaitDBCreation(ctx, dsn); err != nil { + return + } + + return sql.Open, nil + }, + }) + + // register covenantsql:// scheme to dburl + dburl.Register(dburl.Scheme{ + Driver: "covenantsql", + Generator: func(url *dburl.URL) (string, error) { + dbID, err := dburl.GenOpaque(url) + if err != nil { + return "", err + } + cfg := client.NewConfig() + cfg.DatabaseID = dbID + return cfg.FormatDSN(), nil + }, + Proto: 0, + Opaque: true, + Aliases: []string{}, + Override: "", + }) +} diff --git a/cmd/cql/internal/create.go b/cmd/cql/internal/create.go new file mode 100644 index 000000000..5bf0569ce --- /dev/null +++ b/cmd/cql/internal/create.go @@ -0,0 +1 @@ +package internal diff --git a/cmd/cql/internal/drop.go b/cmd/cql/internal/drop.go new file mode 100644 index 000000000..5bf0569ce --- /dev/null 
+++ b/cmd/cql/internal/drop.go @@ -0,0 +1 @@ +package internal diff --git a/cmd/cql/internal/help.go b/cmd/cql/internal/help.go new file mode 100644 index 000000000..671ccd44d --- /dev/null +++ b/cmd/cql/internal/help.go @@ -0,0 +1,9 @@ +package internal + +// Name of command +const Name = "cql" + +var ( + // Version of command, set by main func of version + Version = "unknown" +) diff --git a/cmd/cql/internal/permission.go b/cmd/cql/internal/permission.go new file mode 100644 index 000000000..5bf0569ce --- /dev/null +++ b/cmd/cql/internal/permission.go @@ -0,0 +1 @@ +package internal diff --git a/cmd/cql/internal/transfer.go b/cmd/cql/internal/transfer.go new file mode 100644 index 000000000..5bf0569ce --- /dev/null +++ b/cmd/cql/internal/transfer.go @@ -0,0 +1 @@ +package internal diff --git a/cmd/cql/internal/web.go b/cmd/cql/internal/web.go new file mode 100644 index 000000000..5bf0569ce --- /dev/null +++ b/cmd/cql/internal/web.go @@ -0,0 +1 @@ +package internal diff --git a/cmd/cql/main.go b/cmd/cql/main.go index c419b503a..f120625db 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -19,7 +19,6 @@ package main import ( "context" - "database/sql" "encoding/json" "flag" "fmt" @@ -35,10 +34,8 @@ import ( "strings" "time" - sqlite3 "github.com/CovenantSQL/go-sqlite3-encrypt" "github.com/pkg/errors" "github.com/sirupsen/logrus" - "github.com/xo/dburl" "github.com/xo/usql/drivers" "github.com/xo/usql/env" "github.com/xo/usql/handler" @@ -47,6 +44,7 @@ import ( pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/client" + "github.com/CovenantSQL/CovenantSQL/cmd/cql/internal" "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" @@ -58,8 +56,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/utils/log" ) -const name = "cql" - var ( version = "unknown" dsn string @@ -131,107 +127,6 @@ func (v *varsFlag) Set(value string) error 
{ return nil } -func usqlRegister() { - // set command name of usql - text.CommandName = "covenantsql" - - // register SQLite3 database - drivers.Register("sqlite3", drivers.Driver{ - AllowMultilineComments: true, - ForceParams: drivers.ForceQueryParameters([]string{ - "loc", "auto", - }), - Version: func(db drivers.DB) (string, error) { - var ver string - err := db.QueryRow(`SELECT sqlite_version()`).Scan(&ver) - if err != nil { - return "", err - } - return "SQLite3 " + ver, nil - }, - Err: func(err error) (string, string) { - if e, ok := err.(sqlite3.Error); ok { - return strconv.Itoa(int(e.Code)), e.Error() - } - - code, msg := "", err.Error() - if e, ok := err.(sqlite3.ErrNo); ok { - code = strconv.Itoa(int(e)) - } - - return code, msg - }, - ConvertBytes: func(buf []byte, tfmt string) (string, error) { - // attempt to convert buf if it matches a time format, and if it - // does, then return a formatted time string. - s := string(buf) - if s != "" && strings.TrimSpace(s) != "" { - t := new(SqTime) - if err := t.Scan(buf); err == nil { - return t.Format(tfmt), nil - } - } - return s, nil - }, - }) - - // register CovenantSQL database - drivers.Register("covenantsql", drivers.Driver{ - AllowMultilineComments: true, - Version: func(db drivers.DB) (string, error) { - return version, nil - }, - Err: func(err error) (string, string) { - return "", err.Error() - }, - ConvertBytes: func(buf []byte, tfmt string) (string, error) { - // attempt to convert buf if it matches a time format, and if it - // does, then return a formatted time string. 
- s := string(buf) - if s != "" && strings.TrimSpace(s) != "" { - t := new(SqTime) - if err := t.Scan(buf); err == nil { - return t.Format(tfmt), nil - } - } - return s, nil - }, - RowsAffected: func(sql.Result) (int64, error) { - return 0, nil - }, - Open: func(url *dburl.URL) (handler func(driver string, dsn string) (*sql.DB, error), err error) { - cLog.Infof("connecting to %#v", url.DSN) - - // wait for database to become ready - ctx, cancel := context.WithTimeout(context.Background(), waitTxConfirmationMaxDuration) - defer cancel() - if err = client.WaitDBCreation(ctx, dsn); err != nil { - return - } - - return sql.Open, nil - }, - }) - - // register covenantsql:// scheme to dburl - dburl.Register(dburl.Scheme{ - Driver: "covenantsql", - Generator: func(url *dburl.URL) (string, error) { - dbID, err := dburl.GenOpaque(url) - if err != nil { - return "", err - } - cfg := client.NewConfig() - cfg.DatabaseID = dbID - return cfg.FormatDSN(), nil - }, - Proto: 0, - Opaque: true, - Aliases: []string{}, - Override: "", - }) -} - func init() { flag.StringVar(&dsn, "dsn", "", "Database url") flag.StringVar(&command, "command", "", "Run only single command (SQL or usql internal command) and exit") @@ -263,6 +158,9 @@ func init() { } func main() { + + internal.Version = version + var err error // set random rand.Seed(time.Now().UnixNano()) @@ -273,10 +171,10 @@ func main() { if showVersion { fmt.Printf("%v %v %v %v %v\n", - name, version, runtime.GOOS, runtime.GOARCH, runtime.Version()) + internal.Name, internal.Version, runtime.GOOS, runtime.GOARCH, runtime.Version()) os.Exit(0) } - cLog.Infof("cql build: %#v\n", version) + cLog.Infof("cql build: %#v\n", internal.Version) configFile = utils.HomeDirExpand(configFile) @@ -300,7 +198,7 @@ func main() { log.SetStringLevel(bgLogLevel, log.InfoLevel) if explorerAddr != "" { - service, httpServer, err = observer.StartObserver(explorerAddr, version) + service, httpServer, err = observer.StartObserver(explorerAddr, 
internal.Version) if err != nil { log.WithError(err).Fatal("start explorer failed") } else { @@ -552,7 +450,7 @@ func main() { return } - usqlRegister() + internal.UsqlRegister(cLog, waitTxConfirmationMaxDuration, dsn) var ( curUser *user.User diff --git a/cmd/cql/util.go b/cmd/cql/util.go deleted file mode 100644 index d20a87e20..000000000 --- a/cmd/cql/util.go +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright 2016-2018 Kenneth Shaw. - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package main - -import ( - "database/sql/driver" - "errors" - "fmt" - "reflect" - "time" - - sqlite3 "github.com/CovenantSQL/go-sqlite3-encrypt" -) - -// SqTime provides a type that will correctly scan the various timestamps -// values stored by the github.com/mattn/go-sqlite3 driver for time.Time -// values, as well as correctly satisfying the sql/driver/Valuer interface. -type SqTime struct { - time.Time -} - -// Value satisfies the Valuer interface. -func (t SqTime) Value() (driver.Value, error) { - return t.Time, nil -} - -// Scan satisfies the Scanner interface. -func (t *SqTime) Scan(v interface{}) error { - switch x := v.(type) { - case time.Time: - t.Time = x - return nil - case []byte: - return t.parse(string(x)) - case string: - return t.parse(x) - } - - return fmt.Errorf("cannot convert type %s to time.Time", reflect.TypeOf(v)) -} - -// parse attempts to parse string s to t. 
-func (t *SqTime) parse(s string) error { - if s == "" { - return nil - } - - for _, f := range sqlite3.SQLiteTimestampFormats { - z, err := time.Parse(f, s) - if err == nil { - t.Time = z - return nil - } - } - - return errors.New("could not parse time") -} From 4a0867304817958094ff566e290c9f1afa56cd3d Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 14 Mar 2019 15:10:11 +0800 Subject: [PATCH 095/244] Move log and waitTxConfirmationMaxDuration to internal vars. --- cmd/cql/internal/base.go | 13 +++++ cmd/cql/internal/command.go | 7 ++- cmd/cql/main.go | 98 ++++++++++++++++++------------------- 3 files changed, 64 insertions(+), 54 deletions(-) diff --git a/cmd/cql/internal/base.go b/cmd/cql/internal/base.go index 5bf0569ce..fc4af902a 100644 --- a/cmd/cql/internal/base.go +++ b/cmd/cql/internal/base.go @@ -1 +1,14 @@ package internal + +import ( + "time" + + "github.com/sirupsen/logrus" +) + +var ( + WaitTxConfirmationMaxDuration time.Duration + + // ConsoleLog is logging for console. 
+ ConsoleLog *logrus.Logger +) diff --git a/cmd/cql/internal/command.go b/cmd/cql/internal/command.go index 7ba87965e..43ad6f37d 100644 --- a/cmd/cql/internal/command.go +++ b/cmd/cql/internal/command.go @@ -29,7 +29,6 @@ import ( "time" sqlite3 "github.com/CovenantSQL/go-sqlite3-encrypt" - "github.com/sirupsen/logrus" "github.com/xo/dburl" "github.com/xo/usql/drivers" "github.com/xo/usql/text" @@ -82,7 +81,7 @@ func (t *SqTime) parse(s string) error { } // UsqlRegister init xo/usql driver -func UsqlRegister(log *logrus.Logger, waitTxConfirmationMaxDuration time.Duration, dsn string) { +func UsqlRegister(dsn string) { // set command name of usql text.CommandName = "covenantsql" @@ -151,10 +150,10 @@ func UsqlRegister(log *logrus.Logger, waitTxConfirmationMaxDuration time.Duratio return 0, nil }, Open: func(url *dburl.URL) (handler func(driverName, dataSourceName string) (*sql.DB, error), err error) { - log.Infof("connecting to %#v", url.DSN) + ConsoleLog.Infof("connecting to %#v", url.DSN) // wait for database to become ready - ctx, cancel := context.WithTimeout(context.Background(), waitTxConfirmationMaxDuration) + ctx, cancel := context.WithTimeout(context.Background(), WaitTxConfirmationMaxDuration) defer cancel() if err = client.WaitDBCreation(ctx, dsn); err != nil { return diff --git a/cmd/cql/main.go b/cmd/cql/main.go index f120625db..32ec29112 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -70,11 +70,10 @@ var ( variables varsFlag // Shard chain explorer/adapter stuff - tmpPath string // background observer and explorer block and log file path - cLog *logrus.Logger // console logger - bgLogLevel string // background log level - explorerAddr string // explorer Web addr - adapterAddr string // adapter listen addr + tmpPath string // background observer and explorer block and log file path + bgLogLevel string // background log level + explorerAddr string // explorer Web addr + adapterAddr string // adapter listen addr // DML variables createDB string // 
as a instance meta json string or simply a node count @@ -85,9 +84,8 @@ var ( getBalanceWithTokenName string // get specific token's balance of current account waitTxConfirmation bool // wait for transaction confirmation before exiting - waitTxConfirmationMaxDuration time.Duration - service *observer.Service - httpServer *http.Server + service *observer.Service + httpServer *http.Server ) type userPermission struct { @@ -167,20 +165,20 @@ func main() { flag.Parse() - cLog = logrus.New() + internal.ConsoleLog = logrus.New() if showVersion { fmt.Printf("%v %v %v %v %v\n", internal.Name, internal.Version, runtime.GOOS, runtime.GOARCH, runtime.Version()) os.Exit(0) } - cLog.Infof("cql build: %#v\n", internal.Version) + internal.ConsoleLog.Infof("cql build: %#v\n", internal.Version) configFile = utils.HomeDirExpand(configFile) // init covenantsql driver if err = client.Init(configFile, []byte(password)); err != nil { - cLog.WithError(err).Error("init covenantsql client failed") + internal.ConsoleLog.WithError(err).Error("init covenantsql client failed") os.Exit(-1) return } @@ -202,7 +200,7 @@ func main() { if err != nil { log.WithError(err).Fatal("start explorer failed") } else { - cLog.Infof("explorer started on %s", explorerAddr) + internal.ConsoleLog.Infof("explorer started on %s", explorerAddr) } defer func() { @@ -220,7 +218,7 @@ func main() { if err = server.Serve(); err != nil { log.WithError(err).Fatal("start adapter failed") } else { - cLog.Infof("adapter started on %s", adapterAddr) + internal.ConsoleLog.Infof("adapter started on %s", adapterAddr) defer func() { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) @@ -234,22 +232,22 @@ func main() { // TODO(leventeliu): discover more specific confirmation duration from config. We don't have // enough informations from config to do that currently, so just use a fixed and long enough // duration. 
- waitTxConfirmationMaxDuration = 20 * conf.GConf.BPPeriod + internal.WaitTxConfirmationMaxDuration = 20 * conf.GConf.BPPeriod if getBalance { var stableCoinBalance, covenantCoinBalance uint64 if stableCoinBalance, err = client.GetTokenBalance(types.Particle); err != nil { - cLog.WithError(err).Error("get Particle balance failed") + internal.ConsoleLog.WithError(err).Error("get Particle balance failed") return } if covenantCoinBalance, err = client.GetTokenBalance(types.Wave); err != nil { - cLog.WithError(err).Error("get Wave balance failed") + internal.ConsoleLog.WithError(err).Error("get Wave balance failed") return } - cLog.Infof("Particle balance is: %d", stableCoinBalance) - cLog.Infof("Wave balance is: %d", covenantCoinBalance) + internal.ConsoleLog.Infof("Particle balance is: %d", stableCoinBalance) + internal.ConsoleLog.Infof("Wave balance is: %d", covenantCoinBalance) return } @@ -262,17 +260,17 @@ func main() { for i := types.Particle; i < types.SupportTokenNumber; i++ { values[i] = types.TokenList[i] } - cLog.Errorf("no such token supporting in CovenantSQL (what we support: %s)", + internal.ConsoleLog.Errorf("no such token supporting in CovenantSQL (what we support: %s)", strings.Join(values, ", ")) os.Exit(-1) return } if tokenBalance, err = client.GetTokenBalance(tokenType); err != nil { - cLog.WithError(err).Error("get token balance failed") + internal.ConsoleLog.WithError(err).Error("get token balance failed") os.Exit(-1) return } - cLog.Infof("%s balance is: %d", tokenType.String(), tokenBalance) + internal.ConsoleLog.Infof("%s balance is: %d", tokenType.String(), tokenBalance) return } @@ -288,7 +286,7 @@ func main() { txHash, err := client.Drop(dropDB) if err != nil { // drop database failed - cLog.WithField("db", dropDB).WithError(err).Error("drop database failed") + internal.ConsoleLog.WithField("db", dropDB).WithError(err).Error("drop database failed") return } @@ -297,7 +295,7 @@ func main() { } // drop database success - cLog.Infof("drop 
database %#v success", dropDB) + internal.ConsoleLog.Infof("drop database %#v success", dropDB) return } @@ -311,7 +309,7 @@ func main() { nodeCnt, err := strconv.ParseUint(createDB, 10, 16) if err != nil { // still failing - cLog.WithField("db", createDB).Error("create database failed: invalid instance description") + internal.ConsoleLog.WithField("db", createDB).Error("create database failed: invalid instance description") os.Exit(-1) return } @@ -322,23 +320,23 @@ func main() { txHash, dsn, err := client.Create(meta) if err != nil { - cLog.WithError(err).Error("create database failed") + internal.ConsoleLog.WithError(err).Error("create database failed") os.Exit(-1) return } if waitTxConfirmation { wait(txHash) - var ctx, cancel = context.WithTimeout(context.Background(), waitTxConfirmationMaxDuration) + var ctx, cancel = context.WithTimeout(context.Background(), internal.WaitTxConfirmationMaxDuration) defer cancel() err = client.WaitDBCreation(ctx, dsn) if err != nil { - cLog.WithError(err).Error("create database failed durating creation") + internal.ConsoleLog.WithError(err).Error("create database failed durating creation") os.Exit(-1) } } - cLog.Infof("the newly created database is: %#v", dsn) + internal.ConsoleLog.Infof("the newly created database is: %#v", dsn) fmt.Printf(dsn) return } @@ -347,7 +345,7 @@ func main() { // update user's permission on sqlchain var perm userPermission if err := json.Unmarshal([]byte(updatePermission), &perm); err != nil { - cLog.WithError(err).Errorf("update permission failed: invalid permission description") + internal.ConsoleLog.WithError(err).Errorf("update permission failed: invalid permission description") os.Exit(-1) return } @@ -357,7 +355,7 @@ func main() { if err := json.Unmarshal(perm.Perm, &permPayload); err != nil { // try again using role string representation if err := json.Unmarshal(perm.Perm, &permPayload.Role); err != nil { - cLog.WithError(err).Errorf("update permission failed: invalid permission description") 
+ internal.ConsoleLog.WithError(err).Errorf("update permission failed: invalid permission description") os.Exit(-1) return } @@ -369,14 +367,14 @@ func main() { } if !p.IsValid() { - cLog.Errorf("update permission failed: invalid permission description") + internal.ConsoleLog.Errorf("update permission failed: invalid permission description") os.Exit(-1) return } txHash, err := client.UpdatePermission(perm.TargetUser, perm.TargetChain, p) if err != nil { - cLog.WithError(err).Error("update permission failed") + internal.ConsoleLog.WithError(err).Error("update permission failed") os.Exit(-1) return } @@ -389,7 +387,7 @@ func main() { } } - cLog.Info("succeed in sending transaction to CovenantSQL") + internal.ConsoleLog.Info("succeed in sending transaction to CovenantSQL") return } @@ -397,35 +395,35 @@ func main() { // transfer token var tran tranToken if err := json.Unmarshal([]byte(transferToken), &tran); err != nil { - cLog.WithError(err).Errorf("transfer token failed: invalid transfer description") + internal.ConsoleLog.WithError(err).Errorf("transfer token failed: invalid transfer description") os.Exit(-1) return } var validAmount = regexp.MustCompile(`^([0-9]+) *([a-zA-Z]+)$`) if !validAmount.MatchString(tran.Amount) { - cLog.Error("transfer token failed: invalid transfer description") + internal.ConsoleLog.Error("transfer token failed: invalid transfer description") os.Exit(-1) return } amountUnit := validAmount.FindStringSubmatch(tran.Amount) if len(amountUnit) != 3 { - cLog.Error("transfer token failed: invalid transfer description") + internal.ConsoleLog.Error("transfer token failed: invalid transfer description") for _, v := range amountUnit { - cLog.Error(v) + internal.ConsoleLog.Error(v) } os.Exit(-1) return } amount, err := strconv.ParseUint(amountUnit[1], 10, 64) if err != nil { - cLog.Error("transfer token failed: invalid token amount") + internal.ConsoleLog.Error("transfer token failed: invalid token amount") os.Exit(-1) return } unit := 
types.FromString(amountUnit[2]) if !unit.Listed() { - cLog.Error("transfer token failed: invalid token type") + internal.ConsoleLog.Error("transfer token failed: invalid token type") os.Exit(-1) return } @@ -433,7 +431,7 @@ func main() { var txHash hash.Hash txHash, err = client.TransferToken(tran.TargetUser, amount, unit) if err != nil { - cLog.WithError(err).Error("transfer token failed") + internal.ConsoleLog.WithError(err).Error("transfer token failed") os.Exit(-1) return } @@ -446,11 +444,11 @@ func main() { } } - cLog.Info("succeed in sending transaction to CovenantSQL") + internal.ConsoleLog.Info("succeed in sending transaction to CovenantSQL") return } - internal.UsqlRegister(cLog, waitTxConfirmationMaxDuration, dsn) + internal.UsqlRegister(dsn) var ( curUser *user.User @@ -460,7 +458,7 @@ func main() { // in docker, fake user var wd string if wd, err = os.Getwd(); err != nil { - cLog.WithError(err).Error("get working directory failed") + internal.ConsoleLog.WithError(err).Error("get working directory failed") os.Exit(-1) return } @@ -473,7 +471,7 @@ func main() { } } else { if curUser, err = user.Current(); err != nil { - cLog.WithError(err).Error("get current user failed") + internal.ConsoleLog.WithError(err).Error("get current user failed") os.Exit(-1) return } @@ -482,14 +480,14 @@ func main() { // run err = run(curUser) if err != nil && err != io.EOF && err != rline.ErrInterrupt { - cLog.WithError(err).Error("run cli error") + internal.ConsoleLog.WithError(err).Error("run cli error") if e, ok := err.(*drivers.Error); ok && e.Err == text.ErrDriverNotAvailable { bindings := make([]string, 0, len(available)) for name := range available { bindings = append(bindings, name) } - cLog.Infof("available drivers are: %#v", bindings) + internal.ConsoleLog.Infof("available drivers are: %#v", bindings) } os.Exit(-1) return @@ -504,11 +502,11 @@ func main() { } func wait(txHash hash.Hash) (err error) { - var ctx, cancel = context.WithTimeout(context.Background(), 
waitTxConfirmationMaxDuration) + var ctx, cancel = context.WithTimeout(context.Background(), internal.WaitTxConfirmationMaxDuration) defer cancel() var state pi.TransactionState state, err = client.WaitTxConfirmation(ctx, txHash) - cLog.WithFields(logrus.Fields{ + internal.ConsoleLog.WithFields(logrus.Fields{ "tx_hash": txHash, "tx_state": state, }).WithError(err).Info("wait transaction confirmation") @@ -572,14 +570,14 @@ func run(u *user.User) (err error) { h.SetSingleLineMode(true) h.Reset([]rune(command)) if err = h.Run(); err != nil && err != io.EOF { - cLog.WithError(err).Error("run command failed") + internal.ConsoleLog.WithError(err).Error("run command failed") os.Exit(-1) return } } else if fileName != "" { // file if err = h.Include(fileName, false); err != nil { - cLog.WithError(err).Error("run file failed") + internal.ConsoleLog.WithError(err).Error("run file failed") os.Exit(-1) return } From b5e08755f4688b8a56fd6e6a006f607c70c2e82d Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 14 Mar 2019 15:44:29 +0800 Subject: [PATCH 096/244] Fix UsqlRegister should not use outside variable. 
--- cmd/cql/internal/command.go | 4 ++-- cmd/cql/main.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/cql/internal/command.go b/cmd/cql/internal/command.go index 43ad6f37d..6e88532b4 100644 --- a/cmd/cql/internal/command.go +++ b/cmd/cql/internal/command.go @@ -81,7 +81,7 @@ func (t *SqTime) parse(s string) error { } // UsqlRegister init xo/usql driver -func UsqlRegister(dsn string) { +func UsqlRegister() { // set command name of usql text.CommandName = "covenantsql" @@ -155,7 +155,7 @@ func UsqlRegister(dsn string) { // wait for database to become ready ctx, cancel := context.WithTimeout(context.Background(), WaitTxConfirmationMaxDuration) defer cancel() - if err = client.WaitDBCreation(ctx, dsn); err != nil { + if err = client.WaitDBCreation(ctx, url.DSN); err != nil { return } diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 32ec29112..b2173f5f8 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -448,7 +448,7 @@ func main() { return } - internal.UsqlRegister(dsn) + internal.UsqlRegister() var ( curUser *user.User From 60a6806c2293a74462f1ebbbd0bd2cadb9bf16eb Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 14 Mar 2019 16:07:47 +0800 Subject: [PATCH 097/244] Move cql console funcs to internal/command --- cmd/cql/internal/command.go | 157 +++++++++++++++++++++++++++++++++++- cmd/cql/main.go | 153 +---------------------------------- 2 files changed, 158 insertions(+), 152 deletions(-) diff --git a/cmd/cql/internal/command.go b/cmd/cql/internal/command.go index 6e88532b4..ba95bf776 100644 --- a/cmd/cql/internal/command.go +++ b/cmd/cql/internal/command.go @@ -22,7 +22,11 @@ import ( "database/sql" "database/sql/driver" "errors" + "flag" "fmt" + "io" + "os" + "os/user" "reflect" "strconv" "strings" @@ -31,6 +35,9 @@ import ( sqlite3 "github.com/CovenantSQL/go-sqlite3-encrypt" "github.com/xo/dburl" "github.com/xo/usql/drivers" + "github.com/xo/usql/env" + "github.com/xo/usql/handler" + "github.com/xo/usql/rline" 
"github.com/xo/usql/text" "github.com/CovenantSQL/CovenantSQL/client" @@ -80,8 +87,26 @@ func (t *SqTime) parse(s string) error { return errors.New("could not parse time") } +type VarsFlag struct { + flag.Value + vars []string +} + +func (v *VarsFlag) Get() []string { + return append([]string{}, v.vars...) +} + +func (v *VarsFlag) String() string { + return fmt.Sprintf("%#v", v.vars) +} + +func (v *VarsFlag) Set(value string) error { + v.vars = append(v.vars, value) + return nil +} + // UsqlRegister init xo/usql driver -func UsqlRegister() { +func usqlRegister() { // set command name of usql text.CommandName = "covenantsql" @@ -181,3 +206,133 @@ func UsqlRegister() { Override: "", }) } + +func run(u *user.User, dsn, command, fileName, outFile string, noRC, singleTransaction bool, variables VarsFlag) (err error) { + // get working directory + wd, err := os.Getwd() + if err != nil { + return err + } + + // handle variables + for _, v := range variables.Get() { + if i := strings.Index(v, "="); i != -1 { + env.Set(v[:i], v[i+1:]) + } else { + env.Unset(v) + } + } + + // create input/output + interactive := command != "" || fileName != "" + l, err := rline.New(interactive, outFile, env.HistoryFile(u)) + if err != nil { + return err + } + defer l.Close() + + // create handler + h := handler.New(l, u, wd, true) + + // open dsn + if err = h.Open(dsn); err != nil { + return err + } + + // start transaction + if singleTransaction { + if h.IO().Interactive() { + return text.ErrSingleTransactionCannotBeUsedWithInteractiveMode + } + if err = h.Begin(); err != nil { + return err + } + } + + // rc file + if rc := env.RCFile(u); !noRC && rc != "" { + if err = h.Include(rc, false); err != nil && err != text.ErrNoSuchFileOrDirectory { + return err + } + } + + if command != "" { + // one liner command + h.SetSingleLineMode(true) + h.Reset([]rune(command)) + if err = h.Run(); err != nil && err != io.EOF { + ConsoleLog.WithError(err).Error("run command failed") + os.Exit(-1) + return + 
} + } else if fileName != "" { + // file + if err = h.Include(fileName, false); err != nil { + ConsoleLog.WithError(err).Error("run file failed") + os.Exit(-1) + return + } + } else { + // interactive + if err = h.Run(); err != nil { + return + } + + } + + // commit + if singleTransaction { + return h.Commit() + } + + return nil +} + +// RunConsole runs a console for sql operation in command line. +func RunConsole(dsn, command, fileName, outFile string, noRC, singleTransaction bool, variables VarsFlag) { + + usqlRegister() + + var ( + curUser *user.User + available = drivers.Available() + ) + if st, err := os.Stat("/.dockerenv"); err == nil && !st.IsDir() { + // in docker, fake user + var wd string + if wd, err = os.Getwd(); err != nil { + ConsoleLog.WithError(err).Error("get working directory failed") + os.Exit(-1) + return + } + curUser = &user.User{ + Uid: "0", + Gid: "0", + Username: "docker", + Name: "docker", + HomeDir: wd, + } + } else { + if curUser, err = user.Current(); err != nil { + ConsoleLog.WithError(err).Error("get current user failed") + os.Exit(-1) + return + } + } + + // run + err := run(curUser, dsn, command, fileName, outFile, noRC, singleTransaction, variables) + if err != nil && err != io.EOF && err != rline.ErrInterrupt { + ConsoleLog.WithError(err).Error("run cli error") + + if e, ok := err.(*drivers.Error); ok && e.Err == text.ErrDriverNotAvailable { + bindings := make([]string, 0, len(available)) + for name := range available { + bindings = append(bindings, name) + } + ConsoleLog.Infof("available drivers are: %#v", bindings) + } + os.Exit(-1) + return + } +} diff --git a/cmd/cql/main.go b/cmd/cql/main.go index b2173f5f8..fde4c75d0 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -22,11 +22,9 @@ import ( "encoding/json" "flag" "fmt" - "io" "math/rand" "net/http" "os" - "os/user" "path/filepath" "regexp" "runtime" @@ -36,11 +34,6 @@ import ( "github.com/pkg/errors" "github.com/sirupsen/logrus" - "github.com/xo/usql/drivers" - 
"github.com/xo/usql/env" - "github.com/xo/usql/handler" - "github.com/xo/usql/rline" - "github.com/xo/usql/text" pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/client" @@ -67,7 +60,7 @@ var ( password string singleTransaction bool showVersion bool - variables varsFlag + variables internal.VarsFlag // Shard chain explorer/adapter stuff tmpPath string // background observer and explorer block and log file path @@ -107,24 +100,6 @@ type tranToken struct { Amount string `json:"amount"` } -type varsFlag struct { - flag.Value - vars []string -} - -func (v *varsFlag) Get() []string { - return append([]string{}, v.vars...) -} - -func (v *varsFlag) String() string { - return fmt.Sprintf("%#v", v.vars) -} - -func (v *varsFlag) Set(value string) error { - v.vars = append(v.vars, value) - return nil -} - func init() { flag.StringVar(&dsn, "dsn", "", "Database url") flag.StringVar(&command, "command", "", "Run only single command (SQL or usql internal command) and exit") @@ -448,50 +423,7 @@ func main() { return } - internal.UsqlRegister() - - var ( - curUser *user.User - available = drivers.Available() - ) - if st, err := os.Stat("/.dockerenv"); err == nil && !st.IsDir() { - // in docker, fake user - var wd string - if wd, err = os.Getwd(); err != nil { - internal.ConsoleLog.WithError(err).Error("get working directory failed") - os.Exit(-1) - return - } - curUser = &user.User{ - Uid: "0", - Gid: "0", - Username: "docker", - Name: "docker", - HomeDir: wd, - } - } else { - if curUser, err = user.Current(); err != nil { - internal.ConsoleLog.WithError(err).Error("get current user failed") - os.Exit(-1) - return - } - } - - // run - err = run(curUser) - if err != nil && err != io.EOF && err != rline.ErrInterrupt { - internal.ConsoleLog.WithError(err).Error("run cli error") - - if e, ok := err.(*drivers.Error); ok && e.Err == text.ErrDriverNotAvailable { - bindings := make([]string, 0, len(available)) - for name := range 
available { - bindings = append(bindings, name) - } - internal.ConsoleLog.Infof("available drivers are: %#v", bindings) - } - os.Exit(-1) - return - } + internal.RunConsole(dsn, command, fileName, outFile, noRC, singleTransaction, variables) // if web flag is enabled if explorerAddr != "" || adapterAddr != "" { @@ -515,84 +447,3 @@ func wait(txHash hash.Hash) (err error) { } return } - -func run(u *user.User) (err error) { - // get working directory - wd, err := os.Getwd() - if err != nil { - return err - } - - // handle variables - for _, v := range variables.Get() { - if i := strings.Index(v, "="); i != -1 { - env.Set(v[:i], v[i+1:]) - } else { - env.Unset(v) - } - } - - // create input/output - interactive := command != "" || fileName != "" - l, err := rline.New(interactive, outFile, env.HistoryFile(u)) - if err != nil { - return err - } - defer l.Close() - - // create handler - h := handler.New(l, u, wd, true) - - // open dsn - if err = h.Open(dsn); err != nil { - return err - } - - // start transaction - if singleTransaction { - if h.IO().Interactive() { - return text.ErrSingleTransactionCannotBeUsedWithInteractiveMode - } - if err = h.Begin(); err != nil { - return err - } - } - - // rc file - if rc := env.RCFile(u); !noRC && rc != "" { - if err = h.Include(rc, false); err != nil && err != text.ErrNoSuchFileOrDirectory { - return err - } - } - - if command != "" { - // one liner command - h.SetSingleLineMode(true) - h.Reset([]rune(command)) - if err = h.Run(); err != nil && err != io.EOF { - internal.ConsoleLog.WithError(err).Error("run command failed") - os.Exit(-1) - return - } - } else if fileName != "" { - // file - if err = h.Include(fileName, false); err != nil { - internal.ConsoleLog.WithError(err).Error("run file failed") - os.Exit(-1) - return - } - } else { - // interactive - if err = h.Run(); err != nil { - return - } - - } - - // commit - if singleTransaction { - return h.Commit() - } - - return nil -} From 32718ecd3294810ad8885c304b9c403c8de8c24c 
Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 14 Mar 2019 23:46:16 +0800 Subject: [PATCH 098/244] Refactor interactive usql feature into console command. --- cmd/cql/internal/base.go | 89 +++++ cmd/cql/internal/cfg.go | 44 +++ cmd/cql/internal/command.go | 39 +- cmd/cql/main.go | 695 ++++++++++++++++++------------------ 4 files changed, 517 insertions(+), 350 deletions(-) create mode 100644 cmd/cql/internal/cfg.go diff --git a/cmd/cql/internal/base.go b/cmd/cql/internal/base.go index fc4af902a..4cdf3714f 100644 --- a/cmd/cql/internal/base.go +++ b/cmd/cql/internal/base.go @@ -1,6 +1,11 @@ package internal import ( + "flag" + "fmt" + "os" + "strings" + "sync" "time" "github.com/sirupsen/logrus" @@ -11,4 +16,88 @@ var ( // ConsoleLog is logging for console. ConsoleLog *logrus.Logger + + // CqlCommands initialized in package main + CqlCommands []*Command ) + +// A Command is an implementation of a cql command +// like cql create or cql transfer. +type Command struct { + // Run runs the command. + // The args are the arguments after the command name. + Run func(cmd *Command, args []string) + + // UsageLine is the one-line usage message. + // The first word in the line is taken to be the command name. + UsageLine string + + // Long is the long message shown in the 'cql help ' output. + Description string + + // Flag is a set of flags specific to this command. + Flag flag.FlagSet +} + +// LongName returns the command's long name: all the words in the usage line between "cql" and a flag or argument, +func (c *Command) LongName() string { + name := c.UsageLine + if i := strings.Index(name, " ["); i >= 0 { + name = name[:i] + } + if name == "cql" { + return "" + } + return strings.TrimPrefix(name, "cql ") +} + +// Name returns the command's short name: the last word in the usage line before a flag or argument. 
+func (c *Command) Name() string { + name := c.LongName() + if i := strings.LastIndex(name, " "); i >= 0 { + name = name[i+1:] + } + return name +} + +func (c *Command) Usage() { + fmt.Fprintf(os.Stderr, "usage: %s\n", c.UsageLine) + fmt.Fprintf(os.Stderr, "Run 'cql help %s' for details.\n", c.LongName()) + os.Exit(2) +} + +// Runnable reports whether the command can be run; otherwise +// it is a documentation pseudo-command such as importpath. +func (c *Command) Runnable() bool { + return c.Run != nil +} + +var atExitFuncs []func() + +func AtExit(f func()) { + atExitFuncs = append(atExitFuncs, f) +} + +func Exit() { + for _, f := range atExitFuncs { + f() + } + os.Exit(exitStatus) +} + +func ExitIfErrors() { + if exitStatus != 0 { + Exit() + } +} + +var exitStatus = 0 +var exitMu sync.Mutex + +func SetExitStatus(n int) { + exitMu.Lock() + if exitStatus < n { + exitStatus = n + } + exitMu.Unlock() +} diff --git a/cmd/cql/internal/cfg.go b/cmd/cql/internal/cfg.go new file mode 100644 index 000000000..eda62f3f3 --- /dev/null +++ b/cmd/cql/internal/cfg.go @@ -0,0 +1,44 @@ +package internal + +import ( + "os" + + "github.com/CovenantSQL/CovenantSQL/client" + "github.com/CovenantSQL/CovenantSQL/conf" + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/utils" +) + +// These are general flags used by console and other commands. +var ( + configFile string + password string + + CmdName string +) + +// AddCommonFlags adds the flags common to all commands. +func AddCommonFlags(cmd *Command) { + cmd.Flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file for covenantsql") + cmd.Flag.StringVar(&password, "password", "", "Master key password for covenantsql") + + // Undocumented, unstable debugging flags. 
+ cmd.Flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, + "Disable signature sign and verify, for testing") +} + +func configInit() { + configFile = utils.HomeDirExpand(configFile) + + // init covenantsql driver + if err := client.Init(configFile, []byte(password)); err != nil { + ConsoleLog.WithError(err).Error("init covenantsql client failed") + os.Exit(-1) + return + } + + // TODO(leventeliu): discover more specific confirmation duration from config. We don't have + // enough informations from config to do that currently, so just use a fixed and long enough + // duration. + WaitTxConfirmationMaxDuration = 20 * conf.GConf.BPPeriod +} diff --git a/cmd/cql/internal/command.go b/cmd/cql/internal/command.go index ba95bf776..f8e845012 100644 --- a/cmd/cql/internal/command.go +++ b/cmd/cql/internal/command.go @@ -43,6 +43,35 @@ import ( "github.com/CovenantSQL/CovenantSQL/client" ) +var CmdConsole = &Command{ + UsageLine: "cql console [-dsn dsn_string] [-command sqlcommand] [-file filename] [-out outputfile] [-no-rc true/false] [-single-transaction] [-variable variables]", + Description: "run a console for realtime sql operation", +} + +//TODO(laodouya) add web/adapter flag {command/filename} +var ( + variables VarsFlag + dsn string + outFile string + noRC bool + singleTransaction bool + command string + fileName string +) + +func init() { + CmdConsole.Run = runConsole + + AddCommonFlags(CmdConsole) + CmdConsole.Flag.Var(&variables, "variable", "Set variable") + CmdConsole.Flag.StringVar(&dsn, "dsn", "", "Database url") + CmdConsole.Flag.StringVar(&outFile, "out", "", "Record stdout to file") + CmdConsole.Flag.BoolVar(&noRC, "no-rc", false, "Do not read start up file") + CmdConsole.Flag.BoolVar(&singleTransaction, "single-transaction", false, "Execute as a single transaction (if non-interactive)") + CmdConsole.Flag.StringVar(&command, "command", "", "Run only single command (SQL or usql internal command) and exit") + 
CmdConsole.Flag.StringVar(&fileName, "file", "", "Execute commands from file and exit") +} + // SqTime provides a type that will correctly scan the various timestamps // values stored by the github.com/mattn/go-sqlite3 driver for time.Time // values, as well as correctly satisfying the sql/driver/Valuer interface. @@ -207,7 +236,7 @@ func usqlRegister() { }) } -func run(u *user.User, dsn, command, fileName, outFile string, noRC, singleTransaction bool, variables VarsFlag) (err error) { +func run(u *user.User) (err error) { // get working directory wd, err := os.Getwd() if err != nil { @@ -288,8 +317,10 @@ func run(u *user.User, dsn, command, fileName, outFile string, noRC, singleTrans return nil } -// RunConsole runs a console for sql operation in command line. -func RunConsole(dsn, command, fileName, outFile string, noRC, singleTransaction bool, variables VarsFlag) { +// runConsole runs a console for sql operation in command line. +func runConsole(cmd *Command, args []string) { + + configInit() usqlRegister() @@ -321,7 +352,7 @@ func RunConsole(dsn, command, fileName, outFile string, noRC, singleTransaction } // run - err := run(curUser, dsn, command, fileName, outFile, noRC, singleTransaction, variables) + err := run(curUser) if err != nil && err != io.EOF && err != rline.ErrInterrupt { ConsoleLog.WithError(err).Error("run cli error") diff --git a/cmd/cql/main.go b/cmd/cql/main.go index fde4c75d0..4668b42b0 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -23,12 +23,7 @@ import ( "flag" "fmt" "math/rand" - "net/http" "os" - "path/filepath" - "regexp" - "runtime" - "strconv" "strings" "time" @@ -38,47 +33,35 @@ import ( pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/client" "github.com/CovenantSQL/CovenantSQL/cmd/cql/internal" - "github.com/CovenantSQL/CovenantSQL/conf" - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" 
"github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/sqlchain/adapter" - "github.com/CovenantSQL/CovenantSQL/sqlchain/observer" "github.com/CovenantSQL/CovenantSQL/types" - "github.com/CovenantSQL/CovenantSQL/utils" - "github.com/CovenantSQL/CovenantSQL/utils/log" ) var ( - version = "unknown" - dsn string - command string - fileName string - outFile string - noRC bool - configFile string - password string - singleTransaction bool - showVersion bool - variables internal.VarsFlag - - // Shard chain explorer/adapter stuff - tmpPath string // background observer and explorer block and log file path - bgLogLevel string // background log level - explorerAddr string // explorer Web addr - adapterAddr string // adapter listen addr - - // DML variables - createDB string // as a instance meta json string or simply a node count - dropDB string // database id to drop - updatePermission string // update user's permission on specific sqlchain - transferToken string // transfer token to target account - getBalance bool // get balance of current account - getBalanceWithTokenName string // get specific token's balance of current account - waitTxConfirmation bool // wait for transaction confirmation before exiting - - service *observer.Service - httpServer *http.Server + version = "unknown" + +// configFile string +// password string +// showVersion bool +// +// // Shard chain explorer/adapter stuff +// tmpPath string // background observer and explorer block and log file path +// bgLogLevel string // background log level +// explorerAddr string // explorer Web addr +// adapterAddr string // adapter listen addr +// +// // DML variables +// createDB string // as a instance meta json string or simply a node count +// dropDB string // database id to drop +// updatePermission string // update user's permission on specific sqlchain +// transferToken string // transfer token to target account +// getBalance bool // get balance of current account +// 
getBalanceWithTokenName string // get specific token's balance of current account +// waitTxConfirmation bool // wait for transaction confirmation before exiting +// +// service *observer.Service +// httpServer *http.Server ) type userPermission struct { @@ -101,336 +84,351 @@ type tranToken struct { } func init() { - flag.StringVar(&dsn, "dsn", "", "Database url") - flag.StringVar(&command, "command", "", "Run only single command (SQL or usql internal command) and exit") - flag.StringVar(&fileName, "file", "", "Execute commands from file and exit") - flag.BoolVar(&showVersion, "version", false, "Show version information and exit") - flag.BoolVar(&noRC, "no-rc", false, "Do not read start up file") - flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, - "Disable signature sign and verify, for testing") - flag.StringVar(&outFile, "out", "", "Record stdout to file") - flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file for covenantsql") - flag.StringVar(&password, "password", "", "Master key password for covenantsql") - flag.BoolVar(&singleTransaction, "single-transaction", false, "Execute as a single transaction (if non-interactive)") - flag.Var(&variables, "variable", "Set variable") - - // Explorer/Adapter - flag.StringVar(&tmpPath, "tmp-path", "", "Background service temp file path, use os.TempDir for default") - flag.StringVar(&bgLogLevel, "bg-log-level", "", "Background service log level") - flag.StringVar(&explorerAddr, "web", "", "Address to serve a database chain explorer, e.g. :8546") - flag.StringVar(&adapterAddr, "adapter", "", "Address to serve a database chain adapter, e.g. 
:7784") - - // DML flags - flag.StringVar(&createDB, "create", "", "Create database, argument can be instance requirement json or simply a node count requirement") - flag.StringVar(&dropDB, "drop", "", "Drop database, argument should be a database id (without covenantsql:// scheme is acceptable)") - flag.StringVar(&updatePermission, "update-perm", "", "Update user's permission on specific sqlchain") - flag.StringVar(&transferToken, "transfer", "", "Transfer token to target account") - flag.BoolVar(&getBalance, "get-balance", false, "Get balance of current account") - flag.StringVar(&getBalanceWithTokenName, "token-balance", "", "Get specific token's balance of current account, e.g. Particle, Wave, and etc.") - flag.BoolVar(&waitTxConfirmation, "wait-tx-confirm", false, "Wait for transaction confirmation") + // flag.BoolVar(&showVersion, "version", false, "Show version information and exit") + // flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, + // "Disable signature sign and verify, for testing") + // flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file for covenantsql") + // flag.StringVar(&password, "password", "", "Master key password for covenantsql") + + // // Explorer/Adapter + // flag.StringVar(&tmpPath, "tmp-path", "", "Background service temp file path, use os.TempDir for default") + // flag.StringVar(&bgLogLevel, "bg-log-level", "", "Background service log level") // flag.StringVar(&explorerAddr, "web", "", "Address to serve a database chain explorer, e.g. :8546") + // flag.StringVar(&adapterAddr, "adapter", "", "Address to serve a database chain adapter, e.g. 
:7784") + + // // DML flags + // flag.StringVar(&createDB, "create", "", "Create database, argument can be instance requirement json or simply a node count requirement") + // flag.StringVar(&dropDB, "drop", "", "Drop database, argument should be a database id (without covenantsql:// scheme is acceptable)") + // flag.StringVar(&updatePermission, "update-perm", "", "Update user's permission on specific sqlchain") + // flag.StringVar(&transferToken, "transfer", "", "Transfer token to target account") + // flag.BoolVar(&getBalance, "get-balance", false, "Get balance of current account") + // flag.StringVar(&getBalanceWithTokenName, "token-balance", "", "Get specific token's balance of current account, e.g. Particle, Wave, and etc.") + // flag.BoolVar(&waitTxConfirmation, "wait-tx-confirm", false, "Wait for transaction confirmation") + + internal.CqlCommands = []*internal.Command{ + internal.CmdConsole, + } } func main() { internal.Version = version - var err error // set random rand.Seed(time.Now().UnixNano()) + flag.Usage = mainUsage flag.Parse() - internal.ConsoleLog = logrus.New() - - if showVersion { - fmt.Printf("%v %v %v %v %v\n", - internal.Name, internal.Version, runtime.GOOS, runtime.GOARCH, runtime.Version()) - os.Exit(0) - } - internal.ConsoleLog.Infof("cql build: %#v\n", internal.Version) - - configFile = utils.HomeDirExpand(configFile) - - // init covenantsql driver - if err = client.Init(configFile, []byte(password)); err != nil { - internal.ConsoleLog.WithError(err).Error("init covenantsql client failed") - os.Exit(-1) - return - } - - if tmpPath == "" { - tmpPath = os.TempDir() - } - logPath := filepath.Join(tmpPath, "covenant_service.log") - bgLog, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) - if err != nil { - fmt.Fprintf(os.Stderr, "open log file failed: %s, %v", logPath, err) - os.Exit(-1) - } - log.SetOutput(bgLog) - log.SetStringLevel(bgLogLevel, log.InfoLevel) - - if explorerAddr != "" { - service, httpServer, err = 
observer.StartObserver(explorerAddr, internal.Version) - if err != nil { - log.WithError(err).Fatal("start explorer failed") - } else { - internal.ConsoleLog.Infof("explorer started on %s", explorerAddr) - } - - defer func() { - _ = observer.StopObserver(service, httpServer) - log.Info("explorer stopped") - }() - } - - if adapterAddr != "" { - server, err := adapter.NewHTTPAdapter(adapterAddr, configFile) - if err != nil { - log.WithError(err).Fatal("init adapter failed") - } - - if err = server.Serve(); err != nil { - log.WithError(err).Fatal("start adapter failed") - } else { - internal.ConsoleLog.Infof("adapter started on %s", adapterAddr) - - defer func() { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - defer cancel() - server.Shutdown(ctx) - log.Info("stopped adapter") - }() - } - } - - // TODO(leventeliu): discover more specific confirmation duration from config. We don't have - // enough informations from config to do that currently, so just use a fixed and long enough - // duration. 
- internal.WaitTxConfirmationMaxDuration = 20 * conf.GConf.BPPeriod - - if getBalance { - var stableCoinBalance, covenantCoinBalance uint64 - - if stableCoinBalance, err = client.GetTokenBalance(types.Particle); err != nil { - internal.ConsoleLog.WithError(err).Error("get Particle balance failed") - return - } - if covenantCoinBalance, err = client.GetTokenBalance(types.Wave); err != nil { - internal.ConsoleLog.WithError(err).Error("get Wave balance failed") - return - } - - internal.ConsoleLog.Infof("Particle balance is: %d", stableCoinBalance) - internal.ConsoleLog.Infof("Wave balance is: %d", covenantCoinBalance) - - return + args := flag.Args() + if len(args) < 1 { + mainUsage() } - if getBalanceWithTokenName != "" { - var tokenBalance uint64 - tokenType := types.FromString(getBalanceWithTokenName) - if !tokenType.Listed() { - values := make([]string, len(types.TokenList)) - for i := types.Particle; i < types.SupportTokenNumber; i++ { - values[i] = types.TokenList[i] - } - internal.ConsoleLog.Errorf("no such token supporting in CovenantSQL (what we support: %s)", - strings.Join(values, ", ")) - os.Exit(-1) - return - } - if tokenBalance, err = client.GetTokenBalance(tokenType); err != nil { - internal.ConsoleLog.WithError(err).Error("get token balance failed") - os.Exit(-1) - return - } - internal.ConsoleLog.Infof("%s balance is: %d", tokenType.String(), tokenBalance) + internal.CmdName = args[0] // for error messages + if args[0] == "help" { + mainUsage() return } - if dropDB != "" { - // drop database - if _, err := client.ParseDSN(dropDB); err != nil { - // not a dsn - cfg := client.NewConfig() - cfg.DatabaseID = dropDB - dropDB = cfg.FormatDSN() - } - - txHash, err := client.Drop(dropDB) - if err != nil { - // drop database failed - internal.ConsoleLog.WithField("db", dropDB).WithError(err).Error("drop database failed") - return - } - - if waitTxConfirmation { - wait(txHash) - } - - // drop database success - internal.ConsoleLog.Infof("drop database %#v 
success", dropDB) - return - } - - if createDB != "" { - // create database - // parse instance requirement - var meta client.ResourceMeta - - if err := json.Unmarshal([]byte(createDB), &meta); err != nil { - // not a instance json, try if it is a number describing node count - nodeCnt, err := strconv.ParseUint(createDB, 10, 16) - if err != nil { - // still failing - internal.ConsoleLog.WithField("db", createDB).Error("create database failed: invalid instance description") - os.Exit(-1) - return - } - - meta = client.ResourceMeta{} - meta.Node = uint16(nodeCnt) - } - - txHash, dsn, err := client.Create(meta) - if err != nil { - internal.ConsoleLog.WithError(err).Error("create database failed") - os.Exit(-1) - return - } - - if waitTxConfirmation { - wait(txHash) - var ctx, cancel = context.WithTimeout(context.Background(), internal.WaitTxConfirmationMaxDuration) - defer cancel() - err = client.WaitDBCreation(ctx, dsn) - if err != nil { - internal.ConsoleLog.WithError(err).Error("create database failed durating creation") - os.Exit(-1) - } - } - - internal.ConsoleLog.Infof("the newly created database is: %#v", dsn) - fmt.Printf(dsn) - return - } - - if updatePermission != "" { - // update user's permission on sqlchain - var perm userPermission - if err := json.Unmarshal([]byte(updatePermission), &perm); err != nil { - internal.ConsoleLog.WithError(err).Errorf("update permission failed: invalid permission description") - os.Exit(-1) - return - } - - var permPayload userPermPayload - - if err := json.Unmarshal(perm.Perm, &permPayload); err != nil { - // try again using role string representation - if err := json.Unmarshal(perm.Perm, &permPayload.Role); err != nil { - internal.ConsoleLog.WithError(err).Errorf("update permission failed: invalid permission description") - os.Exit(-1) - return - } - } - - p := &types.UserPermission{ - Role: permPayload.Role, - Patterns: permPayload.Patterns, - } - - if !p.IsValid() { - internal.ConsoleLog.Errorf("update permission failed: 
invalid permission description") - os.Exit(-1) - return - } + internal.ConsoleLog = logrus.New() - txHash, err := client.UpdatePermission(perm.TargetUser, perm.TargetChain, p) - if err != nil { - internal.ConsoleLog.WithError(err).Error("update permission failed") - os.Exit(-1) - return + // var err error + // if showVersion { + // fmt.Printf("%v %v %v %v %v\n", + // internal.Name, internal.Version, runtime.GOOS, runtime.GOARCH, runtime.Version()) + // os.Exit(0) + // } + internal.ConsoleLog.Infof("cql build: %#v\n", internal.Version) + // + // if tmpPath == "" { + // tmpPath = os.TempDir() + // } + // logPath := filepath.Join(tmpPath, "covenant_service.log") + // bgLog, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + // if err != nil { + // fmt.Fprintf(os.Stderr, "open log file failed: %s, %v", logPath, err) + // os.Exit(-1) + // } + // log.SetOutput(bgLog) + // log.SetStringLevel(bgLogLevel, log.InfoLevel) + // + // if explorerAddr != "" { + // service, httpServer, err = observer.StartObserver(explorerAddr, internal.Version) + // if err != nil { + // log.WithError(err).Fatal("start explorer failed") + // } else { + // internal.ConsoleLog.Infof("explorer started on %s", explorerAddr) + // } + // + // defer func() { + // _ = observer.StopObserver(service, httpServer) + // log.Info("explorer stopped") + // }() + // } + // + // if adapterAddr != "" { + // server, err := adapter.NewHTTPAdapter(adapterAddr, configFile) + // if err != nil { + // log.WithError(err).Fatal("init adapter failed") + // } + // + // if err = server.Serve(); err != nil { + // log.WithError(err).Fatal("start adapter failed") + // } else { + // internal.ConsoleLog.Infof("adapter started on %s", adapterAddr) + // + // defer func() { + // ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + // defer cancel() + // server.Shutdown(ctx) + // log.Info("stopped adapter") + // }() + // } + // } + // + // + // if getBalance { + // var stableCoinBalance, 
covenantCoinBalance uint64 + // + // if stableCoinBalance, err = client.GetTokenBalance(types.Particle); err != nil { + // internal.ConsoleLog.WithError(err).Error("get Particle balance failed") + // return + // } + // if covenantCoinBalance, err = client.GetTokenBalance(types.Wave); err != nil { + // internal.ConsoleLog.WithError(err).Error("get Wave balance failed") + // return + // } + // + // internal.ConsoleLog.Infof("Particle balance is: %d", stableCoinBalance) + // internal.ConsoleLog.Infof("Wave balance is: %d", covenantCoinBalance) + // + // return + // } + // + // if getBalanceWithTokenName != "" { + // var tokenBalance uint64 + // tokenType := types.FromString(getBalanceWithTokenName) + // if !tokenType.Listed() { + // values := make([]string, len(types.TokenList)) + // for i := types.Particle; i < types.SupportTokenNumber; i++ { + // values[i] = types.TokenList[i] + // } + // internal.ConsoleLog.Errorf("no such token supporting in CovenantSQL (what we support: %s)", + // strings.Join(values, ", ")) + // os.Exit(-1) + // return + // } + // if tokenBalance, err = client.GetTokenBalance(tokenType); err != nil { + // internal.ConsoleLog.WithError(err).Error("get token balance failed") + // os.Exit(-1) + // return + // } + // internal.ConsoleLog.Infof("%s balance is: %d", tokenType.String(), tokenBalance) + // return + // } + // + // if dropDB != "" { + // // drop database + // if _, err := client.ParseDSN(dropDB); err != nil { + // // not a dsn + // cfg := client.NewConfig() + // cfg.DatabaseID = dropDB + // dropDB = cfg.FormatDSN() + // } + // + // txHash, err := client.Drop(dropDB) + // if err != nil { + // // drop database failed + // internal.ConsoleLog.WithField("db", dropDB).WithError(err).Error("drop database failed") + // return + // } + // + // if waitTxConfirmation { + // wait(txHash) + // } + // + // // drop database success + // internal.ConsoleLog.Infof("drop database %#v success", dropDB) + // return + // } + // + // if createDB != "" { + // 
// create database + // // parse instance requirement + // var meta client.ResourceMeta + // + // if err := json.Unmarshal([]byte(createDB), &meta); err != nil { + // // not a instance json, try if it is a number describing node count + // nodeCnt, err := strconv.ParseUint(createDB, 10, 16) + // if err != nil { + // // still failing + // internal.ConsoleLog.WithField("db", createDB).Error("create database failed: invalid instance description") + // os.Exit(-1) + // return + // } + // + // meta = client.ResourceMeta{} + // meta.Node = uint16(nodeCnt) + // } + // + // txHash, dsn, err := client.Create(meta) + // if err != nil { + // internal.ConsoleLog.WithError(err).Error("create database failed") + // os.Exit(-1) + // return + // } + // + // if waitTxConfirmation { + // wait(txHash) + // var ctx, cancel = context.WithTimeout(context.Background(), internal.WaitTxConfirmationMaxDuration) + // defer cancel() + // err = client.WaitDBCreation(ctx, dsn) + // if err != nil { + // internal.ConsoleLog.WithError(err).Error("create database failed durating creation") + // os.Exit(-1) + // } + // } + // + // internal.ConsoleLog.Infof("the newly created database is: %#v", dsn) + // fmt.Printf(dsn) + // return + // } + // + // if updatePermission != "" { + // // update user's permission on sqlchain + // var perm userPermission + // if err := json.Unmarshal([]byte(updatePermission), &perm); err != nil { + // internal.ConsoleLog.WithError(err).Errorf("update permission failed: invalid permission description") + // os.Exit(-1) + // return + // } + // + // var permPayload userPermPayload + // + // if err := json.Unmarshal(perm.Perm, &permPayload); err != nil { + // // try again using role string representation + // if err := json.Unmarshal(perm.Perm, &permPayload.Role); err != nil { + // internal.ConsoleLog.WithError(err).Errorf("update permission failed: invalid permission description") + // os.Exit(-1) + // return + // } + // } + // + // p := &types.UserPermission{ + // Role: 
permPayload.Role, + // Patterns: permPayload.Patterns, + // } + // + // if !p.IsValid() { + // internal.ConsoleLog.Errorf("update permission failed: invalid permission description") + // os.Exit(-1) + // return + // } + // + // txHash, err := client.UpdatePermission(perm.TargetUser, perm.TargetChain, p) + // if err != nil { + // internal.ConsoleLog.WithError(err).Error("update permission failed") + // os.Exit(-1) + // return + // } + // + // if waitTxConfirmation { + // err = wait(txHash) + // if err != nil { + // os.Exit(-1) + // return + // } + // } + // + // internal.ConsoleLog.Info("succeed in sending transaction to CovenantSQL") + // return + // } + // + // if transferToken != "" { + // // transfer token + // var tran tranToken + // if err := json.Unmarshal([]byte(transferToken), &tran); err != nil { + // internal.ConsoleLog.WithError(err).Errorf("transfer token failed: invalid transfer description") + // os.Exit(-1) + // return + // } + // + // var validAmount = regexp.MustCompile(`^([0-9]+) *([a-zA-Z]+)$`) + // if !validAmount.MatchString(tran.Amount) { + // internal.ConsoleLog.Error("transfer token failed: invalid transfer description") + // os.Exit(-1) + // return + // } + // amountUnit := validAmount.FindStringSubmatch(tran.Amount) + // if len(amountUnit) != 3 { + // internal.ConsoleLog.Error("transfer token failed: invalid transfer description") + // for _, v := range amountUnit { + // internal.ConsoleLog.Error(v) + // } + // os.Exit(-1) + // return + // } + // amount, err := strconv.ParseUint(amountUnit[1], 10, 64) + // if err != nil { + // internal.ConsoleLog.Error("transfer token failed: invalid token amount") + // os.Exit(-1) + // return + // } + // unit := types.FromString(amountUnit[2]) + // if !unit.Listed() { + // internal.ConsoleLog.Error("transfer token failed: invalid token type") + // os.Exit(-1) + // return + // } + // + // var txHash hash.Hash + // txHash, err = client.TransferToken(tran.TargetUser, amount, unit) + // if err != nil { + // 
internal.ConsoleLog.WithError(err).Error("transfer token failed") + // os.Exit(-1) + // return + // } + // + // if waitTxConfirmation { + // err = wait(txHash) + // if err != nil { + // os.Exit(-1) + // return + // } + // } + // + // internal.ConsoleLog.Info("succeed in sending transaction to CovenantSQL") + // return + // } + + for _, cmd := range internal.CqlCommands { + if cmd.Name() != args[0] { + continue } - - if waitTxConfirmation { - err = wait(txHash) - if err != nil { - os.Exit(-1) - return - } + if !cmd.Runnable() { + continue } - - internal.ConsoleLog.Info("succeed in sending transaction to CovenantSQL") + cmd.Flag.Usage = func() { cmd.Usage() } + cmd.Flag.Parse(args[1:]) + args = cmd.Flag.Args() + cmd.Run(cmd, args) + internal.Exit() return } - - if transferToken != "" { - // transfer token - var tran tranToken - if err := json.Unmarshal([]byte(transferToken), &tran); err != nil { - internal.ConsoleLog.WithError(err).Errorf("transfer token failed: invalid transfer description") - os.Exit(-1) - return - } - - var validAmount = regexp.MustCompile(`^([0-9]+) *([a-zA-Z]+)$`) - if !validAmount.MatchString(tran.Amount) { - internal.ConsoleLog.Error("transfer token failed: invalid transfer description") - os.Exit(-1) - return - } - amountUnit := validAmount.FindStringSubmatch(tran.Amount) - if len(amountUnit) != 3 { - internal.ConsoleLog.Error("transfer token failed: invalid transfer description") - for _, v := range amountUnit { - internal.ConsoleLog.Error(v) - } - os.Exit(-1) - return - } - amount, err := strconv.ParseUint(amountUnit[1], 10, 64) - if err != nil { - internal.ConsoleLog.Error("transfer token failed: invalid token amount") - os.Exit(-1) - return - } - unit := types.FromString(amountUnit[2]) - if !unit.Listed() { - internal.ConsoleLog.Error("transfer token failed: invalid token type") - os.Exit(-1) - return - } - - var txHash hash.Hash - txHash, err = client.TransferToken(tran.TargetUser, amount, unit) - if err != nil { - 
internal.ConsoleLog.WithError(err).Error("transfer token failed") - os.Exit(-1) - return - } - - if waitTxConfirmation { - err = wait(txHash) - if err != nil { - os.Exit(-1) - return - } - } - - internal.ConsoleLog.Info("succeed in sending transaction to CovenantSQL") - return + helpArg := "" + if i := strings.LastIndex(internal.CmdName, " "); i >= 0 { + helpArg = " " + internal.CmdName[:i] } - - internal.RunConsole(dsn, command, fileName, outFile, noRC, singleTransaction, variables) + fmt.Fprintf(os.Stderr, "cql %s: unknown command\nRun 'cql help%s' for usage.\n", internal.CmdName, helpArg) + internal.SetExitStatus(2) + internal.Exit() // if web flag is enabled - if explorerAddr != "" || adapterAddr != "" { - fmt.Printf("Ctrl + C to stop explorer on %s and adapter on %s\n", explorerAddr, adapterAddr) - <-utils.WaitForExit() - return - } + //if explorerAddr != "" || adapterAddr != "" { + // fmt.Printf("Ctrl + C to stop explorer on %s and adapter on %s\n", explorerAddr, adapterAddr) + // <-utils.WaitForExit() + // return + //} } func wait(txHash hash.Hash) (err error) { @@ -447,3 +445,8 @@ func wait(txHash hash.Hash) (err error) { } return } + +func mainUsage() { + //TODO(laodouya) print stderr main usage + os.Exit(2) +} From c9735f1e2ec8ece342e3ec2519c6335077f2627b Mon Sep 17 00:00:00 2001 From: laodouya Date: Fri, 15 Mar 2019 14:12:45 +0800 Subject: [PATCH 099/244] Refactor version print into version command. 
--- cmd/cql/internal/cfg.go | 5 ++--- cmd/cql/internal/command.go | 11 ++++++----- cmd/cql/internal/help.go | 22 ++++++++++++++++++++-- cmd/cql/main.go | 9 +-------- 4 files changed, 29 insertions(+), 18 deletions(-) diff --git a/cmd/cql/internal/cfg.go b/cmd/cql/internal/cfg.go index eda62f3f3..d9fa8958a 100644 --- a/cmd/cql/internal/cfg.go +++ b/cmd/cql/internal/cfg.go @@ -1,8 +1,6 @@ package internal import ( - "os" - "github.com/CovenantSQL/CovenantSQL/client" "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" @@ -33,7 +31,8 @@ func configInit() { // init covenantsql driver if err := client.Init(configFile, []byte(password)); err != nil { ConsoleLog.WithError(err).Error("init covenantsql client failed") - os.Exit(-1) + SetExitStatus(1) + Exit() return } diff --git a/cmd/cql/internal/command.go b/cmd/cql/internal/command.go index f8e845012..6d5640fc5 100644 --- a/cmd/cql/internal/command.go +++ b/cmd/cql/internal/command.go @@ -291,14 +291,14 @@ func run(u *user.User) (err error) { h.Reset([]rune(command)) if err = h.Run(); err != nil && err != io.EOF { ConsoleLog.WithError(err).Error("run command failed") - os.Exit(-1) + SetExitStatus(1) return } } else if fileName != "" { // file if err = h.Include(fileName, false); err != nil { ConsoleLog.WithError(err).Error("run file failed") - os.Exit(-1) + SetExitStatus(1) return } } else { @@ -333,7 +333,7 @@ func runConsole(cmd *Command, args []string) { var wd string if wd, err = os.Getwd(); err != nil { ConsoleLog.WithError(err).Error("get working directory failed") - os.Exit(-1) + SetExitStatus(1) return } curUser = &user.User{ @@ -346,13 +346,14 @@ func runConsole(cmd *Command, args []string) { } else { if curUser, err = user.Current(); err != nil { ConsoleLog.WithError(err).Error("get current user failed") - os.Exit(-1) + SetExitStatus(1) return } } // run err := run(curUser) + ExitIfErrors() if err != nil && err != io.EOF && err != rline.ErrInterrupt { 
ConsoleLog.WithError(err).Error("run cli error") @@ -363,7 +364,7 @@ func runConsole(cmd *Command, args []string) { } ConsoleLog.Infof("available drivers are: %#v", bindings) } - os.Exit(-1) + SetExitStatus(1) return } } diff --git a/cmd/cql/internal/help.go b/cmd/cql/internal/help.go index 671ccd44d..1b6549f49 100644 --- a/cmd/cql/internal/help.go +++ b/cmd/cql/internal/help.go @@ -1,9 +1,27 @@ package internal -// Name of command -const Name = "cql" +import ( + "fmt" + "runtime" +) + +const name = "cql" var ( // Version of command, set by main func of version Version = "unknown" ) + +var CmdVersion = &Command{ + UsageLine: "cql version", + Description: "Show cql build version infomation", +} + +func init() { + CmdVersion.Run = runVersion +} + +func runVersion(cmd *Command, args []string) { + fmt.Printf("%v %v %v %v %v\n", + name, Version, runtime.GOOS, runtime.GOARCH, runtime.Version()) +} diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 4668b42b0..2f2ffd7c6 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -43,7 +43,6 @@ var ( // configFile string // password string -// showVersion bool // // // Shard chain explorer/adapter stuff // tmpPath string // background observer and explorer block and log file path @@ -84,7 +83,6 @@ type tranToken struct { } func init() { - // flag.BoolVar(&showVersion, "version", false, "Show version information and exit") // flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, // "Disable signature sign and verify, for testing") // flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file for covenantsql") @@ -106,6 +104,7 @@ func init() { internal.CqlCommands = []*internal.Command{ internal.CmdConsole, + internal.CmdVersion, } } @@ -132,12 +131,6 @@ func main() { internal.ConsoleLog = logrus.New() - // var err error - // if showVersion { - // fmt.Printf("%v %v %v %v %v\n", - // internal.Name, internal.Version, runtime.GOOS, runtime.GOARCH, runtime.Version()) - // os.Exit(0) - // } 
internal.ConsoleLog.Infof("cql build: %#v\n", internal.Version) // // if tmpPath == "" { From d18607228838650dcd38ee3557f3785d927f3000 Mon Sep 17 00:00:00 2001 From: laodouya Date: Fri, 15 Mar 2019 14:52:12 +0800 Subject: [PATCH 100/244] Refactor token balance check funcs into balance command. --- cmd/cql/internal/balance.go | 65 +++++++++++++++++++++++++++++++++++++ cmd/cql/main.go | 45 +------------------------ 2 files changed, 66 insertions(+), 44 deletions(-) diff --git a/cmd/cql/internal/balance.go b/cmd/cql/internal/balance.go index 5bf0569ce..4bc2e437c 100644 --- a/cmd/cql/internal/balance.go +++ b/cmd/cql/internal/balance.go @@ -1 +1,66 @@ package internal + +import ( + "strings" + + "github.com/CovenantSQL/CovenantSQL/client" + "github.com/CovenantSQL/CovenantSQL/types" +) + +var ( + tokenName string // get specific token's balance of current account +) + +var CmdBalance = &Command{ + UsageLine: "cql balance [-token token_name]", + Description: "Get CovenantSQL balance of current account", +} + +func init() { + CmdBalance.Run = runBalance + + AddCommonFlags(CmdBalance) + CmdBalance.Flag.StringVar(&tokenName, "token", "", "Get specific token's balance of current account, e.g. 
Particle, Wave, and etc.") +} + +func runBalance(cmd *Command, args []string) { + configInit() + + var err error + if tokenName == "" { + var stableCoinBalance, covenantCoinBalance uint64 + + if stableCoinBalance, err = client.GetTokenBalance(types.Particle); err != nil { + ConsoleLog.WithError(err).Error("get Particle balance failed") + SetExitStatus(1) + return + } + if covenantCoinBalance, err = client.GetTokenBalance(types.Wave); err != nil { + ConsoleLog.WithError(err).Error("get Wave balance failed") + SetExitStatus(1) + return + } + + ConsoleLog.Infof("Particle balance is: %d", stableCoinBalance) + ConsoleLog.Infof("Wave balance is: %d", covenantCoinBalance) + } else { + var tokenBalance uint64 + tokenType := types.FromString(tokenName) + if !tokenType.Listed() { + values := make([]string, len(types.TokenList)) + for i := types.Particle; i < types.SupportTokenNumber; i++ { + values[i] = types.TokenList[i] + } + ConsoleLog.Errorf("no such token supporting in CovenantSQL (what we support: %s)", + strings.Join(values, ", ")) + SetExitStatus(1) + return + } + if tokenBalance, err = client.GetTokenBalance(tokenType); err != nil { + ConsoleLog.WithError(err).Error("get token balance failed") + SetExitStatus(1) + return + } + ConsoleLog.Infof("%s balance is: %d", tokenType.String(), tokenBalance) + } +} diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 2f2ffd7c6..03dd9ade7 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -55,8 +55,6 @@ var ( // dropDB string // database id to drop // updatePermission string // update user's permission on specific sqlchain // transferToken string // transfer token to target account -// getBalance bool // get balance of current account -// getBalanceWithTokenName string // get specific token's balance of current account // waitTxConfirmation bool // wait for transaction confirmation before exiting // // service *observer.Service @@ -98,13 +96,12 @@ func init() { // flag.StringVar(&dropDB, "drop", "", "Drop database, argument 
should be a database id (without covenantsql:// scheme is acceptable)") // flag.StringVar(&updatePermission, "update-perm", "", "Update user's permission on specific sqlchain") // flag.StringVar(&transferToken, "transfer", "", "Transfer token to target account") - // flag.BoolVar(&getBalance, "get-balance", false, "Get balance of current account") - // flag.StringVar(&getBalanceWithTokenName, "token-balance", "", "Get specific token's balance of current account, e.g. Particle, Wave, and etc.") // flag.BoolVar(&waitTxConfirmation, "wait-tx-confirm", false, "Wait for transaction confirmation") internal.CqlCommands = []*internal.Command{ internal.CmdConsole, internal.CmdVersion, + internal.CmdBalance, } } @@ -180,46 +177,6 @@ func main() { // } // // - // if getBalance { - // var stableCoinBalance, covenantCoinBalance uint64 - // - // if stableCoinBalance, err = client.GetTokenBalance(types.Particle); err != nil { - // internal.ConsoleLog.WithError(err).Error("get Particle balance failed") - // return - // } - // if covenantCoinBalance, err = client.GetTokenBalance(types.Wave); err != nil { - // internal.ConsoleLog.WithError(err).Error("get Wave balance failed") - // return - // } - // - // internal.ConsoleLog.Infof("Particle balance is: %d", stableCoinBalance) - // internal.ConsoleLog.Infof("Wave balance is: %d", covenantCoinBalance) - // - // return - // } - // - // if getBalanceWithTokenName != "" { - // var tokenBalance uint64 - // tokenType := types.FromString(getBalanceWithTokenName) - // if !tokenType.Listed() { - // values := make([]string, len(types.TokenList)) - // for i := types.Particle; i < types.SupportTokenNumber; i++ { - // values[i] = types.TokenList[i] - // } - // internal.ConsoleLog.Errorf("no such token supporting in CovenantSQL (what we support: %s)", - // strings.Join(values, ", ")) - // os.Exit(-1) - // return - // } - // if tokenBalance, err = client.GetTokenBalance(tokenType); err != nil { - // internal.ConsoleLog.WithError(err).Error("get 
token balance failed") - // os.Exit(-1) - // return - // } - // internal.ConsoleLog.Infof("%s balance is: %d", tokenType.String(), tokenBalance) - // return - // } - // // if dropDB != "" { // // drop database // if _, err := client.ParseDSN(dropDB); err != nil { From 2f59c1d1b558c670dece8ae752ecc419497b027c Mon Sep 17 00:00:00 2001 From: laodouya Date: Fri, 15 Mar 2019 15:50:50 +0800 Subject: [PATCH 101/244] Refactor createDB feature into create command. --- cmd/cql/internal/balance.go | 2 +- cmd/cql/internal/cfg.go | 30 ++++++++++++++- cmd/cql/internal/command.go | 12 +++--- cmd/cql/internal/create.go | 71 ++++++++++++++++++++++++++++++++++++ cmd/cql/main.go | 73 +------------------------------------ 5 files changed, 107 insertions(+), 81 deletions(-) diff --git a/cmd/cql/internal/balance.go b/cmd/cql/internal/balance.go index 4bc2e437c..3d4c19f1c 100644 --- a/cmd/cql/internal/balance.go +++ b/cmd/cql/internal/balance.go @@ -19,7 +19,7 @@ var CmdBalance = &Command{ func init() { CmdBalance.Run = runBalance - AddCommonFlags(CmdBalance) + addCommonFlags(CmdBalance) CmdBalance.Flag.StringVar(&tokenName, "token", "", "Get specific token's balance of current account, e.g. Particle, Wave, and etc.") } diff --git a/cmd/cql/internal/cfg.go b/cmd/cql/internal/cfg.go index d9fa8958a..968c965cc 100644 --- a/cmd/cql/internal/cfg.go +++ b/cmd/cql/internal/cfg.go @@ -1,10 +1,16 @@ package internal import ( + "context" + "errors" + + pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/client" "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/utils" + "github.com/sirupsen/logrus" ) // These are general flags used by console and other commands. 
@@ -12,11 +18,12 @@ var ( configFile string password string + waitTxConfirmation bool // wait for transaction confirmation before exiting + CmdName string ) -// AddCommonFlags adds the flags common to all commands. -func AddCommonFlags(cmd *Command) { +func addCommonFlags(cmd *Command) { cmd.Flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file for covenantsql") cmd.Flag.StringVar(&password, "password", "", "Master key password for covenantsql") @@ -41,3 +48,22 @@ func configInit() { // duration. WaitTxConfirmationMaxDuration = 20 * conf.GConf.BPPeriod } + +func addWaitFlag(cmd *Command) { + cmd.Flag.BoolVar(&waitTxConfirmation, "wait-tx-confirm", false, "Wait for transaction confirmation") +} + +func wait(txHash hash.Hash) (err error) { + var ctx, cancel = context.WithTimeout(context.Background(), WaitTxConfirmationMaxDuration) + defer cancel() + var state pi.TransactionState + state, err = client.WaitTxConfirmation(ctx, txHash) + ConsoleLog.WithFields(logrus.Fields{ + "tx_hash": txHash, + "tx_state": state, + }).WithError(err).Info("wait transaction confirmation") + if err == nil && state != pi.TransactionStateConfirmed { + err = errors.New("bad transaction state") + } + return +} diff --git a/cmd/cql/internal/command.go b/cmd/cql/internal/command.go index 6d5640fc5..3d06c17a9 100644 --- a/cmd/cql/internal/command.go +++ b/cmd/cql/internal/command.go @@ -50,7 +50,7 @@ var CmdConsole = &Command{ //TODO(laodouya) add web/adapter flag {command/filename} var ( - variables VarsFlag + variables varsFlag dsn string outFile string noRC bool @@ -62,7 +62,7 @@ var ( func init() { CmdConsole.Run = runConsole - AddCommonFlags(CmdConsole) + addCommonFlags(CmdConsole) CmdConsole.Flag.Var(&variables, "variable", "Set variable") CmdConsole.Flag.StringVar(&dsn, "dsn", "", "Database url") CmdConsole.Flag.StringVar(&outFile, "out", "", "Record stdout to file") @@ -116,20 +116,20 @@ func (t *SqTime) parse(s string) error { return errors.New("could not parse time") 
} -type VarsFlag struct { +type varsFlag struct { flag.Value vars []string } -func (v *VarsFlag) Get() []string { +func (v *varsFlag) Get() []string { return append([]string{}, v.vars...) } -func (v *VarsFlag) String() string { +func (v *varsFlag) String() string { return fmt.Sprintf("%#v", v.vars) } -func (v *VarsFlag) Set(value string) error { +func (v *varsFlag) Set(value string) error { v.vars = append(v.vars, value) return nil } diff --git a/cmd/cql/internal/create.go b/cmd/cql/internal/create.go index 5bf0569ce..43f01ad6c 100644 --- a/cmd/cql/internal/create.go +++ b/cmd/cql/internal/create.go @@ -1 +1,72 @@ package internal + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + + "github.com/CovenantSQL/CovenantSQL/client" +) + +var CmdCreate = &Command{ + UsageLine: "cql create [-wait-tx-confirm] [dbmeta/nodecount]", + Description: "Create CovenantSQL database by database metainfo or just number for node count", +} + +func init() { + CmdCreate.Run = runCreate + + addCommonFlags(CmdCreate) + addWaitFlag(CmdCreate) +} + +func runCreate(cmd *Command, args []string) { + configInit() + + if len(args) != 1 { + ConsoleLog.Error("Create command need database_meta_info string or node_count as params") + SetExitStatus(1) + return + } + metaStr := args[0] + // create database + // parse instance requirement + var meta client.ResourceMeta + + if err := json.Unmarshal([]byte(metaStr), &meta); err != nil { + // not a instance json, try if it is a number describing node count + nodeCnt, err := strconv.ParseUint(metaStr, 10, 16) + if err != nil { + // still failing + ConsoleLog.WithField("db", metaStr).Error("create database failed: invalid instance description") + SetExitStatus(1) + return + } + + meta = client.ResourceMeta{} + meta.Node = uint16(nodeCnt) + } + + txHash, dsn, err := client.Create(meta) + if err != nil { + ConsoleLog.WithError(err).Error("create database failed") + SetExitStatus(1) + return + } + + if waitTxConfirmation { + wait(txHash) + var ctx, 
cancel = context.WithTimeout(context.Background(), WaitTxConfirmationMaxDuration) + defer cancel() + err = client.WaitDBCreation(ctx, dsn) + if err != nil { + ConsoleLog.WithError(err).Error("create database failed durating creation") + SetExitStatus(1) + return + } + } + + ConsoleLog.Infof("the newly created database is: %#v", dsn) + fmt.Printf(dsn) +} diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 03dd9ade7..3b124f76c 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -18,7 +18,6 @@ package main import ( - "context" "encoding/json" "flag" "fmt" @@ -27,13 +26,9 @@ import ( "strings" "time" - "github.com/pkg/errors" "github.com/sirupsen/logrus" - pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" - "github.com/CovenantSQL/CovenantSQL/client" "github.com/CovenantSQL/CovenantSQL/cmd/cql/internal" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/types" ) @@ -41,9 +36,6 @@ import ( var ( version = "unknown" -// configFile string -// password string -// // // Shard chain explorer/adapter stuff // tmpPath string // background observer and explorer block and log file path // bgLogLevel string // background log level @@ -51,7 +43,6 @@ var ( // adapterAddr string // adapter listen addr // // // DML variables -// createDB string // as a instance meta json string or simply a node count // dropDB string // database id to drop // updatePermission string // update user's permission on specific sqlchain // transferToken string // transfer token to target account @@ -81,18 +72,12 @@ type tranToken struct { } func init() { - // flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, - // "Disable signature sign and verify, for testing") - // flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file for covenantsql") - // flag.StringVar(&password, "password", "", "Master key password for covenantsql") - // // Explorer/Adapter // flag.StringVar(&tmpPath, 
"tmp-path", "", "Background service temp file path, use os.TempDir for default") // flag.StringVar(&bgLogLevel, "bg-log-level", "", "Background service log level") // flag.StringVar(&explorerAddr, "web", "", "Address to serve a database chain explorer, e.g. :8546") // flag.StringVar(&adapterAddr, "adapter", "", "Address to serve a database chain adapter, e.g. :7784") // // DML flags - // flag.StringVar(&createDB, "create", "", "Create database, argument can be instance requirement json or simply a node count requirement") // flag.StringVar(&dropDB, "drop", "", "Drop database, argument should be a database id (without covenantsql:// scheme is acceptable)") // flag.StringVar(&updatePermission, "update-perm", "", "Update user's permission on specific sqlchain") // flag.StringVar(&transferToken, "transfer", "", "Transfer token to target account") @@ -102,6 +87,7 @@ func init() { internal.CmdConsole, internal.CmdVersion, internal.CmdBalance, + internal.CmdCreate, } } @@ -202,48 +188,6 @@ func main() { // return // } // - // if createDB != "" { - // // create database - // // parse instance requirement - // var meta client.ResourceMeta - // - // if err := json.Unmarshal([]byte(createDB), &meta); err != nil { - // // not a instance json, try if it is a number describing node count - // nodeCnt, err := strconv.ParseUint(createDB, 10, 16) - // if err != nil { - // // still failing - // internal.ConsoleLog.WithField("db", createDB).Error("create database failed: invalid instance description") - // os.Exit(-1) - // return - // } - // - // meta = client.ResourceMeta{} - // meta.Node = uint16(nodeCnt) - // } - // - // txHash, dsn, err := client.Create(meta) - // if err != nil { - // internal.ConsoleLog.WithError(err).Error("create database failed") - // os.Exit(-1) - // return - // } - // - // if waitTxConfirmation { - // wait(txHash) - // var ctx, cancel = context.WithTimeout(context.Background(), internal.WaitTxConfirmationMaxDuration) - // defer cancel() - // err = 
client.WaitDBCreation(ctx, dsn) - // if err != nil { - // internal.ConsoleLog.WithError(err).Error("create database failed durating creation") - // os.Exit(-1) - // } - // } - // - // internal.ConsoleLog.Infof("the newly created database is: %#v", dsn) - // fmt.Printf(dsn) - // return - // } - // // if updatePermission != "" { // // update user's permission on sqlchain // var perm userPermission @@ -381,21 +325,6 @@ func main() { //} } -func wait(txHash hash.Hash) (err error) { - var ctx, cancel = context.WithTimeout(context.Background(), internal.WaitTxConfirmationMaxDuration) - defer cancel() - var state pi.TransactionState - state, err = client.WaitTxConfirmation(ctx, txHash) - internal.ConsoleLog.WithFields(logrus.Fields{ - "tx_hash": txHash, - "tx_state": state, - }).WithError(err).Info("wait transaction confirmation") - if err == nil && state != pi.TransactionStateConfirmed { - err = errors.New("bad transaction state") - } - return -} - func mainUsage() { //TODO(laodouya) print stderr main usage os.Exit(2) From dc4fa1a962e0b64b4c92488fbf28f076cc1c44ea Mon Sep 17 00:00:00 2001 From: laodouya Date: Fri, 15 Mar 2019 16:04:54 +0800 Subject: [PATCH 102/244] Refactor dropDB feature into drop command. --- cmd/cql/internal/base.go | 2 +- cmd/cql/internal/cfg.go | 4 +-- cmd/cql/internal/command.go | 2 +- cmd/cql/internal/create.go | 2 +- cmd/cql/internal/drop.go | 51 +++++++++++++++++++++++++++++++++++++ cmd/cql/main.go | 30 +--------------------- 6 files changed, 57 insertions(+), 34 deletions(-) diff --git a/cmd/cql/internal/base.go b/cmd/cql/internal/base.go index 4cdf3714f..3ff9e3eaf 100644 --- a/cmd/cql/internal/base.go +++ b/cmd/cql/internal/base.go @@ -12,7 +12,7 @@ import ( ) var ( - WaitTxConfirmationMaxDuration time.Duration + waitTxConfirmationMaxDuration time.Duration // ConsoleLog is logging for console. 
ConsoleLog *logrus.Logger diff --git a/cmd/cql/internal/cfg.go b/cmd/cql/internal/cfg.go index 968c965cc..836e9cbeb 100644 --- a/cmd/cql/internal/cfg.go +++ b/cmd/cql/internal/cfg.go @@ -46,7 +46,7 @@ func configInit() { // TODO(leventeliu): discover more specific confirmation duration from config. We don't have // enough informations from config to do that currently, so just use a fixed and long enough // duration. - WaitTxConfirmationMaxDuration = 20 * conf.GConf.BPPeriod + waitTxConfirmationMaxDuration = 20 * conf.GConf.BPPeriod } func addWaitFlag(cmd *Command) { @@ -54,7 +54,7 @@ func addWaitFlag(cmd *Command) { } func wait(txHash hash.Hash) (err error) { - var ctx, cancel = context.WithTimeout(context.Background(), WaitTxConfirmationMaxDuration) + var ctx, cancel = context.WithTimeout(context.Background(), waitTxConfirmationMaxDuration) defer cancel() var state pi.TransactionState state, err = client.WaitTxConfirmation(ctx, txHash) diff --git a/cmd/cql/internal/command.go b/cmd/cql/internal/command.go index 3d06c17a9..1f8a88739 100644 --- a/cmd/cql/internal/command.go +++ b/cmd/cql/internal/command.go @@ -207,7 +207,7 @@ func usqlRegister() { ConsoleLog.Infof("connecting to %#v", url.DSN) // wait for database to become ready - ctx, cancel := context.WithTimeout(context.Background(), WaitTxConfirmationMaxDuration) + ctx, cancel := context.WithTimeout(context.Background(), waitTxConfirmationMaxDuration) defer cancel() if err = client.WaitDBCreation(ctx, url.DSN); err != nil { return diff --git a/cmd/cql/internal/create.go b/cmd/cql/internal/create.go index 43f01ad6c..bcdbadbbf 100644 --- a/cmd/cql/internal/create.go +++ b/cmd/cql/internal/create.go @@ -57,7 +57,7 @@ func runCreate(cmd *Command, args []string) { if waitTxConfirmation { wait(txHash) - var ctx, cancel = context.WithTimeout(context.Background(), WaitTxConfirmationMaxDuration) + var ctx, cancel = context.WithTimeout(context.Background(), waitTxConfirmationMaxDuration) defer cancel() err = 
client.WaitDBCreation(ctx, dsn) if err != nil { diff --git a/cmd/cql/internal/drop.go b/cmd/cql/internal/drop.go index 5bf0569ce..683b4d46b 100644 --- a/cmd/cql/internal/drop.go +++ b/cmd/cql/internal/drop.go @@ -1 +1,52 @@ package internal + +import ( + "github.com/CovenantSQL/CovenantSQL/client" +) + +var CmdDrop = &Command{ + UsageLine: "cql drop [-wait-tx-confirm] [dsn/dbid]", + Description: "Drop CovenantSQL database by database id", +} + +func init() { + CmdDrop.Run = runDrop + + addCommonFlags(CmdDrop) + addWaitFlag(CmdDrop) +} + +func runDrop(cmd *Command, args []string) { + configInit() + + if len(args) != 1 { + ConsoleLog.Error("Drop command need CovenantSQL dsn or database_id string as param") + SetExitStatus(1) + return + } + dsn := args[0] + + // drop database + if _, err := client.ParseDSN(dsn); err != nil { + // not a dsn + cfg := client.NewConfig() + cfg.DatabaseID = dsn + dsn = cfg.FormatDSN() + } + + txHash, err := client.Drop(dsn) + if err != nil { + // drop database failed + ConsoleLog.WithField("db", dsn).WithError(err).Error("drop database failed") + SetExitStatus(1) + return + } + + if waitTxConfirmation { + wait(txHash) + } + + // drop database success + ConsoleLog.Infof("drop database %#v success", dsn) + return +} diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 3b124f76c..a122e08fb 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -43,10 +43,8 @@ var ( // adapterAddr string // adapter listen addr // // // DML variables -// dropDB string // database id to drop // updatePermission string // update user's permission on specific sqlchain // transferToken string // transfer token to target account -// waitTxConfirmation bool // wait for transaction confirmation before exiting // // service *observer.Service // httpServer *http.Server @@ -78,16 +76,15 @@ func init() { // flag.StringVar(&adapterAddr, "adapter", "", "Address to serve a database chain adapter, e.g. 
:7784") // // DML flags - // flag.StringVar(&dropDB, "drop", "", "Drop database, argument should be a database id (without covenantsql:// scheme is acceptable)") // flag.StringVar(&updatePermission, "update-perm", "", "Update user's permission on specific sqlchain") // flag.StringVar(&transferToken, "transfer", "", "Transfer token to target account") - // flag.BoolVar(&waitTxConfirmation, "wait-tx-confirm", false, "Wait for transaction confirmation") internal.CqlCommands = []*internal.Command{ internal.CmdConsole, internal.CmdVersion, internal.CmdBalance, internal.CmdCreate, + internal.CmdDrop, } } @@ -163,31 +160,6 @@ func main() { // } // // - // if dropDB != "" { - // // drop database - // if _, err := client.ParseDSN(dropDB); err != nil { - // // not a dsn - // cfg := client.NewConfig() - // cfg.DatabaseID = dropDB - // dropDB = cfg.FormatDSN() - // } - // - // txHash, err := client.Drop(dropDB) - // if err != nil { - // // drop database failed - // internal.ConsoleLog.WithField("db", dropDB).WithError(err).Error("drop database failed") - // return - // } - // - // if waitTxConfirmation { - // wait(txHash) - // } - // - // // drop database success - // internal.ConsoleLog.Infof("drop database %#v success", dropDB) - // return - // } - // // if updatePermission != "" { // // update user's permission on sqlchain // var perm userPermission From 292137c7bc4b8a599168d2ad4627d7e2da83b20e Mon Sep 17 00:00:00 2001 From: laodouya Date: Fri, 15 Mar 2019 16:29:17 +0800 Subject: [PATCH 103/244] Fail while cql drop command could not wait tx confirm. 
--- cmd/cql/internal/drop.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/cmd/cql/internal/drop.go b/cmd/cql/internal/drop.go index 683b4d46b..7f365ab58 100644 --- a/cmd/cql/internal/drop.go +++ b/cmd/cql/internal/drop.go @@ -6,7 +6,7 @@ import ( var CmdDrop = &Command{ UsageLine: "cql drop [-wait-tx-confirm] [dsn/dbid]", - Description: "Drop CovenantSQL database by database id", + Description: "Drop CovenantSQL database by dsn or database id", } func init() { @@ -29,9 +29,9 @@ func runDrop(cmd *Command, args []string) { // drop database if _, err := client.ParseDSN(dsn); err != nil { // not a dsn - cfg := client.NewConfig() - cfg.DatabaseID = dsn - dsn = cfg.FormatDSN() + ConsoleLog.WithField("db", dsn).WithError(err).Error("Not a valid dsn") + SetExitStatus(1) + return } txHash, err := client.Drop(dsn) @@ -43,7 +43,12 @@ func runDrop(cmd *Command, args []string) { } if waitTxConfirmation { - wait(txHash) + err = wait(txHash) + if err != nil { + ConsoleLog.WithField("db", dsn).WithError(err).Error("drop database failed") + SetExitStatus(1) + return + } } // drop database success From 85132107c4fe1fc66619b4ebfb2f1903da10892f Mon Sep 17 00:00:00 2001 From: laodouya Date: Fri, 15 Mar 2019 16:51:06 +0800 Subject: [PATCH 104/244] Refactor updatePermission feature into permission command. 
--- cmd/cql/internal/{command.go => console.go} | 0 cmd/cql/internal/permission.go | 92 +++++++++++++++++++++ cmd/cql/main.go | 69 +--------------- 3 files changed, 93 insertions(+), 68 deletions(-) rename cmd/cql/internal/{command.go => console.go} (100%) diff --git a/cmd/cql/internal/command.go b/cmd/cql/internal/console.go similarity index 100% rename from cmd/cql/internal/command.go rename to cmd/cql/internal/console.go diff --git a/cmd/cql/internal/permission.go b/cmd/cql/internal/permission.go index 5bf0569ce..1b0921ba3 100644 --- a/cmd/cql/internal/permission.go +++ b/cmd/cql/internal/permission.go @@ -1 +1,93 @@ package internal + +import ( + "encoding/json" + + "github.com/CovenantSQL/CovenantSQL/client" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/types" +) + +var CmdPermission = &Command{ + UsageLine: "cql permission [-wait-tx-confirm] [perm_meta]", + Description: "Update user's permission on specific sqlchain", +} + +func init() { + CmdPermission.Run = runPermission + + addCommonFlags(CmdPermission) + addWaitFlag(CmdPermission) +} + +type userPermission struct { + TargetChain proto.AccountAddress `json:"chain"` + TargetUser proto.AccountAddress `json:"user"` + Perm json.RawMessage `json:"perm"` +} + +type userPermPayload struct { + // User role to access database. + Role types.UserPermissionRole `json:"role"` + // SQL pattern regulations for user queries + // only a fully matched (case-sensitive) sql query is permitted to execute. 
+ Patterns []string `json:"patterns"` +} + +func runPermission(cmd *Command, args []string) { + configInit() + + if len(args) != 1 { + ConsoleLog.Error("Permission command need CovenantSQL perm_meta json string as param") + SetExitStatus(1) + return + } + + updatePermission := args[0] + // update user's permission on sqlchain + var perm userPermission + if err := json.Unmarshal([]byte(updatePermission), &perm); err != nil { + ConsoleLog.WithError(err).Errorf("update permission failed: invalid permission description") + SetExitStatus(1) + return + } + + var permPayload userPermPayload + + if err := json.Unmarshal(perm.Perm, &permPayload); err != nil { + // try again using role string representation + if err := json.Unmarshal(perm.Perm, &permPayload.Role); err != nil { + ConsoleLog.WithError(err).Errorf("update permission failed: invalid permission description") + SetExitStatus(1) + return + } + } + + p := &types.UserPermission{ + Role: permPayload.Role, + Patterns: permPayload.Patterns, + } + + if !p.IsValid() { + ConsoleLog.Errorf("update permission failed: invalid permission description") + SetExitStatus(1) + return + } + + txHash, err := client.UpdatePermission(perm.TargetUser, perm.TargetChain, p) + if err != nil { + ConsoleLog.WithError(err).Error("update permission failed") + SetExitStatus(1) + return + } + + if waitTxConfirmation { + err = wait(txHash) + if err != nil { + SetExitStatus(1) + return + } + } + + ConsoleLog.Info("succeed in sending transaction to CovenantSQL") +} diff --git a/cmd/cql/main.go b/cmd/cql/main.go index a122e08fb..d065a3cd9 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -18,7 +18,6 @@ package main import ( - "encoding/json" "flag" "fmt" "math/rand" @@ -30,7 +29,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/cmd/cql/internal" "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/types" ) var ( @@ -43,27 +41,12 @@ var ( // adapterAddr string // adapter listen addr // // // DML variables -// 
updatePermission string // update user's permission on specific sqlchain // transferToken string // transfer token to target account // // service *observer.Service // httpServer *http.Server ) -type userPermission struct { - TargetChain proto.AccountAddress `json:"chain"` - TargetUser proto.AccountAddress `json:"user"` - Perm json.RawMessage `json:"perm"` -} - -type userPermPayload struct { - // User role to access database. - Role types.UserPermissionRole `json:"role"` - // SQL pattern regulations for user queries - // only a fully matched (case-sensitive) sql query is permitted to execute. - Patterns []string `json:"patterns"` -} - type tranToken struct { TargetUser proto.AccountAddress `json:"addr"` Amount string `json:"amount"` @@ -76,7 +59,6 @@ func init() { // flag.StringVar(&adapterAddr, "adapter", "", "Address to serve a database chain adapter, e.g. :7784") // // DML flags - // flag.StringVar(&updatePermission, "update-perm", "", "Update user's permission on specific sqlchain") // flag.StringVar(&transferToken, "transfer", "", "Transfer token to target account") internal.CqlCommands = []*internal.Command{ @@ -85,6 +67,7 @@ func init() { internal.CmdBalance, internal.CmdCreate, internal.CmdDrop, + internal.CmdPermission, } } @@ -160,56 +143,6 @@ func main() { // } // // - // if updatePermission != "" { - // // update user's permission on sqlchain - // var perm userPermission - // if err := json.Unmarshal([]byte(updatePermission), &perm); err != nil { - // internal.ConsoleLog.WithError(err).Errorf("update permission failed: invalid permission description") - // os.Exit(-1) - // return - // } - // - // var permPayload userPermPayload - // - // if err := json.Unmarshal(perm.Perm, &permPayload); err != nil { - // // try again using role string representation - // if err := json.Unmarshal(perm.Perm, &permPayload.Role); err != nil { - // internal.ConsoleLog.WithError(err).Errorf("update permission failed: invalid permission description") - // os.Exit(-1) - // 
return - // } - // } - // - // p := &types.UserPermission{ - // Role: permPayload.Role, - // Patterns: permPayload.Patterns, - // } - // - // if !p.IsValid() { - // internal.ConsoleLog.Errorf("update permission failed: invalid permission description") - // os.Exit(-1) - // return - // } - // - // txHash, err := client.UpdatePermission(perm.TargetUser, perm.TargetChain, p) - // if err != nil { - // internal.ConsoleLog.WithError(err).Error("update permission failed") - // os.Exit(-1) - // return - // } - // - // if waitTxConfirmation { - // err = wait(txHash) - // if err != nil { - // os.Exit(-1) - // return - // } - // } - // - // internal.ConsoleLog.Info("succeed in sending transaction to CovenantSQL") - // return - // } - // // if transferToken != "" { // // transfer token // var tran tranToken From eb6f56200c73e1be653da8e279bd0517f827c528 Mon Sep 17 00:00:00 2001 From: laodouya Date: Fri, 15 Mar 2019 16:58:56 +0800 Subject: [PATCH 105/244] Refactor transferToken feature into transfer command. 
--- cmd/cql/internal/transfer.go | 94 ++++++++++++++++++++++++++++++++++++ cmd/cql/main.go | 71 +-------------------------- 2 files changed, 95 insertions(+), 70 deletions(-) diff --git a/cmd/cql/internal/transfer.go b/cmd/cql/internal/transfer.go index 5bf0569ce..3ffac75f9 100644 --- a/cmd/cql/internal/transfer.go +++ b/cmd/cql/internal/transfer.go @@ -1 +1,95 @@ package internal + +import ( + "encoding/json" + "regexp" + "strconv" + + "github.com/CovenantSQL/CovenantSQL/client" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/types" +) + +var CmdTransfer = &Command{ + UsageLine: "cql transfer [-wait-tx-confirm] [meta_json]", + Description: "Transfer token to target account", +} + +func init() { + CmdTransfer.Run = runTransfer + + addCommonFlags(CmdTransfer) + addWaitFlag(CmdTransfer) +} + +type tranToken struct { + TargetUser proto.AccountAddress `json:"addr"` + Amount string `json:"amount"` +} + +func runTransfer(cmd *Command, args []string) { + configInit() + + if len(args) != 1 { + ConsoleLog.Error("Transfer command need target user and token amount in json string as param") + SetExitStatus(1) + return + } + + transferStr := args[0] + + // transfer token + var tran tranToken + if err := json.Unmarshal([]byte(transferStr), &tran); err != nil { + ConsoleLog.WithError(err).Errorf("transfer token failed: invalid transfer description") + SetExitStatus(1) + return + } + + var validAmount = regexp.MustCompile(`^([0-9]+) *([a-zA-Z]+)$`) + if !validAmount.MatchString(tran.Amount) { + ConsoleLog.Error("transfer token failed: invalid transfer description") + SetExitStatus(1) + return + } + amountUnit := validAmount.FindStringSubmatch(tran.Amount) + if len(amountUnit) != 3 { + ConsoleLog.Error("transfer token failed: invalid transfer description") + for _, v := range amountUnit { + ConsoleLog.Error(v) + } + SetExitStatus(1) + return + } + amount, err := strconv.ParseUint(amountUnit[1], 
10, 64) + if err != nil { + ConsoleLog.Error("transfer token failed: invalid token amount") + SetExitStatus(1) + return + } + unit := types.FromString(amountUnit[2]) + if !unit.Listed() { + ConsoleLog.Error("transfer token failed: invalid token type") + SetExitStatus(1) + return + } + + var txHash hash.Hash + txHash, err = client.TransferToken(tran.TargetUser, amount, unit) + if err != nil { + ConsoleLog.WithError(err).Error("transfer token failed") + SetExitStatus(1) + return + } + + if waitTxConfirmation { + err = wait(txHash) + if err != nil { + SetExitStatus(1) + return + } + } + + ConsoleLog.Info("succeed in sending transaction to CovenantSQL") +} diff --git a/cmd/cql/main.go b/cmd/cql/main.go index d065a3cd9..cb47ffd7b 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -28,7 +28,6 @@ import ( "github.com/sirupsen/logrus" "github.com/CovenantSQL/CovenantSQL/cmd/cql/internal" - "github.com/CovenantSQL/CovenantSQL/proto" ) var ( @@ -40,27 +39,16 @@ var ( // explorerAddr string // explorer Web addr // adapterAddr string // adapter listen addr // -// // DML variables -// transferToken string // transfer token to target account -// // service *observer.Service // httpServer *http.Server ) -type tranToken struct { - TargetUser proto.AccountAddress `json:"addr"` - Amount string `json:"amount"` -} - func init() { // // Explorer/Adapter // flag.StringVar(&tmpPath, "tmp-path", "", "Background service temp file path, use os.TempDir for default") // flag.StringVar(&bgLogLevel, "bg-log-level", "", "Background service log level") // flag.StringVar(&explorerAddr, "web", "", "Address to serve a database chain explorer, e.g. :8546") // flag.StringVar(&adapterAddr, "adapter", "", "Address to serve a database chain adapter, e.g. 
:7784") - // // DML flags - // flag.StringVar(&transferToken, "transfer", "", "Transfer token to target account") - internal.CqlCommands = []*internal.Command{ internal.CmdConsole, internal.CmdVersion, @@ -68,6 +56,7 @@ func init() { internal.CmdCreate, internal.CmdDrop, internal.CmdPermission, + internal.CmdTransfer, } } @@ -142,64 +131,6 @@ func main() { // } // } // - // - // if transferToken != "" { - // // transfer token - // var tran tranToken - // if err := json.Unmarshal([]byte(transferToken), &tran); err != nil { - // internal.ConsoleLog.WithError(err).Errorf("transfer token failed: invalid transfer description") - // os.Exit(-1) - // return - // } - // - // var validAmount = regexp.MustCompile(`^([0-9]+) *([a-zA-Z]+)$`) - // if !validAmount.MatchString(tran.Amount) { - // internal.ConsoleLog.Error("transfer token failed: invalid transfer description") - // os.Exit(-1) - // return - // } - // amountUnit := validAmount.FindStringSubmatch(tran.Amount) - // if len(amountUnit) != 3 { - // internal.ConsoleLog.Error("transfer token failed: invalid transfer description") - // for _, v := range amountUnit { - // internal.ConsoleLog.Error(v) - // } - // os.Exit(-1) - // return - // } - // amount, err := strconv.ParseUint(amountUnit[1], 10, 64) - // if err != nil { - // internal.ConsoleLog.Error("transfer token failed: invalid token amount") - // os.Exit(-1) - // return - // } - // unit := types.FromString(amountUnit[2]) - // if !unit.Listed() { - // internal.ConsoleLog.Error("transfer token failed: invalid token type") - // os.Exit(-1) - // return - // } - // - // var txHash hash.Hash - // txHash, err = client.TransferToken(tran.TargetUser, amount, unit) - // if err != nil { - // internal.ConsoleLog.WithError(err).Error("transfer token failed") - // os.Exit(-1) - // return - // } - // - // if waitTxConfirmation { - // err = wait(txHash) - // if err != nil { - // os.Exit(-1) - // return - // } - // } - // - // internal.ConsoleLog.Info("succeed in sending transaction 
to CovenantSQL") - // return - // } - for _, cmd := range internal.CqlCommands { if cmd.Name() != args[0] { continue From 93b77dda56f4811c9ba9a6041dacc102e01b7a20 Mon Sep 17 00:00:00 2001 From: laodouya Date: Fri, 15 Mar 2019 17:29:53 +0800 Subject: [PATCH 106/244] Move ConsoleLog init into package. --- cmd/cql/internal/base.go | 4 ++++ cmd/cql/internal/help.go | 14 ++++++++++++-- cmd/cql/main.go | 6 +----- 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/cmd/cql/internal/base.go b/cmd/cql/internal/base.go index 3ff9e3eaf..785387be7 100644 --- a/cmd/cql/internal/base.go +++ b/cmd/cql/internal/base.go @@ -21,6 +21,10 @@ var ( CqlCommands []*Command ) +func init() { + ConsoleLog = logrus.New() +} + // A Command is an implementation of a cql command // like cql create or cql transfer. type Command struct { diff --git a/cmd/cql/internal/help.go b/cmd/cql/internal/help.go index 1b6549f49..e6b2a6d26 100644 --- a/cmd/cql/internal/help.go +++ b/cmd/cql/internal/help.go @@ -21,7 +21,17 @@ func init() { CmdVersion.Run = runVersion } -func runVersion(cmd *Command, args []string) { - fmt.Printf("%v %v %v %v %v\n", +func PrintVersion(printLog bool) string { + version := fmt.Sprintf("%v %v %v %v %v\n", name, Version, runtime.GOOS, runtime.GOARCH, runtime.Version()) + + if printLog { + ConsoleLog.Infof("cql build: %s", version) + } + + return version +} + +func runVersion(cmd *Command, args []string) { + fmt.Print(PrintVersion(false)) } diff --git a/cmd/cql/main.go b/cmd/cql/main.go index cb47ffd7b..1cf827cda 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -25,8 +25,6 @@ import ( "strings" "time" - "github.com/sirupsen/logrus" - "github.com/CovenantSQL/CovenantSQL/cmd/cql/internal" ) @@ -81,9 +79,7 @@ func main() { return } - internal.ConsoleLog = logrus.New() - - internal.ConsoleLog.Infof("cql build: %#v\n", internal.Version) + internal.PrintVersion(true) // // if tmpPath == "" { // tmpPath = os.TempDir() From bbef27864c565b076c6f9f980246db0877438497 Mon Sep 
17 00:00:00 2001 From: laodouya Date: Sat, 16 Mar 2019 23:23:03 +0800 Subject: [PATCH 107/244] Refactor explorer feature into cql web command. --- cmd/cql/internal/cfg.go | 28 ++++++++++++++++++++- cmd/cql/internal/web.go | 54 +++++++++++++++++++++++++++++++++++++++++ cmd/cql/main.go | 39 ++--------------------------- 3 files changed, 83 insertions(+), 38 deletions(-) diff --git a/cmd/cql/internal/cfg.go b/cmd/cql/internal/cfg.go index 836e9cbeb..5c1eb9f8e 100644 --- a/cmd/cql/internal/cfg.go +++ b/cmd/cql/internal/cfg.go @@ -3,6 +3,8 @@ package internal import ( "context" "errors" + "os" + "path/filepath" pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/client" @@ -10,6 +12,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" "github.com/CovenantSQL/CovenantSQL/utils" + "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/sirupsen/logrus" ) @@ -19,6 +22,9 @@ var ( password string waitTxConfirmation bool // wait for transaction confirmation before exiting + // Shard chain explorer/adapter stuff + tmpPath string // background observer and explorer block and log file path + bgLogLevel string // background log level CmdName string ) @@ -40,7 +46,6 @@ func configInit() { ConsoleLog.WithError(err).Error("init covenantsql client failed") SetExitStatus(1) Exit() - return } // TODO(leventeliu): discover more specific confirmation duration from config. 
We don't have @@ -67,3 +72,24 @@ func wait(txHash hash.Hash) (err error) { } return } + +func addBgServerFlag(cmd *Command) { + cmd.Flag.StringVar(&tmpPath, "tmp-path", "", "Background service temp file path, use os.TempDir for default") + cmd.Flag.StringVar(&bgLogLevel, "bg-log-level", "", "Background service log level") +} + +func bgServerInit() { + if tmpPath == "" { + tmpPath = os.TempDir() + } + logPath := filepath.Join(tmpPath, "covenant_service.log") + bgLog, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + ConsoleLog.Errorf("open log file failed: %s, %v", logPath, err) + SetExitStatus(1) + Exit() + } + + log.SetOutput(bgLog) + log.SetStringLevel(bgLogLevel, log.InfoLevel) +} diff --git a/cmd/cql/internal/web.go b/cmd/cql/internal/web.go index 5bf0569ce..d21b3a841 100644 --- a/cmd/cql/internal/web.go +++ b/cmd/cql/internal/web.go @@ -1 +1,55 @@ package internal + +import ( + "net/http" + + "github.com/CovenantSQL/CovenantSQL/sqlchain/observer" + "github.com/CovenantSQL/CovenantSQL/utils" +) + +var ( + webAddr string // Web addr + + webService *observer.Service + webHTTPServer *http.Server +) + +var CmdWeb = &Command{ + UsageLine: "cql web [-tmp-path path] [-bg-log-level level] [address]", + Description: "Web command serve a database chain explorer, e.g. 
:8546", +} + +func init() { + CmdWeb.Run = runWeb + + addCommonFlags(CmdWeb) + addBgServerFlag(CmdWeb) +} + +func runWeb(cmd *Command, args []string) { + configInit() + bgServerInit() + + if len(args) != 1 { + ConsoleLog.Error("Web command need listern address as param") + SetExitStatus(1) + return + } + webAddr = args[0] + + var err error + webService, webHTTPServer, err = observer.StartObserver(webAddr, Version) + if err != nil { + ConsoleLog.WithError(err).Error("start explorer failed") + SetExitStatus(1) + return + } + + defer func() { + _ = observer.StopObserver(webService, webHTTPServer) + ConsoleLog.Info("explorer stopped") + }() + + ConsoleLog.Printf("Ctrl + C to stop web server on %s", webAddr) + <-utils.WaitForExit() +} diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 1cf827cda..993322c54 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -31,20 +31,11 @@ import ( var ( version = "unknown" -// // Shard chain explorer/adapter stuff -// tmpPath string // background observer and explorer block and log file path -// bgLogLevel string // background log level -// explorerAddr string // explorer Web addr // adapterAddr string // adapter listen addr -// -// service *observer.Service -// httpServer *http.Server ) func init() { - // // Explorer/Adapter - // flag.StringVar(&tmpPath, "tmp-path", "", "Background service temp file path, use os.TempDir for default") - // flag.StringVar(&bgLogLevel, "bg-log-level", "", "Background service log level") // flag.StringVar(&explorerAddr, "web", "", "Address to serve a database chain explorer, e.g. :8546") + // Adapter // flag.StringVar(&adapterAddr, "adapter", "", "Address to serve a database chain adapter, e.g. 
:7784") internal.CqlCommands = []*internal.Command{ @@ -55,6 +46,7 @@ func init() { internal.CmdDrop, internal.CmdPermission, internal.CmdTransfer, + internal.CmdWeb, } } @@ -80,33 +72,6 @@ func main() { } internal.PrintVersion(true) - // - // if tmpPath == "" { - // tmpPath = os.TempDir() - // } - // logPath := filepath.Join(tmpPath, "covenant_service.log") - // bgLog, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) - // if err != nil { - // fmt.Fprintf(os.Stderr, "open log file failed: %s, %v", logPath, err) - // os.Exit(-1) - // } - // log.SetOutput(bgLog) - // log.SetStringLevel(bgLogLevel, log.InfoLevel) - // - // if explorerAddr != "" { - // service, httpServer, err = observer.StartObserver(explorerAddr, internal.Version) - // if err != nil { - // log.WithError(err).Fatal("start explorer failed") - // } else { - // internal.ConsoleLog.Infof("explorer started on %s", explorerAddr) - // } - // - // defer func() { - // _ = observer.StopObserver(service, httpServer) - // log.Info("explorer stopped") - // }() - // } - // // if adapterAddr != "" { // server, err := adapter.NewHTTPAdapter(adapterAddr, configFile) // if err != nil { From b49fee4ff1f785f5cc667041546464d26c3a49d7 Mon Sep 17 00:00:00 2001 From: laodouya Date: Sat, 16 Mar 2019 23:35:47 +0800 Subject: [PATCH 108/244] Refactor adapter feature into cql adapter command. 
--- cmd/cql/internal/adapter.go | 64 +++++++++++++++++++++++++++++++++++++ cmd/cql/main.go | 27 ++-------------- 2 files changed, 66 insertions(+), 25 deletions(-) diff --git a/cmd/cql/internal/adapter.go b/cmd/cql/internal/adapter.go index 5bf0569ce..9401240db 100644 --- a/cmd/cql/internal/adapter.go +++ b/cmd/cql/internal/adapter.go @@ -1 +1,65 @@ package internal + +import ( + "context" + "net/http" + "time" + + "github.com/CovenantSQL/CovenantSQL/sqlchain/adapter" + "github.com/CovenantSQL/CovenantSQL/utils" +) + +var ( + adapterAddr string // adapter listen addr + + adapterHTTPServer *http.Server +) + +var CmdAdapter = &Command{ + UsageLine: "cql adapter [-tmp-path path] [-bg-log-level level] [address]", + Description: "Adapter command serve a database chain adapter, e.g. :7784", +} + +func init() { + CmdAdapter.Run = runAdapter + + addCommonFlags(CmdAdapter) + addBgServerFlag(CmdAdapter) +} + +func runAdapter(cmd *Command, args []string) { + configInit() + bgServerInit() + + if len(args) != 1 { + ConsoleLog.Error("Adapter command need listern address as param") + SetExitStatus(1) + return + } + adapterAddr = args[0] + + adapterHTTPServer, err := adapter.NewHTTPAdapter(adapterAddr, configFile) + if err != nil { + ConsoleLog.WithError(err).Error("init adapter failed") + SetExitStatus(1) + return + } + + if err = adapterHTTPServer.Serve(); err != nil { + ConsoleLog.WithError(err).Error("start adapter failed") + SetExitStatus(1) + return + } + + ConsoleLog.Infof("adapter started on %s", adapterAddr) + + defer func() { + ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) + defer cancel() + adapterHTTPServer.Shutdown(ctx) + ConsoleLog.Info("stopped adapter") + }() + + ConsoleLog.Printf("Ctrl + C to stop adapter server on %s", adapterAddr) + <-utils.WaitForExit() +} diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 993322c54..606601a00 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -30,14 +30,9 @@ import ( var ( version = "unknown" - 
-// adapterAddr string // adapter listen addr ) func init() { - // Adapter - // flag.StringVar(&adapterAddr, "adapter", "", "Address to serve a database chain adapter, e.g. :7784") - internal.CqlCommands = []*internal.Command{ internal.CmdConsole, internal.CmdVersion, @@ -47,6 +42,7 @@ func init() { internal.CmdPermission, internal.CmdTransfer, internal.CmdWeb, + internal.CmdAdapter, } } @@ -72,26 +68,7 @@ func main() { } internal.PrintVersion(true) - // if adapterAddr != "" { - // server, err := adapter.NewHTTPAdapter(adapterAddr, configFile) - // if err != nil { - // log.WithError(err).Fatal("init adapter failed") - // } - // - // if err = server.Serve(); err != nil { - // log.WithError(err).Fatal("start adapter failed") - // } else { - // internal.ConsoleLog.Infof("adapter started on %s", adapterAddr) - // - // defer func() { - // ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) - // defer cancel() - // server.Shutdown(ctx) - // log.Info("stopped adapter") - // }() - // } - // } - // + for _, cmd := range internal.CqlCommands { if cmd.Name() != args[0] { continue From 3a04bf759f03ca0a0f4898f1a4031fbe4be566ac Mon Sep 17 00:00:00 2001 From: laodouya Date: Sun, 17 Mar 2019 20:53:48 +0800 Subject: [PATCH 109/244] Add new line after one console output. 
--- cmd/cql/internal/adapter.go | 2 +- cmd/cql/internal/web.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/cmd/cql/internal/adapter.go b/cmd/cql/internal/adapter.go index 9401240db..42b8910a6 100644 --- a/cmd/cql/internal/adapter.go +++ b/cmd/cql/internal/adapter.go @@ -60,6 +60,6 @@ func runAdapter(cmd *Command, args []string) { ConsoleLog.Info("stopped adapter") }() - ConsoleLog.Printf("Ctrl + C to stop adapter server on %s", adapterAddr) + ConsoleLog.Printf("Ctrl + C to stop adapter server on %s\n", adapterAddr) <-utils.WaitForExit() } diff --git a/cmd/cql/internal/web.go b/cmd/cql/internal/web.go index d21b3a841..838f6c886 100644 --- a/cmd/cql/internal/web.go +++ b/cmd/cql/internal/web.go @@ -50,6 +50,6 @@ func runWeb(cmd *Command, args []string) { ConsoleLog.Info("explorer stopped") }() - ConsoleLog.Printf("Ctrl + C to stop web server on %s", webAddr) + ConsoleLog.Printf("Ctrl + C to stop web server on %s\n", webAddr) <-utils.WaitForExit() } From f7af2a4d9ea65569e6670817f0de93cb1587ed56 Mon Sep 17 00:00:00 2001 From: laodouya Date: Sun, 17 Mar 2019 22:07:35 +0800 Subject: [PATCH 110/244] Change web and adapter start process into a func. 
--- cmd/cql/internal/adapter.go | 36 +++++++++++++++++++++--------------- cmd/cql/internal/web.go | 30 ++++++++++++++++++------------ 2 files changed, 39 insertions(+), 27 deletions(-) diff --git a/cmd/cql/internal/adapter.go b/cmd/cql/internal/adapter.go index 42b8910a6..acfa4f0e5 100644 --- a/cmd/cql/internal/adapter.go +++ b/cmd/cql/internal/adapter.go @@ -27,38 +27,44 @@ func init() { addBgServerFlag(CmdAdapter) } -func runAdapter(cmd *Command, args []string) { - configInit() - bgServerInit() - - if len(args) != 1 { - ConsoleLog.Error("Adapter command need listern address as param") - SetExitStatus(1) - return - } - adapterAddr = args[0] - +func startAdapterServer(adapterAddr string) func() { adapterHTTPServer, err := adapter.NewHTTPAdapter(adapterAddr, configFile) if err != nil { ConsoleLog.WithError(err).Error("init adapter failed") SetExitStatus(1) - return + return nil } if err = adapterHTTPServer.Serve(); err != nil { ConsoleLog.WithError(err).Error("start adapter failed") SetExitStatus(1) - return + return nil } ConsoleLog.Infof("adapter started on %s", adapterAddr) - defer func() { + return func() { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() adapterHTTPServer.Shutdown(ctx) ConsoleLog.Info("stopped adapter") - }() + } +} + +func runAdapter(cmd *Command, args []string) { + configInit() + bgServerInit() + + if len(args) != 1 { + ConsoleLog.Error("Adapter command need listern address as param") + SetExitStatus(1) + return + } + adapterAddr = args[0] + + cancelFunc := startAdapterServer(adapterAddr) + ExitIfErrors() + defer cancelFunc() ConsoleLog.Printf("Ctrl + C to stop adapter server on %s\n", adapterAddr) <-utils.WaitForExit() diff --git a/cmd/cql/internal/web.go b/cmd/cql/internal/web.go index 838f6c886..9ef9452e3 100644 --- a/cmd/cql/internal/web.go +++ b/cmd/cql/internal/web.go @@ -26,6 +26,21 @@ func init() { addBgServerFlag(CmdWeb) } +func startWebServer(webAddr string) func() { + var err error + 
webService, webHTTPServer, err = observer.StartObserver(webAddr, Version) + if err != nil { + ConsoleLog.WithError(err).Error("start explorer failed") + SetExitStatus(1) + return nil + } + + return func() { + _ = observer.StopObserver(webService, webHTTPServer) + ConsoleLog.Info("explorer stopped") + } +} + func runWeb(cmd *Command, args []string) { configInit() bgServerInit() @@ -37,18 +52,9 @@ func runWeb(cmd *Command, args []string) { } webAddr = args[0] - var err error - webService, webHTTPServer, err = observer.StartObserver(webAddr, Version) - if err != nil { - ConsoleLog.WithError(err).Error("start explorer failed") - SetExitStatus(1) - return - } - - defer func() { - _ = observer.StopObserver(webService, webHTTPServer) - ConsoleLog.Info("explorer stopped") - }() + cancelFunc := startWebServer(webAddr) + ExitIfErrors() + defer cancelFunc() ConsoleLog.Printf("Ctrl + C to stop web server on %s\n", webAddr) <-utils.WaitForExit() From 29bd64b2c613d7c684db65f65c04759107ce0b39 Mon Sep 17 00:00:00 2001 From: laodouya Date: Sun, 17 Mar 2019 22:23:33 +0800 Subject: [PATCH 111/244] Add web and adapter params for console command. 
--- cmd/cql/internal/adapter.go | 2 +- cmd/cql/internal/console.go | 18 ++++++++++++++++++ cmd/cql/internal/web.go | 2 ++ cmd/cql/main.go | 7 ------- 4 files changed, 21 insertions(+), 8 deletions(-) diff --git a/cmd/cql/internal/adapter.go b/cmd/cql/internal/adapter.go index acfa4f0e5..c921ae5bc 100644 --- a/cmd/cql/internal/adapter.go +++ b/cmd/cql/internal/adapter.go @@ -47,7 +47,7 @@ func startAdapterServer(adapterAddr string) func() { ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() adapterHTTPServer.Shutdown(ctx) - ConsoleLog.Info("stopped adapter") + ConsoleLog.Info("adapter stopped") } } diff --git a/cmd/cql/internal/console.go b/cmd/cql/internal/console.go index 1f8a88739..321799921 100644 --- a/cmd/cql/internal/console.go +++ b/cmd/cql/internal/console.go @@ -41,6 +41,7 @@ import ( "github.com/xo/usql/text" "github.com/CovenantSQL/CovenantSQL/client" + "github.com/CovenantSQL/CovenantSQL/utils" ) var CmdConsole = &Command{ @@ -70,6 +71,8 @@ func init() { CmdConsole.Flag.BoolVar(&singleTransaction, "single-transaction", false, "Execute as a single transaction (if non-interactive)") CmdConsole.Flag.StringVar(&command, "command", "", "Run only single command (SQL or usql internal command) and exit") CmdConsole.Flag.StringVar(&fileName, "file", "", "Execute commands from file and exit") + CmdConsole.Flag.StringVar(&adapterAddr, "adapter", "", "Address to serve a database chain adapter, e.g. :7784") + CmdConsole.Flag.StringVar(&webAddr, "web", "", "Address serve a database chain explorer, e.g. 
:8546") } // SqTime provides a type that will correctly scan the various timestamps @@ -351,6 +354,16 @@ func runConsole(cmd *Command, args []string) { } } + if adapterAddr != "" { + cancelFunc := startAdapterServer(adapterAddr) + defer cancelFunc() + } + + if webAddr != "" { + cancelFunc := startWebServer(webAddr) + defer cancelFunc() + } + // run err := run(curUser) ExitIfErrors() @@ -367,4 +380,9 @@ func runConsole(cmd *Command, args []string) { SetExitStatus(1) return } + + if adapterAddr != "" || webAddr != "" { + ConsoleLog.Printf("Ctrl + C to stop background server on %s %s\n", adapterAddr, webAddr) + <-utils.WaitForExit() + } } diff --git a/cmd/cql/internal/web.go b/cmd/cql/internal/web.go index 9ef9452e3..2bfef3d88 100644 --- a/cmd/cql/internal/web.go +++ b/cmd/cql/internal/web.go @@ -35,6 +35,8 @@ func startWebServer(webAddr string) func() { return nil } + ConsoleLog.Infof("web server started on %s", webAddr) + return func() { _ = observer.StopObserver(webService, webHTTPServer) ConsoleLog.Info("explorer stopped") diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 606601a00..4a2e4d100 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -90,13 +90,6 @@ func main() { fmt.Fprintf(os.Stderr, "cql %s: unknown command\nRun 'cql help%s' for usage.\n", internal.CmdName, helpArg) internal.SetExitStatus(2) internal.Exit() - - // if web flag is enabled - //if explorerAddr != "" || adapterAddr != "" { - // fmt.Printf("Ctrl + C to stop explorer on %s and adapter on %s\n", explorerAddr, adapterAddr) - // <-utils.WaitForExit() - // return - //} } func mainUsage() { From 70ec1fa58e76dcad6f7166630ad83458ecf059d2 Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 18 Mar 2019 10:59:37 +0800 Subject: [PATCH 112/244] Make golint happy. 
--- cmd/cql/internal/adapter.go | 1 + cmd/cql/internal/balance.go | 1 + cmd/cql/internal/base.go | 6 +++++- cmd/cql/internal/cfg.go | 1 + cmd/cql/internal/console.go | 1 + cmd/cql/internal/create.go | 1 + cmd/cql/internal/drop.go | 1 + cmd/cql/internal/help.go | 1 + cmd/cql/internal/permission.go | 1 + cmd/cql/internal/transfer.go | 1 + cmd/cql/internal/web.go | 1 + 11 files changed, 15 insertions(+), 1 deletion(-) diff --git a/cmd/cql/internal/adapter.go b/cmd/cql/internal/adapter.go index c921ae5bc..24a638fc1 100644 --- a/cmd/cql/internal/adapter.go +++ b/cmd/cql/internal/adapter.go @@ -15,6 +15,7 @@ var ( adapterHTTPServer *http.Server ) +// CmdAdapter is cql adapter command entity. var CmdAdapter = &Command{ UsageLine: "cql adapter [-tmp-path path] [-bg-log-level level] [address]", Description: "Adapter command serve a database chain adapter, e.g. :7784", diff --git a/cmd/cql/internal/balance.go b/cmd/cql/internal/balance.go index 3d4c19f1c..7a9688b2a 100644 --- a/cmd/cql/internal/balance.go +++ b/cmd/cql/internal/balance.go @@ -11,6 +11,7 @@ var ( tokenName string // get specific token's balance of current account ) +// CmdBalance is cql balance command entity. var CmdBalance = &Command{ UsageLine: "cql balance [-token token_name]", Description: "Get CovenantSQL balance of current account", diff --git a/cmd/cql/internal/base.go b/cmd/cql/internal/base.go index 785387be7..ae829b350 100644 --- a/cmd/cql/internal/base.go +++ b/cmd/cql/internal/base.go @@ -64,6 +64,7 @@ func (c *Command) Name() string { return name } +// Usage print base usage help info. 
func (c *Command) Usage() { fmt.Fprintf(os.Stderr, "usage: %s\n", c.UsageLine) fmt.Fprintf(os.Stderr, "Run 'cql help %s' for details.\n", c.LongName()) @@ -78,10 +79,11 @@ func (c *Command) Runnable() bool { var atExitFuncs []func() -func AtExit(f func()) { +func atExit(f func()) { atExitFuncs = append(atExitFuncs, f) } +// Exit will run all exit funcs and then return with exitStatus func Exit() { for _, f := range atExitFuncs { f() @@ -89,6 +91,7 @@ func Exit() { os.Exit(exitStatus) } +// ExitIfErrors will call Exit() if exitStatus is not 0 func ExitIfErrors() { if exitStatus != 0 { Exit() @@ -98,6 +101,7 @@ func ExitIfErrors() { var exitStatus = 0 var exitMu sync.Mutex +// SetExitStatus provide thread safe set exit status func. func SetExitStatus(n int) { exitMu.Lock() if exitStatus < n { diff --git a/cmd/cql/internal/cfg.go b/cmd/cql/internal/cfg.go index 5c1eb9f8e..1db4b2c08 100644 --- a/cmd/cql/internal/cfg.go +++ b/cmd/cql/internal/cfg.go @@ -26,6 +26,7 @@ var ( tmpPath string // background observer and explorer block and log file path bgLogLevel string // background log level + // CmdName stores cql command strings for error messages. CmdName string ) diff --git a/cmd/cql/internal/console.go b/cmd/cql/internal/console.go index 321799921..6e47b8bbf 100644 --- a/cmd/cql/internal/console.go +++ b/cmd/cql/internal/console.go @@ -44,6 +44,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/utils" ) +// CmdConsole is cql console command entity. 
var CmdConsole = &Command{ UsageLine: "cql console [-dsn dsn_string] [-command sqlcommand] [-file filename] [-out outputfile] [-no-rc true/false] [-single-transaction] [-variable variables]", Description: "run a console for realtime sql operation", diff --git a/cmd/cql/internal/create.go b/cmd/cql/internal/create.go index bcdbadbbf..f7b6df28f 100644 --- a/cmd/cql/internal/create.go +++ b/cmd/cql/internal/create.go @@ -9,6 +9,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/client" ) +// CmdCreate is cql create command entity. var CmdCreate = &Command{ UsageLine: "cql create [-wait-tx-confirm] [dbmeta/nodecount]", Description: "Create CovenantSQL database by database metainfo or just number for node count", diff --git a/cmd/cql/internal/drop.go b/cmd/cql/internal/drop.go index 7f365ab58..32c1b4e01 100644 --- a/cmd/cql/internal/drop.go +++ b/cmd/cql/internal/drop.go @@ -4,6 +4,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/client" ) +// CmdDrop is cql drop command entity. var CmdDrop = &Command{ UsageLine: "cql drop [-wait-tx-confirm] [dsn/dbid]", Description: "Drop CovenantSQL database by dsn or database id", diff --git a/cmd/cql/internal/help.go b/cmd/cql/internal/help.go index e6b2a6d26..386367306 100644 --- a/cmd/cql/internal/help.go +++ b/cmd/cql/internal/help.go @@ -12,6 +12,7 @@ var ( Version = "unknown" ) +// CmdVersion is cql version command entity. var CmdVersion = &Command{ UsageLine: "cql version", Description: "Show cql build version infomation", diff --git a/cmd/cql/internal/permission.go b/cmd/cql/internal/permission.go index 1b0921ba3..c71fe8bc3 100644 --- a/cmd/cql/internal/permission.go +++ b/cmd/cql/internal/permission.go @@ -8,6 +8,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/types" ) +// CmdPermission is cql permission command entity. 
var CmdPermission = &Command{ UsageLine: "cql permission [-wait-tx-confirm] [perm_meta]", Description: "Update user's permission on specific sqlchain", diff --git a/cmd/cql/internal/transfer.go b/cmd/cql/internal/transfer.go index 3ffac75f9..b3d6025bf 100644 --- a/cmd/cql/internal/transfer.go +++ b/cmd/cql/internal/transfer.go @@ -11,6 +11,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/types" ) +// CmdTransfer is cql transfer command entity. var CmdTransfer = &Command{ UsageLine: "cql transfer [-wait-tx-confirm] [meta_json]", Description: "Transfer token to target account", diff --git a/cmd/cql/internal/web.go b/cmd/cql/internal/web.go index 2bfef3d88..78ec992b1 100644 --- a/cmd/cql/internal/web.go +++ b/cmd/cql/internal/web.go @@ -14,6 +14,7 @@ var ( webHTTPServer *http.Server ) +// CmdWeb is cql web command. var CmdWeb = &Command{ UsageLine: "cql web [-tmp-path path] [-bg-log-level level] [address]", Description: "Web command serve a database chain explorer, e.g. :8546", From 2c5d31c08ae94668af6e2f66cf0146dc1af601b7 Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 18 Mar 2019 11:19:02 +0800 Subject: [PATCH 113/244] Fix cql test case. 
--- bin/docker-entry.sh | 6 +++--- cmd/cql/internal/help.go | 1 + sqlchain/observer/observation_test.go | 8 ++++---- test/compatibility/specific_old.sh | 8 ++++---- test/testnet_client/run.sh | 12 ++++++------ 5 files changed, 18 insertions(+), 17 deletions(-) diff --git a/bin/docker-entry.sh b/bin/docker-entry.sh index d5040bfe6..105107c27 100755 --- a/bin/docker-entry.sh +++ b/bin/docker-entry.sh @@ -12,16 +12,16 @@ blockproducer) exec /app/cqld -config "${COVENANT_CONF}" -metric-web "${METRIC_WEB_ADDR}" "${@}" ;; observer) - exec /app/cql -config "${COVENANT_CONF}" -web "${COVENANTSQL_OBSERVER_ADDR}" "${@}" + exec /app/cql web -config "${COVENANT_CONF}" "${COVENANTSQL_OBSERVER_ADDR}" "${@}" ;; adapter) - exec /app/cql -config "${COVENANT_CONF}" -adapter "${COVENANTSQL_ADAPTER_ADDR}" "${@}" + exec /app/cql adapter -config "${COVENANT_CONF}" "${COVENANTSQL_ADAPTER_ADDR}" "${@}" ;; mysql-adapter) exec /app/cql-mysql-adapter -config "${COVENANT_CONF}" "${@}" ;; cli) - exec /app/cql -config ${COVENANT_CONF} "${@}" + exec /app/cql console -config ${COVENANT_CONF} "${@}" ;; faucet) exec /app/cql-faucet -config ${COVENANT_CONF} "${@}" diff --git a/cmd/cql/internal/help.go b/cmd/cql/internal/help.go index 386367306..d7fd1bf13 100644 --- a/cmd/cql/internal/help.go +++ b/cmd/cql/internal/help.go @@ -22,6 +22,7 @@ func init() { CmdVersion.Run = runVersion } +// PrintVersion prints program git version. 
func PrintVersion(printLog bool) string { version := fmt.Sprintf("%v %v %v %v %v\n", name, Version, runtime.GOOS, runtime.GOARCH, runtime.Version()) diff --git a/sqlchain/observer/observation_test.go b/sqlchain/observer/observation_test.go index f9a95ab31..8ed27bcb9 100644 --- a/sqlchain/observer/observation_test.go +++ b/sqlchain/observer/observation_test.go @@ -488,10 +488,10 @@ func TestFullProcess(t *testing.T) { var observerCmd *utils.CMD observerCmd, err = utils.RunCommandNB( FJ(baseDir, "./bin/cql.test"), - []string{"-config", FJ(testWorkingDir, "./observation/node_observer/config.yaml"), + []string{"web", "-config", FJ(testWorkingDir, "./observation/node_observer/config.yaml"), "-bg-log-level", "debug", "-test.coverprofile", FJ(baseDir, "./cmd/cql/observer.cover.out"), - "-web", "127.0.0.1:4663", + "127.0.0.1:4663", }, "observer", testWorkingDir, logDir, false, ) @@ -719,10 +719,10 @@ func TestFullProcess(t *testing.T) { // start observer again observerCmd, err = utils.RunCommandNB( FJ(baseDir, "./bin/cql.test"), - []string{"-config", FJ(testWorkingDir, "./observation/node_observer/config.yaml"), + []string{"web", "-config", FJ(testWorkingDir, "./observation/node_observer/config.yaml"), "-bg-log-level", "debug", "-test.coverprofile", FJ(baseDir, "./cmd/cql/observer.cover.out"), - "-web", "127.0.0.1:4663", + "127.0.0.1:4663", }, "observer", testWorkingDir, logDir, false, ) diff --git a/test/compatibility/specific_old.sh b/test/compatibility/specific_old.sh index eec7174ee..0baefffd6 100755 --- a/test/compatibility/specific_old.sh +++ b/test/compatibility/specific_old.sh @@ -49,9 +49,9 @@ nohup ${MINERBIN} -config node_miner_2/config.yaml >${LOGS_DIR}/miner2.log 2>&1 # wait miner start sleep 20 -${CLIENTBIN} -config node_c/config.yaml -get-balance +${CLIENTBIN} balance -config node_c/config.yaml -${CLIENTBIN} -config node_c/config.yaml -create 2 -wait-tx-confirm | tee dsn.txt +${CLIENTBIN} create -config node_c/config.yaml -wait-tx-confirm 2 | tee dsn.txt 
#get dsn dsn=$(cat dsn.txt) @@ -59,10 +59,10 @@ if [ -z "$dsn" ]; then exit 1 fi -${CLIENTBIN} -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -dsn ${dsn} \ +${CLIENTBIN} console -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -dsn ${dsn} \ -command 'create table test_for_new_account(column1 int);' -${CLIENTBIN} -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -dsn ${dsn} \ +${CLIENTBIN} console -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -dsn ${dsn} \ -command 'show tables;' | tee result.log grep "1 row" result.log diff --git a/test/testnet_client/run.sh b/test/testnet_client/run.sh index 5e53cd7d2..dbc2e58c8 100755 --- a/test/testnet_client/run.sh +++ b/test/testnet_client/run.sh @@ -20,20 +20,20 @@ ${BIN}/cql-utils -tool addrgen -skip-master-key | tee wallet.txt wallet=$(awk '{print $3}' wallet.txt) #transfer some coin to above address -${BIN}/cql -config ${PROJECT_DIR}/conf/testnet/config.yaml -transfer \ - '{"addr":"'${wallet}'", "amount":"100000000 Particle"}' -wait-tx-confirm +${BIN}/cql transfer -config ${PROJECT_DIR}/conf/testnet/config.yaml -wait-tx-confirm \ + '{"addr":"'${wallet}'", "amount":"100000000 Particle"}' -${BIN}/cql -get-balance +${BIN}/cql balance -${BIN}/cql -create 2 -wait-tx-confirm | tee dsn.txt +${BIN}/cql create -wait-tx-confirm 2 | tee dsn.txt #get dsn dsn=$(cat dsn.txt) -${BIN}/cql -dsn ${dsn} \ +${BIN}/cql console -dsn ${dsn} \ -command 'create table test_for_new_account(column1 int);' -${BIN}/cql -dsn ${dsn} \ +${BIN}/cql console -dsn ${dsn} \ -command 'show tables;' | tee result.log grep "1 row" result.log From d3b14b73a0058faa1f911a5d5ef6027ff8fffb47 Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 18 Mar 2019 11:31:36 +0800 Subject: [PATCH 114/244] Fix old client compatibility test. 
--- test/compatibility/specific_old.sh | 41 ++++++++++++++++++++++-------- 1 file changed, 30 insertions(+), 11 deletions(-) diff --git a/test/compatibility/specific_old.sh b/test/compatibility/specific_old.sh index 0baefffd6..ba73fcd94 100755 --- a/test/compatibility/specific_old.sh +++ b/test/compatibility/specific_old.sh @@ -49,21 +49,40 @@ nohup ${MINERBIN} -config node_miner_2/config.yaml >${LOGS_DIR}/miner2.log 2>&1 # wait miner start sleep 20 -${CLIENTBIN} balance -config node_c/config.yaml +# TODO(laodouya) remove v0.4.0 code after v0.5.0 release +if [ $PREV_VERSION == "v0.4.0" ]; then + ${CLIENTBIN} -config node_c/config.yaml -get-balance -${CLIENTBIN} create -config node_c/config.yaml -wait-tx-confirm 2 | tee dsn.txt + ${CLIENTBIN} -config node_c/config.yaml -wait-tx-confirm -create 2 | tee dsn.txt -#get dsn -dsn=$(cat dsn.txt) -if [ -z "$dsn" ]; then - exit 1 -fi + #get dsn + dsn=$(cat dsn.txt) + if [ -z "$dsn" ]; then + exit 1 + fi + + ${CLIENTBIN} -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -dsn ${dsn} \ + -command 'create table test_for_new_account(column1 int);' + + ${CLIENTBIN} -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -dsn ${dsn} \ + -command 'show tables;' | tee result.log +else + ${CLIENTBIN} balance -config node_c/config.yaml -${CLIENTBIN} console -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -dsn ${dsn} \ - -command 'create table test_for_new_account(column1 int);' + ${CLIENTBIN} create -config node_c/config.yaml -wait-tx-confirm 2 | tee dsn.txt -${CLIENTBIN} console -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -dsn ${dsn} \ - -command 'show tables;' | tee result.log + #get dsn + dsn=$(cat dsn.txt) + if [ -z "$dsn" ]; then + exit 1 + fi + + ${CLIENTBIN} console -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -dsn ${dsn} \ + -command 'create table test_for_new_account(column1 int);' + + ${CLIENTBIN} console -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -dsn ${dsn} 
\ + -command 'show tables;' | tee result.log +fi grep "1 row" result.log From dcad0f45e90a6cd6f9efdf535f29de8c06a37e8f Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 18 Mar 2019 11:42:31 +0800 Subject: [PATCH 115/244] Fix unit test: cql.test should set -test.coverprofile flag first before sub command. --- sqlchain/observer/observation_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sqlchain/observer/observation_test.go b/sqlchain/observer/observation_test.go index 8ed27bcb9..293f56e93 100644 --- a/sqlchain/observer/observation_test.go +++ b/sqlchain/observer/observation_test.go @@ -488,9 +488,9 @@ func TestFullProcess(t *testing.T) { var observerCmd *utils.CMD observerCmd, err = utils.RunCommandNB( FJ(baseDir, "./bin/cql.test"), - []string{"web", "-config", FJ(testWorkingDir, "./observation/node_observer/config.yaml"), + []string{"-test.coverprofile", FJ(baseDir, "./cmd/cql/observer.cover.out"), + "web", "-config", FJ(testWorkingDir, "./observation/node_observer/config.yaml"), "-bg-log-level", "debug", - "-test.coverprofile", FJ(baseDir, "./cmd/cql/observer.cover.out"), "127.0.0.1:4663", }, "observer", testWorkingDir, logDir, false, @@ -719,9 +719,9 @@ func TestFullProcess(t *testing.T) { // start observer again observerCmd, err = utils.RunCommandNB( FJ(baseDir, "./bin/cql.test"), - []string{"web", "-config", FJ(testWorkingDir, "./observation/node_observer/config.yaml"), + []string{"-test.coverprofile", FJ(baseDir, "./cmd/cql/observer.cover.out"), + "web", "-config", FJ(testWorkingDir, "./observation/node_observer/config.yaml"), "-bg-log-level", "debug", - "-test.coverprofile", FJ(baseDir, "./cmd/cql/observer.cover.out"), "127.0.0.1:4663", }, "observer", testWorkingDir, logDir, false, From 159e0ff847cb1c9eb2f94abd0231ba70180616f2 Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 18 Mar 2019 11:49:36 +0800 Subject: [PATCH 116/244] Fix compatibility test. 
--- cmd/cql/internal/console.go | 1 - test/compatibility/specific_old.sh | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/cmd/cql/internal/console.go b/cmd/cql/internal/console.go index 6e47b8bbf..b7403c144 100644 --- a/cmd/cql/internal/console.go +++ b/cmd/cql/internal/console.go @@ -50,7 +50,6 @@ var CmdConsole = &Command{ Description: "run a console for realtime sql operation", } -//TODO(laodouya) add web/adapter flag {command/filename} var ( variables varsFlag dsn string diff --git a/test/compatibility/specific_old.sh b/test/compatibility/specific_old.sh index ba73fcd94..238a1b244 100755 --- a/test/compatibility/specific_old.sh +++ b/test/compatibility/specific_old.sh @@ -50,7 +50,7 @@ nohup ${MINERBIN} -config node_miner_2/config.yaml >${LOGS_DIR}/miner2.log 2>&1 sleep 20 # TODO(laodouya) remove v0.4.0 code after v0.5.0 release -if [ $PREV_VERSION == "v0.4.0" ]; then +if [[ $CLIENTBIN =~ "v0.4.0" ]]; then ${CLIENTBIN} -config node_c/config.yaml -get-balance ${CLIENTBIN} -config node_c/config.yaml -wait-tx-confirm -create 2 | tee dsn.txt From 359d218019f3aae5da71560500cb040e5c9ae48a Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 18 Mar 2019 14:59:59 +0800 Subject: [PATCH 117/244] Change cql create subcommand, only accept metainfo json string as params. --- cmd/cql/internal/create.go | 29 +++++++++++++++-------------- test/compatibility/specific_old.sh | 4 ++-- test/testnet_client/run.sh | 2 +- 3 files changed, 18 insertions(+), 17 deletions(-) diff --git a/cmd/cql/internal/create.go b/cmd/cql/internal/create.go index f7b6df28f..e72e802b2 100644 --- a/cmd/cql/internal/create.go +++ b/cmd/cql/internal/create.go @@ -4,15 +4,18 @@ import ( "context" "encoding/json" "fmt" - "strconv" "github.com/CovenantSQL/CovenantSQL/client" ) // CmdCreate is cql create command entity. 
var CmdCreate = &Command{ - UsageLine: "cql create [-wait-tx-confirm] [dbmeta/nodecount]", - Description: "Create CovenantSQL database by database metainfo or just number for node count", + UsageLine: "cql create [-wait-tx-confirm] [dbmeta]", + Description: ` +Create CovenantSQL database by database metainfo json string(must include node count) + e.g. + cql create -wait-tx-confirm '{"node":2}' +`, } func init() { @@ -26,7 +29,7 @@ func runCreate(cmd *Command, args []string) { configInit() if len(args) != 1 { - ConsoleLog.Error("Create command need database_meta_info string or node_count as params") + ConsoleLog.Error("Create command need database_meta_info string as params") SetExitStatus(1) return } @@ -36,17 +39,15 @@ func runCreate(cmd *Command, args []string) { var meta client.ResourceMeta if err := json.Unmarshal([]byte(metaStr), &meta); err != nil { - // not a instance json, try if it is a number describing node count - nodeCnt, err := strconv.ParseUint(metaStr, 10, 16) - if err != nil { - // still failing - ConsoleLog.WithField("db", metaStr).Error("create database failed: invalid instance description") - SetExitStatus(1) - return - } + ConsoleLog.WithField("db", metaStr).Error("create database failed: invalid instance description") + SetExitStatus(1) + return + } - meta = client.ResourceMeta{} - meta.Node = uint16(nodeCnt) + if meta.Node == 0 { + ConsoleLog.WithField("db", metaStr).Error("create database failed: request node count must > 1") + SetExitStatus(1) + return } txHash, dsn, err := client.Create(meta) diff --git a/test/compatibility/specific_old.sh b/test/compatibility/specific_old.sh index 238a1b244..ada976a52 100755 --- a/test/compatibility/specific_old.sh +++ b/test/compatibility/specific_old.sh @@ -53,7 +53,7 @@ sleep 20 if [[ $CLIENTBIN =~ "v0.4.0" ]]; then ${CLIENTBIN} -config node_c/config.yaml -get-balance - ${CLIENTBIN} -config node_c/config.yaml -wait-tx-confirm -create 2 | tee dsn.txt + ${CLIENTBIN} -config node_c/config.yaml 
-wait-tx-confirm -create '{"node":2}' | tee dsn.txt #get dsn dsn=$(cat dsn.txt) @@ -69,7 +69,7 @@ if [[ $CLIENTBIN =~ "v0.4.0" ]]; then else ${CLIENTBIN} balance -config node_c/config.yaml - ${CLIENTBIN} create -config node_c/config.yaml -wait-tx-confirm 2 | tee dsn.txt + ${CLIENTBIN} create -config node_c/config.yaml -wait-tx-confirm '{"node":2}' | tee dsn.txt #get dsn dsn=$(cat dsn.txt) diff --git a/test/testnet_client/run.sh b/test/testnet_client/run.sh index dbc2e58c8..a55b3cf22 100755 --- a/test/testnet_client/run.sh +++ b/test/testnet_client/run.sh @@ -25,7 +25,7 @@ ${BIN}/cql transfer -config ${PROJECT_DIR}/conf/testnet/config.yaml -wait-tx-con ${BIN}/cql balance -${BIN}/cql create -wait-tx-confirm 2 | tee dsn.txt +${BIN}/cql create -wait-tx-confirm '{"node":2}' | tee dsn.txt #get dsn dsn=$(cat dsn.txt) From 6a9fd16d2873110bb4921eaf13105a2d991d97f6 Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Mon, 18 Mar 2019 15:31:23 +0800 Subject: [PATCH 118/244] Refactor benchmark codes and scripts Add miner-count, eventual-consistency, and bypass-signature as flags of the complied test binary. --- cmd/cql-minerd/bench.sh | 42 +++--- cmd/cql-minerd/benchCustom.sh | 41 ++++-- cmd/cql-minerd/benchGNTE.sh | 62 +++++---- cmd/cql-minerd/benchTestnet.sh | 39 ++++-- cmd/cql-minerd/integration_test.go | 203 ++++++----------------------- cmd/cql-minerd/main_test.go | 6 +- 6 files changed, 167 insertions(+), 226 deletions(-) diff --git a/cmd/cql-minerd/bench.sh b/cmd/cql-minerd/bench.sh index 837b8d9eb..014bbf799 100755 --- a/cmd/cql-minerd/bench.sh +++ b/cmd/cql-minerd/bench.sh @@ -1,17 +1,27 @@ -#!/bin/bash +#! 
/usr/bin/env bash +set -euo pipefail -make -C ../../ clean && \ -make -C ../../ use_all_cores && \ -go test -bench=^BenchmarkSQLite$ -benchtime=10s -run ^$ && \ -go test -bench=^BenchmarkMinerOne$ -benchtime=10s -run ^$ && \ -go test -bench=^BenchmarkMinerOneNoSign$ -benchtime=10s -run ^$ && \ -go test -bench=^BenchmarkMinerTwo$ -benchtime=10s -run ^$ && \ -go test -bench=^BenchmarkMinerTwoNoSign$ -benchtime=10s -run ^$ && \ -go test -bench=^BenchmarkMinerThree$ -benchtime=10s -run ^$ && \ -go test -bench=^BenchmarkMinerThreeNoSign$ -benchtime=10s -run ^$ && \ -go test -bench=^BenchmarkMinerOneWithEventualConsistency$ -benchtime=10s -run ^$ && \ -go test -bench=^BenchmarkMinerOneNoSignWithEventualConsistency$ -benchtime=10s -run ^$ && \ -go test -bench=^BenchmarkMinerTwoWithEventualConsistency$ -benchtime=10s -run ^$ && \ -go test -bench=^BenchmarkMinerTwoNoSignWithEventualConsistency$ -benchtime=10s -run ^$ && \ -go test -bench=^BenchmarkMinerThreeWithEventualConsistency$ -benchtime=10s -run ^$ && \ -go test -bench=^BenchmarkMinerThreeNoSignWithEventualConsistency$ -benchtime=10s -run ^$ + +main() { + local wd="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.."; pwd)" + make -C "$wd" clean + make -C "$wd" use_all_cores + + local pkg="github.com/CovenantSQL/CovenantSQL/cmd/cql-minerd" + go test -bench=^BenchmarkSQLite$ -benchtime=10s -run ^$ "$pkg" + + local flags=( + "-bench=^BenchmarkMiner$" + "-benchtime=10s" + "-run=^$" + ) + local i + for ((i=1; i<=3; i++)); do + go test "${flags[@]}" "$pkg" -bench-miner-count=$i + go test "${flags[@]}" "$pkg" -bench-miner-count=$i -bench-bypass-signature + go test "${flags[@]}" "$pkg" -bench-miner-count=$i -bench-eventual-consistency + go test "${flags[@]}" "$pkg" -bench-miner-count=$i -bench-bypass-signature -bench-eventual-consistency + done +} + +main "$@" diff --git a/cmd/cql-minerd/benchCustom.sh b/cmd/cql-minerd/benchCustom.sh index 6590d3b74..bdc7dc054 100755 --- a/cmd/cql-minerd/benchCustom.sh +++ 
b/cmd/cql-minerd/benchCustom.sh @@ -1,12 +1,29 @@ -#!/bin/bash - -make -C ../../ clean && \ -make -C ../../ use_all_cores -export miner_conf_dir=$PWD/../../test/bench_testnet/node_c -go test -bench=^BenchmarkCustomMiner1$ -benchtime=10s -run ^$ |tee custom_miner.log -go test -bench=^BenchmarkCustomMiner2$ -benchtime=10s -run ^$ |tee -a custom_miner.log -go test -bench=^BenchmarkCustomMiner3$ -benchtime=10s -run ^$ |tee -a custom_miner.log - -go test -cpu=1 -bench=^BenchmarkCustomMiner1$ -benchtime=10s -run ^$ |tee -a custom_miner.log -go test -cpu=1 -bench=^BenchmarkCustomMiner2$ -benchtime=10s -run ^$ |tee -a custom_miner.log -go test -cpu=1 -bench=^BenchmarkCustomMiner3$ -benchtime=10s -run ^$ |tee -a custom_miner.log +#! /usr/bin/env bash +set -euo pipefail + +main() { + local wd="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.."; pwd)" + make -C "$wd" clean + make -C "$wd" use_all_cores + + rm -f custom_miner.log + touch custom_miner.log + + local flags=( + "-bench=^BenchmarkCustomMiner$" + "-benchtime=10s" + "-run=^$" + ) + local pkg="github.com/CovenantSQL/CovenantSQL/cmd/cql-minerd" + local i subflags + for ((i=1; i<=3; i++)); do + subflags=( + "-bench-miner-config-dir=$wd/test/bench_testnet/node_c" + "-bench-miner-count=$i" + ) + go test "${flags[@]}" "$pkg" "${subflags[@]}" | tee -a custom_miner.log + go test -cpu=1 "${flags[@]}" "$pkg" "${subflags[@]}" | tee -a custom_miner.log + done +} + +main "$@" diff --git a/cmd/cql-minerd/benchGNTE.sh b/cmd/cql-minerd/benchGNTE.sh index c11f14ce5..c22d841e4 100755 --- a/cmd/cql-minerd/benchGNTE.sh +++ b/cmd/cql-minerd/benchGNTE.sh @@ -1,30 +1,42 @@ -#!/bin/bash +#! 
/usr/bin/env bash +set -euo pipefail -param=$1 +declare pkg="github.com/CovenantSQL/CovenantSQL/cmd/cql-minerd" +declare flags=( + "-bench=^BenchmarkMinerGNTE$" + "-benchtime=10s" + "-run=^$" +) -#make -C ../../ clean && \ -#make -C ../../ use_all_cores && \ +fast() { + go test "${flags[@]}" "$pkg" | tee -a gnte.log + go test "${flags[@]}" "$pkg" -bench-miner-count=2 | tee -a gnte.log + go test -cpu=1 "${flags[@]}" "$pkg" -bench-miner-count=2 | tee -a gnte.log +} -if [ "fast" == "$param" ]; then - go test -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ |tee gnte.log - go test -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ |tee -a gnte.log - go test -cpu=1 -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ |tee -a gnte.log -else - go test -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ |tee gnte.log - go test -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ |tee -a gnte.log - go test -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ |tee -a gnte.log - go test -bench=^BenchmarkMinerGNTE4$ -benchtime=10s -run ^$ |tee -a gnte.log - go test -bench=^BenchmarkMinerGNTE8$ -benchtime=10s -run ^$ |tee -a gnte.log +full() { + local cpus=("" 4 1) counts=(1 2 3 4 8) + local cpu count caseflags + for cpu in "${cpus[@]}"; do + if [[ -z $cpu ]]; then + caseflags=("${flags[@]}") + else + caseflags=("-cpu=$cpu" "${flags[@]}") + fi + for count in "${counts[@]}"; do + go test "${caseflags[@]}" "$pkg" -bench-miner-count=$count | tee -a gnte.log + done + done +} - go test -cpu=4 -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ |tee -a gnte.log - go test -cpu=4 -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ |tee -a gnte.log - go test -cpu=4 -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ |tee -a gnte.log - go test -cpu=4 -bench=^BenchmarkMinerGNTE4$ -benchtime=10s -run ^$ |tee -a gnte.log - go test -cpu=4 -bench=^BenchmarkMinerGNTE8$ -benchtime=10s -run ^$ |tee -a gnte.log +main() { + rm -f gnte.log + touch gnte.log + if [[ $# -gt 0 && $1 = "fast" ]]; then 
+ fast + else + full + fi +} - go test -cpu=1 -bench=^BenchmarkMinerGNTE1$ -benchtime=10s -run ^$ |tee -a gnte.log - go test -cpu=1 -bench=^BenchmarkMinerGNTE2$ -benchtime=10s -run ^$ |tee -a gnte.log - go test -cpu=1 -bench=^BenchmarkMinerGNTE3$ -benchtime=10s -run ^$ |tee -a gnte.log - go test -cpu=1 -bench=^BenchmarkMinerGNTE4$ -benchtime=10s -run ^$ |tee -a gnte.log - go test -cpu=1 -bench=^BenchmarkMinerGNTE8$ -benchtime=10s -run ^$ |tee -a gnte.log -fi +main "$@" diff --git a/cmd/cql-minerd/benchTestnet.sh b/cmd/cql-minerd/benchTestnet.sh index 81e0c2912..6e2c86943 100755 --- a/cmd/cql-minerd/benchTestnet.sh +++ b/cmd/cql-minerd/benchTestnet.sh @@ -1,11 +1,32 @@ -#!/bin/bash +#! /usr/bin/env bash +set -euo pipefail -make -C ../../ clean && \ -make -C ../../ use_all_cores -go test -bench=^BenchmarkTestnetMiner1$ -benchtime=10s -run ^$ |tee testnet.log -go test -bench=^BenchmarkTestnetMiner2$ -benchtime=10s -run ^$ |tee -a testnet.log -go test -bench=^BenchmarkTestnetMiner3$ -benchtime=10s -run ^$ |tee -a testnet.log +main() { + local wd="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.."; pwd)" + make -C "$wd" clean + make -C "$wd" use_all_cores -go test -cpu=1 -bench=^BenchmarkTestnetMiner1$ -benchtime=10s -run ^$ |tee -a testnet.log -go test -cpu=1 -bench=^BenchmarkTestnetMiner2$ -benchtime=10s -run ^$ |tee -a testnet.log -go test -cpu=1 -bench=^BenchmarkTestnetMiner3$ -benchtime=10s -run ^$ |tee -a testnet.log + rm -f testnet.log + touch testnet.log + + local flags=( + "-bench=^BenchmarkTestnetMiner$" + "-benchtime=10s" + "-run=^$" + ) + local pkg="github.com/CovenantSQL/CovenantSQL/cmd/cql-minerd" + local cpus=("" 1) counts=(1 2 3) + local cpu count caseflags + for cpu in "${cpus[@]}"; do + if [[ -z $cpu ]]; then + caseflags=("${flags[@]}") + else + caseflags=("-cpu=$cpu" "${flags[@]}") + fi + for count in "${counts[@]}"; do + go test "${caseflags[@]}" "$pkg" -bench-miner-count=$count | tee -a testnet.log + done + done +} + +main "$@" diff --git 
a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index ff818f234..75c663dad 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -21,6 +21,7 @@ package main import ( "context" "database/sql" + "flag" "fmt" "io/ioutil" "math/rand" @@ -36,7 +37,6 @@ import ( sqlite3 "github.com/CovenantSQL/go-sqlite3-encrypt" . "github.com/smartystreets/goconvey/convey" - yaml "gopkg.in/yaml.v2" "github.com/CovenantSQL/CovenantSQL/client" "github.com/CovenantSQL/CovenantSQL/conf" @@ -61,11 +61,28 @@ var ( logDir = FJ(testWorkingDir, "./log/") testGasPrice uint64 = 1 testAdvancePayment uint64 = 20000000 -) -var nodeCmds []*utils.CMD + nodeCmds []*utils.CMD + + FJ = filepath.Join -var FJ = filepath.Join + // Benchmark flags + benchMinerCount int + benchBypassSignature bool + benchEventualConsistency bool + benchMinerConfigDir string +) + +func init() { + flag.IntVar(&benchMinerCount, "bench-miner-count", 1, + "Benchmark miner count.") + flag.BoolVar(&benchBypassSignature, "bench-bypass-signature", false, + "Benchmark bypassing signature.") + flag.BoolVar(&benchEventualConsistency, "bench-eventual-consistency", false, + "Benchmark with eventaul consistency.") + flag.StringVar(&benchMinerConfigDir, "bench-miner-config-dir", "", + "Benchmark custome miner config directory.") +} func startNodes() { ctx := context.Background() @@ -732,11 +749,11 @@ func benchDB(b *testing.B, db *sql.DB, createDB bool) { So(err, ShouldBeNil) } -func benchMiner(b *testing.B, minerCount uint16, bypassSign bool, useEventualConsistency bool) { - log.Warnf("benchmark for %d Miners, BypassSignature: %v", minerCount, bypassSign) - asymmetric.BypassSignature = bypassSign +func benchMiner(b *testing.B, minerCount uint16) { + log.Warnf("benchmark for %d Miners, BypassSignature: %v", minerCount, benchBypassSignature) + asymmetric.BypassSignature = benchBypassSignature if minerCount > 0 { - startNodesProfile(bypassSign) + 
startNodesProfile(benchBypassSignature) utils.WaitToConnect(context.Background(), "127.0.0.1", []int{ 2144, 2145, @@ -770,8 +787,8 @@ func benchMiner(b *testing.B, minerCount uint16, bypassSign bool, useEventualCon // create meta := client.ResourceMeta{ ResourceMeta: types.ResourceMeta{ - Node: minerCount, - UseEventualConsistency: useEventualConsistency, + Node: minerCount, + UseEventualConsistency: benchEventualConsistency, }, } // wait for chain service @@ -919,172 +936,32 @@ func benchOutsideMinerWithTargetMinerList( benchDB(b, db, minerCount > 0) } -func BenchmarkMinerOneNoSign(b *testing.B) { - Convey("bench single node", b, func() { - benchMiner(b, 1, true, false) - }) -} - -func BenchmarkMinerTwoNoSign(b *testing.B) { - Convey("bench two node", b, func() { - benchMiner(b, 2, true, false) - }) -} - -func BenchmarkMinerThreeNoSign(b *testing.B) { - Convey("bench three node", b, func() { - benchMiner(b, 3, true, false) - }) -} - -func BenchmarkMinerOne(b *testing.B) { - Convey("bench single node", b, func() { - benchMiner(b, 1, false, false) - }) -} - -func BenchmarkMinerTwo(b *testing.B) { - Convey("bench two node", b, func() { - benchMiner(b, 2, false, false) - }) -} - -func BenchmarkMinerThree(b *testing.B) { - Convey("bench three node", b, func() { - benchMiner(b, 3, false, false) - }) -} - -func BenchmarkMinerOneNoSignWithEventualConsistency(b *testing.B) { - Convey("bench single node", b, func() { - benchMiner(b, 1, true, true) - }) -} - -func BenchmarkMinerTwoNoSignWithEventualConsistency(b *testing.B) { - Convey("bench two node", b, func() { - benchMiner(b, 2, true, true) - }) -} - -func BenchmarkMinerThreeNoSignWithEventualConsistency(b *testing.B) { - Convey("bench three node", b, func() { - benchMiner(b, 3, true, true) - }) -} - -func BenchmarkMinerOneWithEventualConsistency(b *testing.B) { - Convey("bench single node", b, func() { - benchMiner(b, 1, false, true) - }) -} - -func BenchmarkMinerTwoWithEventualConsistency(b *testing.B) { - 
Convey("bench two node", b, func() { - benchMiner(b, 2, false, true) - }) -} - -func BenchmarkMinerThreeWithEventualConsistency(b *testing.B) { - Convey("bench three node", b, func() { - benchMiner(b, 3, false, true) - }) -} - func BenchmarkClientOnly(b *testing.B) { Convey("bench three node", b, func() { - benchMiner(b, 0, false, false) - }) -} - -func BenchmarkMinerGNTE1(b *testing.B) { - Convey("bench GNTE one node", b, func() { - benchOutsideMiner(b, 1, gnteConfDir) - }) -} - -func BenchmarkMinerGNTE2(b *testing.B) { - Convey("bench GNTE two node", b, func() { - benchOutsideMiner(b, 2, gnteConfDir) - }) -} - -func BenchmarkMinerGNTE3(b *testing.B) { - Convey("bench GNTE three node", b, func() { - benchOutsideMiner(b, 3, gnteConfDir) - }) -} - -func BenchmarkMinerGNTE4(b *testing.B) { - Convey("bench GNTE three node", b, func() { - benchOutsideMiner(b, 4, gnteConfDir) - }) -} - -func BenchmarkMinerGNTE8(b *testing.B) { - Convey("bench GNTE three node", b, func() { - benchOutsideMiner(b, 8, gnteConfDir) - }) -} - -func BenchmarkTestnetMiner1(b *testing.B) { - Convey("bench testnet one node", b, func() { - benchOutsideMiner(b, 1, testnetConfDir) - }) -} - -func BenchmarkTestnetMiner2(b *testing.B) { - Convey("bench testnet one node", b, func() { - benchOutsideMiner(b, 2, testnetConfDir) - }) -} - -func BenchmarkTestnetTargetMiner2(b *testing.B) { - var ( - err error - // Public keys of miners for test - publicKeys = []string{ - "0235abfb93031df7bf776332c510a862e48e81eebea76f5e165406af8fec5215d6", - "03aec5337c0a58b8eff96f8ab30518830ad8e329c74bb30b38901a9395c72340f8", - } - ) - Convey("bench testnet one node", b, func() { - var ( - pubKey asymmetric.PublicKey - addr proto.AccountAddress - targetMiners = make([]proto.AccountAddress, len(publicKeys)) - ) - for i, v := range publicKeys { - err = yaml.Unmarshal([]byte(v), &pubKey) - So(err, ShouldBeNil) - addr, err = crypto.PubKeyHash(&pubKey) - So(err, ShouldBeNil) - targetMiners[i] = addr - } - 
benchOutsideMinerWithTargetMinerList(b, 2, targetMiners, testnetConfDir) + benchMiner(b, 0) }) } -func BenchmarkTestnetMiner3(b *testing.B) { - Convey("bench testnet one node", b, func() { - benchOutsideMiner(b, 3, testnetConfDir) +func BenchmarkMiner(b *testing.B) { + Convey(fmt.Sprintf("bench %d node(s)", benchMinerCount), b, func() { + benchMiner(b, uint16(benchMinerCount)) }) } -func BenchmarkCustomMiner1(b *testing.B) { - Convey("bench custom one node", b, func() { - benchOutsideMiner(b, 1, os.Getenv("miner_conf_dir")) +func BenchmarkMinerGNTE(b *testing.B) { + Convey(fmt.Sprintf("bench GNTE %d node(s)", benchMinerCount), b, func() { + benchOutsideMiner(b, uint16(benchMinerCount), gnteConfDir) }) } -func BenchmarkCustomMiner2(b *testing.B) { - Convey("bench custom one node", b, func() { - benchOutsideMiner(b, 2, os.Getenv("miner_conf_dir")) +func BenchmarkTestnetMiner(b *testing.B) { + Convey(fmt.Sprintf("bench testnet %d node(s)", benchMinerCount), b, func() { + benchOutsideMiner(b, uint16(benchMinerCount), testnetConfDir) }) } -func BenchmarkCustomMiner3(b *testing.B) { - Convey("bench custom one node", b, func() { - benchOutsideMiner(b, 3, os.Getenv("miner_conf_dir")) +func BenchmarkCustomMiner(b *testing.B) { + Convey(fmt.Sprintf("bench custom %d node(s)", benchMinerCount), b, func() { + benchOutsideMiner(b, uint16(benchMinerCount), benchMinerConfigDir) }) } diff --git a/cmd/cql-minerd/main_test.go b/cmd/cql-minerd/main_test.go index 03a7b6494..9d445cf05 100644 --- a/cmd/cql-minerd/main_test.go +++ b/cmd/cql-minerd/main_test.go @@ -18,9 +18,13 @@ package main -import "testing" +import ( + "flag" + "testing" +) func TestMain(m *testing.M) { + flag.Parse() defer m.Run() main() } From 9eb99bdca435ecb59fa2a5dd011f4b26bb1b70d1 Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Mon, 18 Mar 2019 15:47:00 +0800 Subject: [PATCH 119/244] Minor fix --- alltest.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/alltest.sh b/alltest.sh index 
31421cda0..e362edbfb 100755 --- a/alltest.sh +++ b/alltest.sh @@ -15,7 +15,7 @@ main() { # some benchmarks go test -tags "$UNITTESTTAGS" -bench=^BenchmarkPersistentCaller_Call$ -run ^$ ./rpc/ bash cleanupDB.sh || true - go test -tags "$UNITTESTTAGS" -bench=^BenchmarkMinerTwo$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ + go test -tags "$UNITTESTTAGS" -bench=^BenchmarkMiner$ -benchtime=5s -run ^$ ./cmd/cql-minerd/ -bench-miner-count=2 bash cleanupDB.sh || true } From ad0bb8fca1a8d399e80f67b9812ffb3f4fd9ab0f Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 18 Mar 2019 16:28:13 +0800 Subject: [PATCH 120/244] Update licence in code. --- cmd/cql/internal/adapter.go | 16 ++++++++++++++++ cmd/cql/internal/balance.go | 16 ++++++++++++++++ cmd/cql/internal/base.go | 16 ++++++++++++++++ cmd/cql/internal/cfg.go | 16 ++++++++++++++++ cmd/cql/internal/console.go | 2 +- cmd/cql/internal/create.go | 16 ++++++++++++++++ cmd/cql/internal/drop.go | 16 ++++++++++++++++ cmd/cql/internal/help.go | 16 ++++++++++++++++ cmd/cql/internal/permission.go | 16 ++++++++++++++++ cmd/cql/internal/transfer.go | 16 ++++++++++++++++ cmd/cql/internal/web.go | 16 ++++++++++++++++ cmd/cql/main.go | 3 +-- cmd/cql/main_test.go | 2 +- 13 files changed, 163 insertions(+), 4 deletions(-) diff --git a/cmd/cql/internal/adapter.go b/cmd/cql/internal/adapter.go index 24a638fc1..bc13e2029 100644 --- a/cmd/cql/internal/adapter.go +++ b/cmd/cql/internal/adapter.go @@ -1,3 +1,19 @@ +/* + * Copyright 2018-2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + package internal import ( diff --git a/cmd/cql/internal/balance.go b/cmd/cql/internal/balance.go index 7a9688b2a..0b7455104 100644 --- a/cmd/cql/internal/balance.go +++ b/cmd/cql/internal/balance.go @@ -1,3 +1,19 @@ +/* + * Copyright 2018-2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package internal import ( diff --git a/cmd/cql/internal/base.go b/cmd/cql/internal/base.go index ae829b350..51d7fbc39 100644 --- a/cmd/cql/internal/base.go +++ b/cmd/cql/internal/base.go @@ -1,3 +1,19 @@ +/* + * Copyright 2018-2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package internal import ( diff --git a/cmd/cql/internal/cfg.go b/cmd/cql/internal/cfg.go index 1db4b2c08..6e3aca32e 100644 --- a/cmd/cql/internal/cfg.go +++ b/cmd/cql/internal/cfg.go @@ -1,3 +1,19 @@ +/* + * Copyright 2018-2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package internal import ( diff --git a/cmd/cql/internal/console.go b/cmd/cql/internal/console.go index b7403c144..8a2908860 100644 --- a/cmd/cql/internal/console.go +++ b/cmd/cql/internal/console.go @@ -1,6 +1,6 @@ /* * Copyright 2016-2018 Kenneth Shaw. - * Copyright 2018 The CovenantSQL Authors. + * Copyright 2018-2019 The CovenantSQL Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cmd/cql/internal/create.go b/cmd/cql/internal/create.go index e72e802b2..b3e34fcf2 100644 --- a/cmd/cql/internal/create.go +++ b/cmd/cql/internal/create.go @@ -1,3 +1,19 @@ +/* + * Copyright 2018-2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package internal import ( diff --git a/cmd/cql/internal/drop.go b/cmd/cql/internal/drop.go index 32c1b4e01..ca1b208f0 100644 --- a/cmd/cql/internal/drop.go +++ b/cmd/cql/internal/drop.go @@ -1,3 +1,19 @@ +/* + * Copyright 2018-2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package internal import ( diff --git a/cmd/cql/internal/help.go b/cmd/cql/internal/help.go index d7fd1bf13..b80dc852d 100644 --- a/cmd/cql/internal/help.go +++ b/cmd/cql/internal/help.go @@ -1,3 +1,19 @@ +/* + * Copyright 2018-2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + package internal import ( diff --git a/cmd/cql/internal/permission.go b/cmd/cql/internal/permission.go index c71fe8bc3..d7b6fca1a 100644 --- a/cmd/cql/internal/permission.go +++ b/cmd/cql/internal/permission.go @@ -1,3 +1,19 @@ +/* + * Copyright 2018-2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package internal import ( diff --git a/cmd/cql/internal/transfer.go b/cmd/cql/internal/transfer.go index b3d6025bf..d70e4aee2 100644 --- a/cmd/cql/internal/transfer.go +++ b/cmd/cql/internal/transfer.go @@ -1,3 +1,19 @@ +/* + * Copyright 2018-2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + package internal import ( diff --git a/cmd/cql/internal/web.go b/cmd/cql/internal/web.go index 78ec992b1..b969de6cb 100644 --- a/cmd/cql/internal/web.go +++ b/cmd/cql/internal/web.go @@ -1,3 +1,19 @@ +/* + * Copyright 2018-2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + package internal import ( diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 4a2e4d100..131d222c9 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -1,6 +1,5 @@ /* - * Copyright 2016-2018 Kenneth Shaw. - * Copyright 2018 The CovenantSQL Authors. + * Copyright 2018-2019 The CovenantSQL Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. diff --git a/cmd/cql/main_test.go b/cmd/cql/main_test.go index 03a7b6494..ef97d55c7 100644 --- a/cmd/cql/main_test.go +++ b/cmd/cql/main_test.go @@ -1,7 +1,7 @@ // +build testbinary /* - * Copyright 2018 The CovenantSQL Authors. + * Copyright 2018-2019 The CovenantSQL Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
From 641b58a0f56205ce227ab98c2adb89cd1ef06e49 Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Mon, 18 Mar 2019 18:53:41 +0800 Subject: [PATCH 121/244] Add function to make bench name for parsing --- cmd/cql-minerd/bench.sh | 2 +- cmd/cql-minerd/integration_test.go | 18 ++++++++++++++++-- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/cmd/cql-minerd/bench.sh b/cmd/cql-minerd/bench.sh index 5862b5264..e9d29d6e2 100755 --- a/cmd/cql-minerd/bench.sh +++ b/cmd/cql-minerd/bench.sh @@ -8,7 +8,7 @@ main() { make -C "$wd" use_all_cores local pkg="github.com/CovenantSQL/CovenantSQL/cmd/cql-minerd" - go test -bench=^BenchmarkSQLite$ -benchtime=10s -run=^$ "$pkg" + #go test -bench=^BenchmarkSQLite$ -benchtime=10s -run=^$ "$pkg" local flags=( "-bench=^BenchmarkMiner$" diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index 75c663dad..a99a40984 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -29,6 +29,7 @@ import ( "os/exec" "path/filepath" "runtime" + "strings" "sync" "sync/atomic" "syscall" @@ -648,6 +649,19 @@ func cleanBenchTable(db *sql.DB) { So(err, ShouldBeNil) } +func makeBenchName(trailings ...string) string { + var parts = make([]string, 0, 3+len(trailings)) + parts = append(parts, fmt.Sprintf("%dMiner", benchMinerCount)) + if benchBypassSignature { + parts = append(parts, "BypassSignature") + } + if benchEventualConsistency { + parts = append(parts, "EventualConsistency") + } + parts = append(parts, trailings...) 
+ return strings.Join(parts, "_") +} + func benchDB(b *testing.B, db *sql.DB, createDB bool) { var err error if createDB { @@ -659,7 +673,7 @@ func benchDB(b *testing.B, db *sql.DB, createDB bool) { var i int64 i = -1 - b.Run("benchmark INSERT", func(b *testing.B) { + b.Run(makeBenchName("INSERT"), func(b *testing.B) { b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { @@ -704,7 +718,7 @@ func benchDB(b *testing.B, db *sql.DB, createDB bool) { } log.Warnf("row Count: %v", count) - b.Run("benchmark SELECT", func(b *testing.B) { + b.Run(makeBenchName("SELECT"), func(b *testing.B) { b.ResetTimer() b.RunParallel(func(pb *testing.PB) { for pb.Next() { From 14395bc19569ee5b1ef60132c46d1166f4f463b4 Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Mon, 18 Mar 2019 19:01:56 +0800 Subject: [PATCH 122/244] Add eventual consistency benchmark in GNTE --- cmd/cql-minerd/benchGNTE.sh | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/cmd/cql-minerd/benchGNTE.sh b/cmd/cql-minerd/benchGNTE.sh index c22d841e4..3e0657123 100755 --- a/cmd/cql-minerd/benchGNTE.sh +++ b/cmd/cql-minerd/benchGNTE.sh @@ -9,12 +9,14 @@ declare flags=( ) fast() { - go test "${flags[@]}" "$pkg" | tee -a gnte.log - go test "${flags[@]}" "$pkg" -bench-miner-count=2 | tee -a gnte.log - go test -cpu=1 "${flags[@]}" "$pkg" -bench-miner-count=2 | tee -a gnte.log + echo "Fast benchmarking with flags: $@" + go test "${flags[@]}" "$pkg" "$@" | tee -a gnte.log + go test "${flags[@]}" "$pkg" "$@" -bench-miner-count=2 | tee -a gnte.log + go test -cpu=1 "${flags[@]}" "$pkg" "$@" -bench-miner-count=2 | tee -a gnte.log } full() { + echo "Full benchmarking with flags: $@" local cpus=("" 4 1) counts=(1 2 3 4 8) local cpu count caseflags for cpu in "${cpus[@]}"; do @@ -24,7 +26,7 @@ full() { caseflags=("-cpu=$cpu" "${flags[@]}") fi for count in "${counts[@]}"; do - go test "${caseflags[@]}" "$pkg" -bench-miner-count=$count | tee -a gnte.log + go test "${caseflags[@]}" "$pkg" "$@" 
-bench-miner-count=$count | tee -a gnte.log done done } @@ -36,6 +38,7 @@ main() { fast else full + full -bench-eventual-consistency fi } From 577c112f35374b3d9c290c3c3b63e12dacee86ea Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 18 Mar 2019 19:16:12 +0800 Subject: [PATCH 123/244] Add help info while no command provide. --- cmd/cql/internal/adapter.go | 9 +++++-- cmd/cql/internal/balance.go | 11 +++++++-- cmd/cql/internal/base.go | 5 +++- cmd/cql/internal/cfg.go | 3 --- cmd/cql/internal/console.go | 15 ++++++++++-- cmd/cql/internal/create.go | 15 ++++++++---- cmd/cql/internal/drop.go | 13 ++++++++-- cmd/cql/internal/help.go | 7 ++++-- cmd/cql/internal/permission.go | 13 ++++++++-- cmd/cql/internal/transfer.go | 14 +++++++++-- cmd/cql/internal/web.go | 9 +++++-- cmd/cql/main.go | 45 +++++++++++++++++++++++++--------- types/token.go | 4 ++- 13 files changed, 125 insertions(+), 38 deletions(-) diff --git a/cmd/cql/internal/adapter.go b/cmd/cql/internal/adapter.go index bc13e2029..1d93e282f 100644 --- a/cmd/cql/internal/adapter.go +++ b/cmd/cql/internal/adapter.go @@ -33,8 +33,13 @@ var ( // CmdAdapter is cql adapter command entity. var CmdAdapter = &Command{ - UsageLine: "cql adapter [-tmp-path path] [-bg-log-level level] [address]", - Description: "Adapter command serve a database chain adapter, e.g. :7784", + UsageLine: "cql adapter [-config file] [-password masterkey] [-tmp-path path] [-bg-log-level level] [address]", + Short: "start a database chain adapter", + Long: ` +Adapter command serve a database chain adapter +e.g. + cql adapter 127.0.0.1:7784 +`, } func init() { diff --git a/cmd/cql/internal/balance.go b/cmd/cql/internal/balance.go index 0b7455104..105b1f109 100644 --- a/cmd/cql/internal/balance.go +++ b/cmd/cql/internal/balance.go @@ -29,8 +29,15 @@ var ( // CmdBalance is cql balance command entity. 
var CmdBalance = &Command{ - UsageLine: "cql balance [-token token_name]", - Description: "Get CovenantSQL balance of current account", + UsageLine: "cql balance [-config file] [-password masterkey] [-token token_name]", + Short: "get balance of current account", + Long: ` +Balance command can get CovenantSQL token balance of current account +e.g. + cql balance + + cql balance -token Particle +`, } func init() { diff --git a/cmd/cql/internal/base.go b/cmd/cql/internal/base.go index 51d7fbc39..4d9b2a432 100644 --- a/cmd/cql/internal/base.go +++ b/cmd/cql/internal/base.go @@ -52,8 +52,11 @@ type Command struct { // The first word in the line is taken to be the command name. UsageLine string + // Short is the short description shown in the 'cql help' output. + Short string + // Long is the long message shown in the 'cql help ' output. - Description string + Long string // Flag is a set of flags specific to this command. Flag flag.FlagSet diff --git a/cmd/cql/internal/cfg.go b/cmd/cql/internal/cfg.go index 6e3aca32e..1ce9206b8 100644 --- a/cmd/cql/internal/cfg.go +++ b/cmd/cql/internal/cfg.go @@ -41,9 +41,6 @@ var ( // Shard chain explorer/adapter stuff tmpPath string // background observer and explorer block and log file path bgLogLevel string // background log level - - // CmdName stores cql command strings for error messages. - CmdName string ) func addCommonFlags(cmd *Command) { diff --git a/cmd/cql/internal/console.go b/cmd/cql/internal/console.go index 8a2908860..581b06f2b 100644 --- a/cmd/cql/internal/console.go +++ b/cmd/cql/internal/console.go @@ -46,8 +46,19 @@ import ( // CmdConsole is cql console command entity. 
var CmdConsole = &Command{ - UsageLine: "cql console [-dsn dsn_string] [-command sqlcommand] [-file filename] [-out outputfile] [-no-rc true/false] [-single-transaction] [-variable variables]", - Description: "run a console for realtime sql operation", + UsageLine: "cql console [-config file] [-password masterkey] [-dsn dsn_string] [-command sqlcommand] [-file filename] [-out outputfile] [-no-rc true/false] [-single-transaction] [-variable variables] [-web web_addr] [-adapter adapter_addr]", + Short: "run a console for realtime sql operation", + Long: ` +Console command can run a realtime sql console for CovenantSQL +The -dsn param is required +e.g. + cql console -dsn covenant://the_dsn_of_your_database + +There is also a -command param for sql script, and a -file param for read sql in file. +If those params are set, it will run sql script and exit without staying console mode. +e.g. + cql console -dsn covenant://the_dsn_of_your_database -command "create table test1(test2 int);" +`, } var ( diff --git a/cmd/cql/internal/create.go b/cmd/cql/internal/create.go index b3e34fcf2..debabcabb 100644 --- a/cmd/cql/internal/create.go +++ b/cmd/cql/internal/create.go @@ -26,11 +26,16 @@ import ( // CmdCreate is cql create command entity. var CmdCreate = &Command{ - UsageLine: "cql create [-wait-tx-confirm] [dbmeta]", - Description: ` -Create CovenantSQL database by database metainfo json string(must include node count) - e.g. - cql create -wait-tx-confirm '{"node":2}' + UsageLine: "cql create [-config file] [-password masterkey] [-wait-tx-confirm] [dbmeta]", + Short: "create a database", + Long: ` +Create CovenantSQL database by database metainfo json string, metainfo must include node count. +e.g. + cql create '{"node":2}' + +Since CovenantSQL is blockchain database, you may want get confirm of creation. +e.g. 
+ cql create -wait-tx-confirm '{"node":2}' `, } diff --git a/cmd/cql/internal/drop.go b/cmd/cql/internal/drop.go index ca1b208f0..fb05c3df3 100644 --- a/cmd/cql/internal/drop.go +++ b/cmd/cql/internal/drop.go @@ -22,8 +22,17 @@ import ( // CmdDrop is cql drop command entity. var CmdDrop = &Command{ - UsageLine: "cql drop [-wait-tx-confirm] [dsn/dbid]", - Description: "Drop CovenantSQL database by dsn or database id", + UsageLine: "cql drop [-config file] [-password masterkey] [-wait-tx-confirm] [dsn/dbid]", + Short: "drop a database by dsn or database id", + Long: ` +Drop command can drop a database by dsn or database id +e.g. + cql drop covenant://the_dsn_of_your_database + +Since CovenantSQL is blockchain database, you may want get confirm of drop operation. +e.g. + cql drop -wait-tx-confirm covenant://the_dsn_of_your_database +`, } func init() { diff --git a/cmd/cql/internal/help.go b/cmd/cql/internal/help.go index b80dc852d..3ef768079 100644 --- a/cmd/cql/internal/help.go +++ b/cmd/cql/internal/help.go @@ -30,8 +30,11 @@ var ( // CmdVersion is cql version command entity. var CmdVersion = &Command{ - UsageLine: "cql version", - Description: "Show cql build version infomation", + UsageLine: "cql version", + Short: "show build version infomation", + Long: ` +Use "cql help " for more information about a command. +`, } func init() { diff --git a/cmd/cql/internal/permission.go b/cmd/cql/internal/permission.go index d7b6fca1a..af2b150c7 100644 --- a/cmd/cql/internal/permission.go +++ b/cmd/cql/internal/permission.go @@ -26,8 +26,17 @@ import ( // CmdPermission is cql permission command entity. 
var CmdPermission = &Command{ - UsageLine: "cql permission [-wait-tx-confirm] [perm_meta]", - Description: "Update user's permission on specific sqlchain", + UsageLine: "cql permission [-config file] [-password masterkey] [-wait-tx-confirm] [perm_meta]", + Short: "update user's permission on specific sqlchain", + Long: ` +Permission command can give a user specific permissions on your database +e.g. + cql permission '{"chain":"your_chain_addr","user":"user_addr","perm":"perm_struct"}' + +Since CovenantSQL is blockchain database, you may want get confirm of permission update. +e.g. + cql permission -wait-tx-confirm '{"chain":"your_chain_addr","user":"user_addr","perm":"perm_struct"}' +`, } func init() { diff --git a/cmd/cql/internal/transfer.go b/cmd/cql/internal/transfer.go index d70e4aee2..e6cdf07ae 100644 --- a/cmd/cql/internal/transfer.go +++ b/cmd/cql/internal/transfer.go @@ -29,8 +29,18 @@ import ( // CmdTransfer is cql transfer command entity. var CmdTransfer = &Command{ - UsageLine: "cql transfer [-wait-tx-confirm] [meta_json]", - Description: "Transfer token to target account", + UsageLine: "cql transfer [-config file] [-password masterkey] [-wait-tx-confirm] [meta_json]", + Short: "transfer token to target account", + Long: ` +Transfer command can transfer your token to target account. +Command argument is json meta info of a token transaction. +e.g. + cql transfer '{"addr":"your_account_addr","amount":"100 Particle"}' + +Since CovenantSQL is blockchain database, you may want get confirm of permission update. +e.g. + cql transfer -wait-tx-confirm '{"addr":"your_account_addr","amount":"100 Particle"}' +`, } func init() { diff --git a/cmd/cql/internal/web.go b/cmd/cql/internal/web.go index b969de6cb..9882c45b4 100644 --- a/cmd/cql/internal/web.go +++ b/cmd/cql/internal/web.go @@ -32,8 +32,13 @@ var ( // CmdWeb is cql web command. 
var CmdWeb = &Command{ - UsageLine: "cql web [-tmp-path path] [-bg-log-level level] [address]", - Description: "Web command serve a database chain explorer, e.g. :8546", + UsageLine: "cql web [-config file] [-tmp-path path] [-bg-log-level level] [address]", + Short: "start a database chain web explorer", + Long: ` +Web command serve a database chain web explorer. +e.g. + cql web 127.0.0.1:8546", +`, } func init() { diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 131d222c9..df417bbee 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -21,7 +21,6 @@ import ( "fmt" "math/rand" "os" - "strings" "time" "github.com/CovenantSQL/CovenantSQL/cmd/cql/internal" @@ -34,14 +33,14 @@ var ( func init() { internal.CqlCommands = []*internal.Command{ internal.CmdConsole, - internal.CmdVersion, - internal.CmdBalance, internal.CmdCreate, internal.CmdDrop, - internal.CmdPermission, + internal.CmdBalance, internal.CmdTransfer, + internal.CmdPermission, internal.CmdWeb, internal.CmdAdapter, + internal.CmdVersion, } } @@ -60,7 +59,6 @@ func main() { mainUsage() } - internal.CmdName = args[0] // for error messages if args[0] == "help" { mainUsage() return @@ -82,16 +80,39 @@ func main() { internal.Exit() return } - helpArg := "" - if i := strings.LastIndex(internal.CmdName, " "); i >= 0 { - helpArg = " " + internal.CmdName[:i] - } - fmt.Fprintf(os.Stderr, "cql %s: unknown command\nRun 'cql help%s' for usage.\n", internal.CmdName, helpArg) + fmt.Fprintf(os.Stderr, "cql %s: unknown command\nRun 'cql help' for usage.\n", args[0]) internal.SetExitStatus(2) internal.Exit() } func mainUsage() { - //TODO(laodouya) print stderr main usage - os.Exit(2) + helpHead := `cql is a tool for managing CovenantSQL database. + +Usage: + + cql [-params] [arguments] + +The commands are: + +` + helpTail := ` +Use "cql help " for more information about a command. 
+` + + helpMsg := helpHead + for _, cmd := range internal.CqlCommands { + if cmd.Name() == "help" { + continue + } + cmdName := cmd.Name() + if len(cmd.Name()) < 8 { + cmdName += "\t" + } + helpMsg += "\t" + cmdName + "\t" + cmd.Short + "\n" + } + helpMsg += helpTail + + fmt.Fprintf(os.Stderr, helpMsg) + internal.SetExitStatus(2) + internal.Exit() } diff --git a/types/token.go b/types/token.go index 43df584bd..15af81d32 100644 --- a/types/token.go +++ b/types/token.go @@ -16,6 +16,8 @@ package types +import "strings" + //go:generate hsp // TokenType defines token's type. @@ -58,7 +60,7 @@ func (t TokenType) String() string { func FromString(t string) TokenType { var i TokenType for ; i < SupportTokenNumber; i++ { - if TokenList[i] == t { + if strings.ToLower(TokenList[i]) == strings.ToLower(t) { return i } } From 4d6dd7e9086d5f1a0650d5b585c13894e131fba5 Mon Sep 17 00:00:00 2001 From: laodouya Date: Mon, 18 Mar 2019 19:32:48 +0800 Subject: [PATCH 124/244] Add help command to show sub command usage. --- cmd/cql/internal/help.go | 64 ++++++++++++++++++++++++++++++++++++++++ cmd/cql/main.go | 42 ++------------------------ 2 files changed, 67 insertions(+), 39 deletions(-) diff --git a/cmd/cql/internal/help.go b/cmd/cql/internal/help.go index 3ef768079..ec9dd707d 100644 --- a/cmd/cql/internal/help.go +++ b/cmd/cql/internal/help.go @@ -18,6 +18,7 @@ package internal import ( "fmt" + "os" "runtime" ) @@ -37,8 +38,18 @@ Use "cql help " for more information about a command. `, } +// CmdHelp is cql help command entity. +var CmdHelp = &Command{ + UsageLine: "cql help [command]", + Short: "show help of sub commands", + Long: ` +Use "cql help " for more information about a command. +`, +} + func init() { CmdVersion.Run = runVersion + CmdHelp.Run = runHelp } // PrintVersion prints program git version. 
@@ -56,3 +67,56 @@ func PrintVersion(printLog bool) string { func runVersion(cmd *Command, args []string) { fmt.Print(PrintVersion(false)) } + +func runHelp(cmd *Command, args []string) { + if len(args) != 1 { + MainUsage() + } + + cmdName := args[0] + for _, cmd := range CqlCommands { + if cmd.Name() != cmdName { + continue + } + fmt.Fprintf(os.Stderr, cmd.UsageLine) + fmt.Fprintf(os.Stderr, cmd.Long) + SetExitStatus(2) + return + } + + //Not support command + MainUsage() +} + +// MainUsage prints cql base help +func MainUsage() { + helpHead := `cql is a tool for managing CovenantSQL database. + +Usage: + + cql [-params] [arguments] + +The commands are: + +` + helpTail := ` +Use "cql help " for more information about a command. +` + + helpMsg := helpHead + for _, cmd := range CqlCommands { + if cmd.Name() == "help" { + continue + } + cmdName := cmd.Name() + if len(cmd.Name()) < 8 { + cmdName += "\t" + } + helpMsg += "\t" + cmdName + "\t" + cmd.Short + "\n" + } + helpMsg += helpTail + + fmt.Fprintf(os.Stderr, helpMsg) + SetExitStatus(2) + Exit() +} diff --git a/cmd/cql/main.go b/cmd/cql/main.go index df417bbee..fc75ffcd4 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -41,6 +41,7 @@ func init() { internal.CmdWeb, internal.CmdAdapter, internal.CmdVersion, + internal.CmdHelp, } } @@ -51,17 +52,12 @@ func main() { // set random rand.Seed(time.Now().UnixNano()) - flag.Usage = mainUsage + flag.Usage = internal.MainUsage flag.Parse() args := flag.Args() if len(args) < 1 { - mainUsage() - } - - if args[0] == "help" { - mainUsage() - return + internal.MainUsage() } internal.PrintVersion(true) @@ -84,35 +80,3 @@ func main() { internal.SetExitStatus(2) internal.Exit() } - -func mainUsage() { - helpHead := `cql is a tool for managing CovenantSQL database. - -Usage: - - cql [-params] [arguments] - -The commands are: - -` - helpTail := ` -Use "cql help " for more information about a command. 
-` - - helpMsg := helpHead - for _, cmd := range internal.CqlCommands { - if cmd.Name() == "help" { - continue - } - cmdName := cmd.Name() - if len(cmd.Name()) < 8 { - cmdName += "\t" - } - helpMsg += "\t" + cmdName + "\t" + cmd.Short + "\n" - } - helpMsg += helpTail - - fmt.Fprintf(os.Stderr, helpMsg) - internal.SetExitStatus(2) - internal.Exit() -} From 90e35a80f83b3660a1b25ac2db666cc50a4c9c45 Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 19 Mar 2019 15:14:05 +0800 Subject: [PATCH 125/244] Modify cql command help grammar. --- cmd/cql/internal/adapter.go | 4 ++-- cmd/cql/internal/balance.go | 6 +++--- cmd/cql/internal/console.go | 10 +++++----- cmd/cql/internal/create.go | 6 +++--- cmd/cql/internal/drop.go | 6 +++--- cmd/cql/internal/help.go | 4 ++-- cmd/cql/internal/permission.go | 6 +++--- cmd/cql/internal/transfer.go | 8 ++++---- cmd/cql/internal/web.go | 4 ++-- cmd/cql/main.go | 4 +++- 10 files changed, 30 insertions(+), 28 deletions(-) diff --git a/cmd/cql/internal/adapter.go b/cmd/cql/internal/adapter.go index 1d93e282f..789dc3e5c 100644 --- a/cmd/cql/internal/adapter.go +++ b/cmd/cql/internal/adapter.go @@ -36,9 +36,9 @@ var CmdAdapter = &Command{ UsageLine: "cql adapter [-config file] [-password masterkey] [-tmp-path path] [-bg-log-level level] [address]", Short: "start a database chain adapter", Long: ` -Adapter command serve a database chain adapter +Adapter command serves a database chain adapter e.g. - cql adapter 127.0.0.1:7784 + cql adapter 127.0.0.1:7784 `, } diff --git a/cmd/cql/internal/balance.go b/cmd/cql/internal/balance.go index 105b1f109..9e7c17b75 100644 --- a/cmd/cql/internal/balance.go +++ b/cmd/cql/internal/balance.go @@ -30,13 +30,13 @@ var ( // CmdBalance is cql balance command entity. 
var CmdBalance = &Command{ UsageLine: "cql balance [-config file] [-password masterkey] [-token token_name]", - Short: "get balance of current account", + Short: "get the balance of current account", Long: ` Balance command can get CovenantSQL token balance of current account e.g. - cql balance + cql balance - cql balance -token Particle + cql balance -token Particle `, } diff --git a/cmd/cql/internal/console.go b/cmd/cql/internal/console.go index 581b06f2b..adebc7988 100644 --- a/cmd/cql/internal/console.go +++ b/cmd/cql/internal/console.go @@ -49,15 +49,15 @@ var CmdConsole = &Command{ UsageLine: "cql console [-config file] [-password masterkey] [-dsn dsn_string] [-command sqlcommand] [-file filename] [-out outputfile] [-no-rc true/false] [-single-transaction] [-variable variables] [-web web_addr] [-adapter adapter_addr]", Short: "run a console for realtime sql operation", Long: ` -Console command can run a realtime sql console for CovenantSQL +Console command can run a real-time SQL console for CovenantSQL The -dsn param is required e.g. - cql console -dsn covenant://the_dsn_of_your_database + cql console -dsn covenant://the_dsn_of_your_database -There is also a -command param for sql script, and a -file param for read sql in file. -If those params are set, it will run sql script and exit without staying console mode. +There is also a -command param for SQL script, and a -file param for reading SQL in a file. +If those params are set, it will run SQL script and exit without staying console mode. e.g. 
- cql console -dsn covenant://the_dsn_of_your_database -command "create table test1(test2 int);" + cql console -dsn covenant://the_dsn_of_your_database -command "create table test1(test2 int);" `, } diff --git a/cmd/cql/internal/create.go b/cmd/cql/internal/create.go index debabcabb..2d3a293a3 100644 --- a/cmd/cql/internal/create.go +++ b/cmd/cql/internal/create.go @@ -29,13 +29,13 @@ var CmdCreate = &Command{ UsageLine: "cql create [-config file] [-password masterkey] [-wait-tx-confirm] [dbmeta]", Short: "create a database", Long: ` -Create CovenantSQL database by database metainfo json string, metainfo must include node count. +Create CovenantSQL database by database meta info JSON string, meta info must include node count. e.g. - cql create '{"node":2}' + cql create '{"node":2}' Since CovenantSQL is blockchain database, you may want get confirm of creation. e.g. - cql create -wait-tx-confirm '{"node":2}' + cql create -wait-tx-confirm '{"node":2}' `, } diff --git a/cmd/cql/internal/drop.go b/cmd/cql/internal/drop.go index fb05c3df3..714264529 100644 --- a/cmd/cql/internal/drop.go +++ b/cmd/cql/internal/drop.go @@ -25,13 +25,13 @@ var CmdDrop = &Command{ UsageLine: "cql drop [-config file] [-password masterkey] [-wait-tx-confirm] [dsn/dbid]", Short: "drop a database by dsn or database id", Long: ` -Drop command can drop a database by dsn or database id +Drop command can drop a database by DSN or database id e.g. - cql drop covenant://the_dsn_of_your_database + cql drop covenant://the_dsn_of_your_database Since CovenantSQL is blockchain database, you may want get confirm of drop operation. e.g. 
- cql drop -wait-tx-confirm covenant://the_dsn_of_your_database + cql drop -wait-tx-confirm covenant://the_dsn_of_your_database `, } diff --git a/cmd/cql/internal/help.go b/cmd/cql/internal/help.go index ec9dd707d..8373cb681 100644 --- a/cmd/cql/internal/help.go +++ b/cmd/cql/internal/help.go @@ -78,7 +78,7 @@ func runHelp(cmd *Command, args []string) { if cmd.Name() != cmdName { continue } - fmt.Fprintf(os.Stderr, cmd.UsageLine) + fmt.Fprintf(os.Stderr, "usage: %s\n", cmd.UsageLine) fmt.Fprintf(os.Stderr, cmd.Long) SetExitStatus(2) return @@ -94,7 +94,7 @@ func MainUsage() { Usage: - cql [-params] [arguments] + cql [-params] [arguments] The commands are: diff --git a/cmd/cql/internal/permission.go b/cmd/cql/internal/permission.go index af2b150c7..695044d1c 100644 --- a/cmd/cql/internal/permission.go +++ b/cmd/cql/internal/permission.go @@ -29,13 +29,13 @@ var CmdPermission = &Command{ UsageLine: "cql permission [-config file] [-password masterkey] [-wait-tx-confirm] [perm_meta]", Short: "update user's permission on specific sqlchain", Long: ` -Permission command can give a user specific permissions on your database +Permission command can give a user some specific permissions on your database e.g. - cql permission '{"chain":"your_chain_addr","user":"user_addr","perm":"perm_struct"}' + cql permission '{"chain":"your_chain_addr","user":"user_addr","perm":"perm_struct"}' Since CovenantSQL is blockchain database, you may want get confirm of permission update. e.g. 
- cql permission -wait-tx-confirm '{"chain":"your_chain_addr","user":"user_addr","perm":"perm_struct"}' + cql permission -wait-tx-confirm '{"chain":"your_chain_addr","user":"user_addr","perm":"perm_struct"}' `, } diff --git a/cmd/cql/internal/transfer.go b/cmd/cql/internal/transfer.go index e6cdf07ae..97de11ed5 100644 --- a/cmd/cql/internal/transfer.go +++ b/cmd/cql/internal/transfer.go @@ -32,14 +32,14 @@ var CmdTransfer = &Command{ UsageLine: "cql transfer [-config file] [-password masterkey] [-wait-tx-confirm] [meta_json]", Short: "transfer token to target account", Long: ` -Transfer command can transfer your token to target account. -Command argument is json meta info of a token transaction. +Transfer command can transfer your token to the target account. +Command argument is JSON meta info of a token transaction. e.g. - cql transfer '{"addr":"your_account_addr","amount":"100 Particle"}' + cql transfer '{"addr":"your_account_addr","amount":"100 Particle"}' Since CovenantSQL is blockchain database, you may want get confirm of permission update. e.g. - cql transfer -wait-tx-confirm '{"addr":"your_account_addr","amount":"100 Particle"}' + cql transfer -wait-tx-confirm '{"addr":"your_account_addr","amount":"100 Particle"}' `, } diff --git a/cmd/cql/internal/web.go b/cmd/cql/internal/web.go index 9882c45b4..3a56b5656 100644 --- a/cmd/cql/internal/web.go +++ b/cmd/cql/internal/web.go @@ -35,9 +35,9 @@ var CmdWeb = &Command{ UsageLine: "cql web [-config file] [-tmp-path path] [-bg-log-level level] [address]", Short: "start a database chain web explorer", Long: ` -Web command serve a database chain web explorer. +Web command serves a database chain web explorer. e.g. 
- cql web 127.0.0.1:8546", + cql web 127.0.0.1:8546", `, } diff --git a/cmd/cql/main.go b/cmd/cql/main.go index fc75ffcd4..7529a0df9 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -60,7 +60,9 @@ func main() { internal.MainUsage() } - internal.PrintVersion(true) + if args[0] != "version" && args[0] != "help" { + internal.PrintVersion(true) + } for _, cmd := range internal.CqlCommands { if cmd.Name() != args[0] { From 086d11ccbc0b46372ca52b0f3bb04ee585e4acff Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 19 Mar 2019 15:20:56 +0800 Subject: [PATCH 126/244] Update cql README. --- cmd/cql/README-zh.md | 11 ++++++----- cmd/cql/README.md | 11 ++++++----- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/cmd/cql/README-zh.md b/cmd/cql/README-zh.md index a591243b3..99ba7dbfb 100644 --- a/cmd/cql/README-zh.md +++ b/cmd/cql/README-zh.md @@ -18,8 +18,9 @@ $ go get github.com/CovenantSQL/CovenantSQL/cmd/cql ## 检查钱包余额 使用 `cql` 命令来检查钱包余额: + ```bash -$ cql -get-balance +$ cql balance INFO[0000] ### Public Key ### 0388954cf083bb6bb2b9c7248849b57c76326296fcc0d69764fc61eedb5b8d820c @@ -36,7 +37,7 @@ INFO[0000] covenant coin balance is: 0 caller="main.go:247 mai ```bash # if a non-default password applied on master key, use `-password` to pass it -$ cql -create 1 +$ cql create '{"node":1}' INFO[0000] ### Public Key ### 039bc931161383c994ab9b81e95ddc1494b0efeb1cb735bb91e1043a1d6b98ebfd @@ -45,17 +46,17 @@ INFO[0000] INFO[0000] the newly created database is: covenantsql://0e9103318821b027f35b96c4fd5562683543276b72c488966d616bfe0fe4d213 caller="main.go:297 main.main" ``` -这里 `-create 1` 表示创建一个单节点的 SQLChain。 +这里 `create '{"node":1}'` 表示创建一个单节点的 SQLChain。 ```bash -$ cql -dsn covenantsql://address +$ cql console -dsn covenantsql://address ``` `address` 就是你的数据库 ID。 `cql` 命令的详细使用帮助如下: ```bash -$ cql -help +$ cql help ``` ## 使用 `cql` diff --git a/cmd/cql/README.md b/cmd/cql/README.md index ab5b3c9f5..40c9d0c90 100644 --- a/cmd/cql/README.md +++ b/cmd/cql/README.md @@ 
-18,8 +18,9 @@ See: [cql-utils doc](https://github.com/CovenantSQL/CovenantSQL/tree/develop/cmd ## Check balance Use `cql` to check your wallet balance: + ```bash -$ cql -get-balance +$ cql balance INFO[0000] ### Public Key ### 0388954cf083bb6bb2b9c7248849b57c76326296fcc0d69764fc61eedb5b8d820c @@ -37,7 +38,7 @@ You can get a database id when create a new SQL Chain: ```bash # if a non-default password applied on master key, use `-password` to pass it -$ cql -create 1 +$ cql create '{"node":1}' INFO[0000] ### Public Key ### 039bc931161383c994ab9b81e95ddc1494b0efeb1cb735bb91e1043a1d6b98ebfd @@ -46,17 +47,17 @@ INFO[0000] INFO[0000] the newly created database is: covenantsql://0e9103318821b027f35b96c4fd5562683543276b72c488966d616bfe0fe4d213 caller="main.go:297 main.main" ``` -Here, `-create 1` refers that there is only one node in SQL Chain. +Here, `create '{"node":1}'` refers that there is only one node in SQL Chain. ```bash -$ cql -dsn covenantsql://address +$ cql console -dsn covenantsql://address ``` `address` is database id. Show the complete usage of `cql`: ```bash -$ cql -help +$ cql help ``` ## Use the `cql` From c2a8a9068b4cc5b44dae8ff02bc5146a6ea0f942 Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 19 Mar 2019 15:30:04 +0800 Subject: [PATCH 127/244] Update cql README. --- cmd/cql/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/cql/README.md b/cmd/cql/README.md index 40c9d0c90..94c211e58 100644 --- a/cmd/cql/README.md +++ b/cmd/cql/README.md @@ -1,4 +1,4 @@ -This doc introduce the usage of CovenantSQL commandline client `cql`. `cql` is a command line interface for batch scripting used for creating, querying, updating, and deleting the SQLChain and database adhere to the SQLChain. +This doc introduces the usage of CovenantSQL command line client `cql`. `cql` is a command line interface for batch scripting used for creating, querying, updating, and deleting the SQLChain and database adhere to the SQLChain. 
## Install Download [Latest Release](https://github.com/CovenantSQL/CovenantSQL/releases) or build from src: From b4688354e205a51f12bbd30789f20009ae00757b Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 19 Mar 2019 15:34:59 +0800 Subject: [PATCH 128/244] Update cql help description. --- cmd/cql/internal/adapter.go | 4 ++-- cmd/cql/internal/console.go | 8 ++++---- cmd/cql/internal/drop.go | 4 ++-- cmd/cql/internal/web.go | 4 ++-- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/cmd/cql/internal/adapter.go b/cmd/cql/internal/adapter.go index 789dc3e5c..856409751 100644 --- a/cmd/cql/internal/adapter.go +++ b/cmd/cql/internal/adapter.go @@ -34,9 +34,9 @@ var ( // CmdAdapter is cql adapter command entity. var CmdAdapter = &Command{ UsageLine: "cql adapter [-config file] [-password masterkey] [-tmp-path path] [-bg-log-level level] [address]", - Short: "start a database chain adapter", + Short: "start a SQLChain adapter", Long: ` -Adapter command serves a database chain adapter +Adapter command serves a SQLChain adapter e.g. cql adapter 127.0.0.1:7784 `, diff --git a/cmd/cql/internal/console.go b/cmd/cql/internal/console.go index adebc7988..57b6e0697 100644 --- a/cmd/cql/internal/console.go +++ b/cmd/cql/internal/console.go @@ -47,17 +47,17 @@ import ( // CmdConsole is cql console command entity. var CmdConsole = &Command{ UsageLine: "cql console [-config file] [-password masterkey] [-dsn dsn_string] [-command sqlcommand] [-file filename] [-out outputfile] [-no-rc true/false] [-single-transaction] [-variable variables] [-web web_addr] [-adapter adapter_addr]", - Short: "run a console for realtime sql operation", + Short: "run a console for interactive sql operation", Long: ` -Console command can run a real-time SQL console for CovenantSQL +Console command can run a interactive SQL console for CovenantSQL The -dsn param is required e.g. 
- cql console -dsn covenant://the_dsn_of_your_database + cql console -dsn covenantsql://the_dsn_of_your_database There is also a -command param for SQL script, and a -file param for reading SQL in a file. If those params are set, it will run SQL script and exit without staying console mode. e.g. - cql console -dsn covenant://the_dsn_of_your_database -command "create table test1(test2 int);" + cql console -dsn covenantsql://the_dsn_of_your_database -command "create table test1(test2 int);" `, } diff --git a/cmd/cql/internal/drop.go b/cmd/cql/internal/drop.go index 714264529..06cec51d5 100644 --- a/cmd/cql/internal/drop.go +++ b/cmd/cql/internal/drop.go @@ -27,11 +27,11 @@ var CmdDrop = &Command{ Long: ` Drop command can drop a database by DSN or database id e.g. - cql drop covenant://the_dsn_of_your_database + cql drop covenantsql://the_dsn_of_your_database Since CovenantSQL is blockchain database, you may want get confirm of drop operation. e.g. - cql drop -wait-tx-confirm covenant://the_dsn_of_your_database + cql drop -wait-tx-confirm covenantsql://the_dsn_of_your_database `, } diff --git a/cmd/cql/internal/web.go b/cmd/cql/internal/web.go index 3a56b5656..154e8a344 100644 --- a/cmd/cql/internal/web.go +++ b/cmd/cql/internal/web.go @@ -33,9 +33,9 @@ var ( // CmdWeb is cql web command. var CmdWeb = &Command{ UsageLine: "cql web [-config file] [-tmp-path path] [-bg-log-level level] [address]", - Short: "start a database chain web explorer", + Short: "start a SQLChain web explorer", Long: ` -Web command serves a database chain web explorer. +Web command serves a SQLChain web explorer. e.g. cql web 127.0.0.1:8546", `, From 1a9fa33769148a3a8af959c3419bbf5ceb7a77f7 Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 20 Mar 2019 11:31:44 +0800 Subject: [PATCH 129/244] Make cql read password of master key from terminal as default. 
--- cmd/cql/internal/cfg.go | 27 ++++++++++++++++++++++++++- cmd/cql/internal/help.go | 2 ++ cmd/cql/internal/web.go | 2 +- 3 files changed, 29 insertions(+), 2 deletions(-) diff --git a/cmd/cql/internal/cfg.go b/cmd/cql/internal/cfg.go index 1ce9206b8..153d088ed 100644 --- a/cmd/cql/internal/cfg.go +++ b/cmd/cql/internal/cfg.go @@ -19,8 +19,10 @@ package internal import ( "context" "errors" + "fmt" "os" "path/filepath" + "syscall" pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/client" @@ -30,12 +32,14 @@ import ( "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/sirupsen/logrus" + "golang.org/x/crypto/ssh/terminal" ) // These are general flags used by console and other commands. var ( configFile string password string + noPassword bool waitTxConfirmation bool // wait for transaction confirmation before exiting // Shard chain explorer/adapter stuff @@ -45,9 +49,10 @@ var ( func addCommonFlags(cmd *Command) { cmd.Flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file for covenantsql") - cmd.Flag.StringVar(&password, "password", "", "Master key password for covenantsql") + cmd.Flag.StringVar(&password, "password", "", "Master key password for covenantsql(NOT SAFE, for debug or script only)") // Undocumented, unstable debugging flags. 
+ cmd.Flag.BoolVar(&noPassword, "no-password", false, "Use empty password for master key") cmd.Flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, "Disable signature sign and verify, for testing") } @@ -55,6 +60,10 @@ func addCommonFlags(cmd *Command) { func configInit() { configFile = utils.HomeDirExpand(configFile) + if password == "" { + password = readMasterKey(noPassword) + } + // init covenantsql driver if err := client.Init(configFile, []byte(password)); err != nil { ConsoleLog.WithError(err).Error("init covenantsql client failed") @@ -107,3 +116,19 @@ func bgServerInit() { log.SetOutput(bgLog) log.SetStringLevel(bgLogLevel, log.InfoLevel) } + +// readMasterKey reads the password of private key from terminal +func readMasterKey(skip bool) string { + if skip { + return "" + } + fmt.Println("Enter master key(press Enter for default: \"\"): ") + bytePwd, err := terminal.ReadPassword(int(syscall.Stdin)) + fmt.Println() + if err != nil { + ConsoleLog.Errorf("read master key failed: %v", err) + SetExitStatus(1) + Exit() + } + return string(bytePwd) +} diff --git a/cmd/cql/internal/help.go b/cmd/cql/internal/help.go index 8373cb681..9c3aa5a80 100644 --- a/cmd/cql/internal/help.go +++ b/cmd/cql/internal/help.go @@ -80,6 +80,8 @@ func runHelp(cmd *Command, args []string) { } fmt.Fprintf(os.Stderr, "usage: %s\n", cmd.UsageLine) fmt.Fprintf(os.Stderr, cmd.Long) + fmt.Fprintf(os.Stderr, "\nParams:\n") + cmd.Flag.PrintDefaults() SetExitStatus(2) return } diff --git a/cmd/cql/internal/web.go b/cmd/cql/internal/web.go index 154e8a344..a51aab7ce 100644 --- a/cmd/cql/internal/web.go +++ b/cmd/cql/internal/web.go @@ -37,7 +37,7 @@ var CmdWeb = &Command{ Long: ` Web command serves a SQLChain web explorer. e.g. - cql web 127.0.0.1:8546", + cql web 127.0.0.1:8546 `, } From b707682c7d6a53fa73130568ac1eda51fd5357a3 Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 20 Mar 2019 11:41:52 +0800 Subject: [PATCH 130/244] Update test case with -no-password. 
--- bin/docker-entry.sh | 6 +++--- sqlchain/observer/observation_test.go | 8 ++++++-- test/compatibility/specific_old.sh | 8 ++++---- test/testnet_client/run.sh | 10 +++++----- 4 files changed, 18 insertions(+), 14 deletions(-) diff --git a/bin/docker-entry.sh b/bin/docker-entry.sh index 105107c27..be90e0d04 100755 --- a/bin/docker-entry.sh +++ b/bin/docker-entry.sh @@ -12,16 +12,16 @@ blockproducer) exec /app/cqld -config "${COVENANT_CONF}" -metric-web "${METRIC_WEB_ADDR}" "${@}" ;; observer) - exec /app/cql web -config "${COVENANT_CONF}" "${COVENANTSQL_OBSERVER_ADDR}" "${@}" + exec /app/cql web -config "${COVENANT_CONF}" -no-password "${COVENANTSQL_OBSERVER_ADDR}" "${@}" ;; adapter) - exec /app/cql adapter -config "${COVENANT_CONF}" "${COVENANTSQL_ADAPTER_ADDR}" "${@}" + exec /app/cql adapter -config "${COVENANT_CONF}" -no-password "${COVENANTSQL_ADAPTER_ADDR}" "${@}" ;; mysql-adapter) exec /app/cql-mysql-adapter -config "${COVENANT_CONF}" "${@}" ;; cli) - exec /app/cql console -config ${COVENANT_CONF} "${@}" + exec /app/cql console -config ${COVENANT_CONF} -no-password "${@}" ;; faucet) exec /app/cql-faucet -config ${COVENANT_CONF} "${@}" diff --git a/sqlchain/observer/observation_test.go b/sqlchain/observer/observation_test.go index 293f56e93..53d154a64 100644 --- a/sqlchain/observer/observation_test.go +++ b/sqlchain/observer/observation_test.go @@ -489,7 +489,9 @@ func TestFullProcess(t *testing.T) { observerCmd, err = utils.RunCommandNB( FJ(baseDir, "./bin/cql.test"), []string{"-test.coverprofile", FJ(baseDir, "./cmd/cql/observer.cover.out"), - "web", "-config", FJ(testWorkingDir, "./observation/node_observer/config.yaml"), + "web", + "-config", FJ(testWorkingDir, "./observation/node_observer/config.yaml"), + "-no-password", "-bg-log-level", "debug", "127.0.0.1:4663", }, @@ -720,7 +722,9 @@ func TestFullProcess(t *testing.T) { observerCmd, err = utils.RunCommandNB( FJ(baseDir, "./bin/cql.test"), []string{"-test.coverprofile", FJ(baseDir, 
"./cmd/cql/observer.cover.out"), - "web", "-config", FJ(testWorkingDir, "./observation/node_observer/config.yaml"), + "web", + "-config", FJ(testWorkingDir, "./observation/node_observer/config.yaml"), + "-no-password", "-bg-log-level", "debug", "127.0.0.1:4663", }, diff --git a/test/compatibility/specific_old.sh b/test/compatibility/specific_old.sh index ada976a52..c7dc3392d 100755 --- a/test/compatibility/specific_old.sh +++ b/test/compatibility/specific_old.sh @@ -67,9 +67,9 @@ if [[ $CLIENTBIN =~ "v0.4.0" ]]; then ${CLIENTBIN} -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -dsn ${dsn} \ -command 'show tables;' | tee result.log else - ${CLIENTBIN} balance -config node_c/config.yaml + ${CLIENTBIN} balance -config node_c/config.yaml -no-password - ${CLIENTBIN} create -config node_c/config.yaml -wait-tx-confirm '{"node":2}' | tee dsn.txt + ${CLIENTBIN} create -config node_c/config.yaml -wait-tx-confirm -no-password '{"node":2}' | tee dsn.txt #get dsn dsn=$(cat dsn.txt) @@ -78,10 +78,10 @@ else fi ${CLIENTBIN} console -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -dsn ${dsn} \ - -command 'create table test_for_new_account(column1 int);' + -command 'create table test_for_new_account(column1 int);' -no-password ${CLIENTBIN} console -config ${PROJECT_DIR}/test/integration/node_c/config.yaml -dsn ${dsn} \ - -command 'show tables;' | tee result.log + -command 'show tables;' -no-password | tee result.log fi grep "1 row" result.log diff --git a/test/testnet_client/run.sh b/test/testnet_client/run.sh index a55b3cf22..41b7cadf8 100755 --- a/test/testnet_client/run.sh +++ b/test/testnet_client/run.sh @@ -20,20 +20,20 @@ ${BIN}/cql-utils -tool addrgen -skip-master-key | tee wallet.txt wallet=$(awk '{print $3}' wallet.txt) #transfer some coin to above address -${BIN}/cql transfer -config ${PROJECT_DIR}/conf/testnet/config.yaml -wait-tx-confirm \ +${BIN}/cql transfer -config ${PROJECT_DIR}/conf/testnet/config.yaml -wait-tx-confirm -no-password \ 
'{"addr":"'${wallet}'", "amount":"100000000 Particle"}' -${BIN}/cql balance +${BIN}/cql balance -no-password -${BIN}/cql create -wait-tx-confirm '{"node":2}' | tee dsn.txt +${BIN}/cql create -wait-tx-confirm -no-password '{"node":2}' | tee dsn.txt #get dsn dsn=$(cat dsn.txt) -${BIN}/cql console -dsn ${dsn} \ +${BIN}/cql console -dsn ${dsn} -no-password \ -command 'create table test_for_new_account(column1 int);' -${BIN}/cql console -dsn ${dsn} \ +${BIN}/cql console -dsn ${dsn} -no-password \ -command 'show tables;' | tee result.log grep "1 row" result.log From 8199f47c3a689ccd59072698affd036cb30dceab Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 20 Mar 2019 11:48:07 +0800 Subject: [PATCH 131/244] Remove -password in all usage line. --- cmd/cql/internal/adapter.go | 2 +- cmd/cql/internal/balance.go | 2 +- cmd/cql/internal/cfg.go | 4 ++-- cmd/cql/internal/console.go | 2 +- cmd/cql/internal/create.go | 2 +- cmd/cql/internal/drop.go | 2 +- cmd/cql/internal/permission.go | 2 +- cmd/cql/internal/transfer.go | 2 +- 8 files changed, 9 insertions(+), 9 deletions(-) diff --git a/cmd/cql/internal/adapter.go b/cmd/cql/internal/adapter.go index 856409751..7b32bd4d5 100644 --- a/cmd/cql/internal/adapter.go +++ b/cmd/cql/internal/adapter.go @@ -33,7 +33,7 @@ var ( // CmdAdapter is cql adapter command entity. var CmdAdapter = &Command{ - UsageLine: "cql adapter [-config file] [-password masterkey] [-tmp-path path] [-bg-log-level level] [address]", + UsageLine: "cql adapter [-config file] [-tmp-path path] [-bg-log-level level] [address]", Short: "start a SQLChain adapter", Long: ` Adapter command serves a SQLChain adapter diff --git a/cmd/cql/internal/balance.go b/cmd/cql/internal/balance.go index 9e7c17b75..2d9dbd018 100644 --- a/cmd/cql/internal/balance.go +++ b/cmd/cql/internal/balance.go @@ -29,7 +29,7 @@ var ( // CmdBalance is cql balance command entity. 
var CmdBalance = &Command{ - UsageLine: "cql balance [-config file] [-password masterkey] [-token token_name]", + UsageLine: "cql balance [-config file] [-token token_name]", Short: "get the balance of current account", Long: ` Balance command can get CovenantSQL token balance of current account diff --git a/cmd/cql/internal/cfg.go b/cmd/cql/internal/cfg.go index 153d088ed..42ee87822 100644 --- a/cmd/cql/internal/cfg.go +++ b/cmd/cql/internal/cfg.go @@ -48,10 +48,10 @@ var ( ) func addCommonFlags(cmd *Command) { - cmd.Flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file for covenantsql") - cmd.Flag.StringVar(&password, "password", "", "Master key password for covenantsql(NOT SAFE, for debug or script only)") + cmd.Flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file for covenantsql (Usually no need to set, default is enough.)") // Undocumented, unstable debugging flags. + cmd.Flag.StringVar(&password, "password", "", "Master key password for covenantsql (NOT SAFE, for debug or script only)") cmd.Flag.BoolVar(&noPassword, "no-password", false, "Use empty password for master key") cmd.Flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, "Disable signature sign and verify, for testing") diff --git a/cmd/cql/internal/console.go b/cmd/cql/internal/console.go index 57b6e0697..1b5de655e 100644 --- a/cmd/cql/internal/console.go +++ b/cmd/cql/internal/console.go @@ -46,7 +46,7 @@ import ( // CmdConsole is cql console command entity. 
var CmdConsole = &Command{ - UsageLine: "cql console [-config file] [-password masterkey] [-dsn dsn_string] [-command sqlcommand] [-file filename] [-out outputfile] [-no-rc true/false] [-single-transaction] [-variable variables] [-web web_addr] [-adapter adapter_addr]", + UsageLine: "cql console [-config file] [-dsn dsn_string] [-command sqlcommand] [-file filename] [-out outputfile] [-no-rc true/false] [-single-transaction] [-variable variables] [-web web_addr] [-adapter adapter_addr]", Short: "run a console for interactive sql operation", Long: ` Console command can run a interactive SQL console for CovenantSQL diff --git a/cmd/cql/internal/create.go b/cmd/cql/internal/create.go index 2d3a293a3..145f4b12a 100644 --- a/cmd/cql/internal/create.go +++ b/cmd/cql/internal/create.go @@ -26,7 +26,7 @@ import ( // CmdCreate is cql create command entity. var CmdCreate = &Command{ - UsageLine: "cql create [-config file] [-password masterkey] [-wait-tx-confirm] [dbmeta]", + UsageLine: "cql create [-config file] [-wait-tx-confirm] [dbmeta]", Short: "create a database", Long: ` Create CovenantSQL database by database meta info JSON string, meta info must include node count. diff --git a/cmd/cql/internal/drop.go b/cmd/cql/internal/drop.go index 06cec51d5..5e2e3c23b 100644 --- a/cmd/cql/internal/drop.go +++ b/cmd/cql/internal/drop.go @@ -22,7 +22,7 @@ import ( // CmdDrop is cql drop command entity. var CmdDrop = &Command{ - UsageLine: "cql drop [-config file] [-password masterkey] [-wait-tx-confirm] [dsn/dbid]", + UsageLine: "cql drop [-config file] [-wait-tx-confirm] [dsn/dbid]", Short: "drop a database by dsn or database id", Long: ` Drop command can drop a database by DSN or database id diff --git a/cmd/cql/internal/permission.go b/cmd/cql/internal/permission.go index 695044d1c..34c7ce93b 100644 --- a/cmd/cql/internal/permission.go +++ b/cmd/cql/internal/permission.go @@ -26,7 +26,7 @@ import ( // CmdPermission is cql permission command entity. 
var CmdPermission = &Command{ - UsageLine: "cql permission [-config file] [-password masterkey] [-wait-tx-confirm] [perm_meta]", + UsageLine: "cql permission [-config file] [-wait-tx-confirm] [perm_meta]", Short: "update user's permission on specific sqlchain", Long: ` Permission command can give a user some specific permissions on your database diff --git a/cmd/cql/internal/transfer.go b/cmd/cql/internal/transfer.go index 97de11ed5..2b25cc5af 100644 --- a/cmd/cql/internal/transfer.go +++ b/cmd/cql/internal/transfer.go @@ -29,7 +29,7 @@ import ( // CmdTransfer is cql transfer command entity. var CmdTransfer = &Command{ - UsageLine: "cql transfer [-config file] [-password masterkey] [-wait-tx-confirm] [meta_json]", + UsageLine: "cql transfer [-config file] [-wait-tx-confirm] [meta_json]", Short: "transfer token to target account", Long: ` Transfer command can transfer your token to the target account. From 99b5532addb6f384d79ef7b69848bb92478ab097 Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 20 Mar 2019 12:09:05 +0800 Subject: [PATCH 132/244] Remove [] in required params. --- cmd/cql/internal/adapter.go | 4 ++-- cmd/cql/internal/console.go | 1 - cmd/cql/internal/create.go | 2 +- cmd/cql/internal/drop.go | 2 +- cmd/cql/internal/permission.go | 2 +- cmd/cql/internal/transfer.go | 2 +- cmd/cql/internal/web.go | 4 ++-- 7 files changed, 8 insertions(+), 9 deletions(-) diff --git a/cmd/cql/internal/adapter.go b/cmd/cql/internal/adapter.go index 7b32bd4d5..d54fad032 100644 --- a/cmd/cql/internal/adapter.go +++ b/cmd/cql/internal/adapter.go @@ -33,7 +33,7 @@ var ( // CmdAdapter is cql adapter command entity. 
var CmdAdapter = &Command{ - UsageLine: "cql adapter [-config file] [-tmp-path path] [-bg-log-level level] [address]", + UsageLine: "cql adapter [-config file] [-tmp-path path] [-bg-log-level level] address", Short: "start a SQLChain adapter", Long: ` Adapter command serves a SQLChain adapter @@ -78,7 +78,7 @@ func runAdapter(cmd *Command, args []string) { bgServerInit() if len(args) != 1 { - ConsoleLog.Error("Adapter command need listern address as param") + ConsoleLog.Error("Adapter command need listen address as param") SetExitStatus(1) return } diff --git a/cmd/cql/internal/console.go b/cmd/cql/internal/console.go index 1b5de655e..7cab756c2 100644 --- a/cmd/cql/internal/console.go +++ b/cmd/cql/internal/console.go @@ -50,7 +50,6 @@ var CmdConsole = &Command{ Short: "run a console for interactive sql operation", Long: ` Console command can run a interactive SQL console for CovenantSQL -The -dsn param is required e.g. cql console -dsn covenantsql://the_dsn_of_your_database diff --git a/cmd/cql/internal/create.go b/cmd/cql/internal/create.go index 145f4b12a..5ce5debfd 100644 --- a/cmd/cql/internal/create.go +++ b/cmd/cql/internal/create.go @@ -26,7 +26,7 @@ import ( // CmdCreate is cql create command entity. var CmdCreate = &Command{ - UsageLine: "cql create [-config file] [-wait-tx-confirm] [dbmeta]", + UsageLine: "cql create [-config file] [-wait-tx-confirm] db_meta_json", Short: "create a database", Long: ` Create CovenantSQL database by database meta info JSON string, meta info must include node count. diff --git a/cmd/cql/internal/drop.go b/cmd/cql/internal/drop.go index 5e2e3c23b..cc7561994 100644 --- a/cmd/cql/internal/drop.go +++ b/cmd/cql/internal/drop.go @@ -22,7 +22,7 @@ import ( // CmdDrop is cql drop command entity. 
var CmdDrop = &Command{ - UsageLine: "cql drop [-config file] [-wait-tx-confirm] [dsn/dbid]", + UsageLine: "cql drop [-config file] [-wait-tx-confirm] dsn/dbid", Short: "drop a database by dsn or database id", Long: ` Drop command can drop a database by DSN or database id diff --git a/cmd/cql/internal/permission.go b/cmd/cql/internal/permission.go index 34c7ce93b..74ef4546a 100644 --- a/cmd/cql/internal/permission.go +++ b/cmd/cql/internal/permission.go @@ -26,7 +26,7 @@ import ( // CmdPermission is cql permission command entity. var CmdPermission = &Command{ - UsageLine: "cql permission [-config file] [-wait-tx-confirm] [perm_meta]", + UsageLine: "cql permission [-config file] [-wait-tx-confirm] perm_meta_json", Short: "update user's permission on specific sqlchain", Long: ` Permission command can give a user some specific permissions on your database diff --git a/cmd/cql/internal/transfer.go b/cmd/cql/internal/transfer.go index 2b25cc5af..ff23f3873 100644 --- a/cmd/cql/internal/transfer.go +++ b/cmd/cql/internal/transfer.go @@ -29,7 +29,7 @@ import ( // CmdTransfer is cql transfer command entity. var CmdTransfer = &Command{ - UsageLine: "cql transfer [-config file] [-wait-tx-confirm] [meta_json]", + UsageLine: "cql transfer [-config file] [-wait-tx-confirm] meta_json", Short: "transfer token to target account", Long: ` Transfer command can transfer your token to the target account. diff --git a/cmd/cql/internal/web.go b/cmd/cql/internal/web.go index a51aab7ce..7033a980a 100644 --- a/cmd/cql/internal/web.go +++ b/cmd/cql/internal/web.go @@ -32,7 +32,7 @@ var ( // CmdWeb is cql web command. var CmdWeb = &Command{ - UsageLine: "cql web [-config file] [-tmp-path path] [-bg-log-level level] [address]", + UsageLine: "cql web [-config file] [-tmp-path path] [-bg-log-level level] address", Short: "start a SQLChain web explorer", Long: ` Web command serves a SQLChain web explorer. 
@@ -70,7 +70,7 @@ func runWeb(cmd *Command, args []string) { bgServerInit() if len(args) != 1 { - ConsoleLog.Error("Web command need listern address as param") + ConsoleLog.Error("Web command need listen address as param") SetExitStatus(1) return } From 75051baa05a28ed8142f8b5c73b76d0a723d6695 Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 20 Mar 2019 12:13:54 +0800 Subject: [PATCH 133/244] Exit with 0 if cql help has no errors. --- cmd/cql/internal/help.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cmd/cql/internal/help.go b/cmd/cql/internal/help.go index 9c3aa5a80..fb0122d3e 100644 --- a/cmd/cql/internal/help.go +++ b/cmd/cql/internal/help.go @@ -82,12 +82,12 @@ func runHelp(cmd *Command, args []string) { fmt.Fprintf(os.Stderr, cmd.Long) fmt.Fprintf(os.Stderr, "\nParams:\n") cmd.Flag.PrintDefaults() - SetExitStatus(2) return } //Not support command MainUsage() + SetExitStatus(2) } // MainUsage prints cql base help @@ -119,6 +119,5 @@ Use "cql help " for more information about a command. helpMsg += helpTail fmt.Fprintf(os.Stderr, helpMsg) - SetExitStatus(2) Exit() } From 8ef09e140f92515f838234cff72c7e8a5572ed27 Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 20 Mar 2019 12:23:17 +0800 Subject: [PATCH 134/244] Rename permission command to grant. --- cmd/cql/internal/{permission.go => grant.go} | 24 ++++++++++---------- cmd/cql/main.go | 2 +- 2 files changed, 13 insertions(+), 13 deletions(-) rename cmd/cql/internal/{permission.go => grant.go} (78%) diff --git a/cmd/cql/internal/permission.go b/cmd/cql/internal/grant.go similarity index 78% rename from cmd/cql/internal/permission.go rename to cmd/cql/internal/grant.go index 74ef4546a..1059ea049 100644 --- a/cmd/cql/internal/permission.go +++ b/cmd/cql/internal/grant.go @@ -24,26 +24,26 @@ import ( "github.com/CovenantSQL/CovenantSQL/types" ) -// CmdPermission is cql permission command entity. 
-var CmdPermission = &Command{ - UsageLine: "cql permission [-config file] [-wait-tx-confirm] perm_meta_json", - Short: "update user's permission on specific sqlchain", +// CmdGrant is cql grant command entity. +var CmdGrant = &Command{ + UsageLine: "cql grant [-config file] [-wait-tx-confirm] permission_meta_json", + Short: "grant a user's permissions on specific sqlchain", Long: ` -Permission command can give a user some specific permissions on your database +Grant command can give a user some specific permissions on your database e.g. - cql permission '{"chain":"your_chain_addr","user":"user_addr","perm":"perm_struct"}' + cql grant '{"chain":"your_chain_addr","user":"user_addr","perm":"perm_struct"}' Since CovenantSQL is blockchain database, you may want get confirm of permission update. e.g. - cql permission -wait-tx-confirm '{"chain":"your_chain_addr","user":"user_addr","perm":"perm_struct"}' + cql grant -wait-tx-confirm '{"chain":"your_chain_addr","user":"user_addr","perm":"perm_struct"}' `, } func init() { - CmdPermission.Run = runPermission + CmdGrant.Run = runGrant - addCommonFlags(CmdPermission) - addWaitFlag(CmdPermission) + addCommonFlags(CmdGrant) + addWaitFlag(CmdGrant) } type userPermission struct { @@ -60,11 +60,11 @@ type userPermPayload struct { Patterns []string `json:"patterns"` } -func runPermission(cmd *Command, args []string) { +func runGrant(cmd *Command, args []string) { configInit() if len(args) != 1 { - ConsoleLog.Error("Permission command need CovenantSQL perm_meta json string as param") + ConsoleLog.Error("Grant command need CovenantSQL perm_meta json string as param") SetExitStatus(1) return } diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 7529a0df9..b6cd69930 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -37,7 +37,7 @@ func init() { internal.CmdDrop, internal.CmdBalance, internal.CmdTransfer, - internal.CmdPermission, + internal.CmdGrant, internal.CmdWeb, internal.CmdAdapter, internal.CmdVersion, From 
a02b689d235f8feef2a3eb6732aae80967639221 Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 20 Mar 2019 12:28:28 +0800 Subject: [PATCH 135/244] Rename web command to explorer. --- cmd/cql/internal/console.go | 12 +++---- cmd/cql/internal/{web.go => explorer.go} | 42 ++++++++++++------------ cmd/cql/main.go | 2 +- 3 files changed, 28 insertions(+), 28 deletions(-) rename cmd/cql/internal/{web.go => explorer.go} (52%) diff --git a/cmd/cql/internal/console.go b/cmd/cql/internal/console.go index 7cab756c2..c125c233f 100644 --- a/cmd/cql/internal/console.go +++ b/cmd/cql/internal/console.go @@ -46,7 +46,7 @@ import ( // CmdConsole is cql console command entity. var CmdConsole = &Command{ - UsageLine: "cql console [-config file] [-dsn dsn_string] [-command sqlcommand] [-file filename] [-out outputfile] [-no-rc true/false] [-single-transaction] [-variable variables] [-web web_addr] [-adapter adapter_addr]", + UsageLine: "cql console [-config file] [-dsn dsn_string] [-command sqlcommand] [-file filename] [-out outputfile] [-no-rc true/false] [-single-transaction] [-variable variables] [-explorer explorer_addr] [-adapter adapter_addr]", Short: "run a console for interactive sql operation", Long: ` Console command can run a interactive SQL console for CovenantSQL @@ -82,7 +82,7 @@ func init() { CmdConsole.Flag.StringVar(&command, "command", "", "Run only single command (SQL or usql internal command) and exit") CmdConsole.Flag.StringVar(&fileName, "file", "", "Execute commands from file and exit") CmdConsole.Flag.StringVar(&adapterAddr, "adapter", "", "Address to serve a database chain adapter, e.g. :7784") - CmdConsole.Flag.StringVar(&webAddr, "web", "", "Address serve a database chain explorer, e.g. :8546") + CmdConsole.Flag.StringVar(&explorerAddr, "explorer", "", "Address serve a database chain explorer, e.g. 
:8546") } // SqTime provides a type that will correctly scan the various timestamps @@ -369,8 +369,8 @@ func runConsole(cmd *Command, args []string) { defer cancelFunc() } - if webAddr != "" { - cancelFunc := startWebServer(webAddr) + if explorerAddr != "" { + cancelFunc := startExplorerServer(explorerAddr) defer cancelFunc() } @@ -391,8 +391,8 @@ func runConsole(cmd *Command, args []string) { return } - if adapterAddr != "" || webAddr != "" { - ConsoleLog.Printf("Ctrl + C to stop background server on %s %s\n", adapterAddr, webAddr) + if adapterAddr != "" || explorerAddr != "" { + ConsoleLog.Printf("Ctrl + C to stop background server on %s %s\n", adapterAddr, explorerAddr) <-utils.WaitForExit() } } diff --git a/cmd/cql/internal/web.go b/cmd/cql/internal/explorer.go similarity index 52% rename from cmd/cql/internal/web.go rename to cmd/cql/internal/explorer.go index 7033a980a..0939900ab 100644 --- a/cmd/cql/internal/web.go +++ b/cmd/cql/internal/explorer.go @@ -24,62 +24,62 @@ import ( ) var ( - webAddr string // Web addr + explorerAddr string // Explorer addr - webService *observer.Service - webHTTPServer *http.Server + explorerService *observer.Service + explorerHTTPServer *http.Server ) -// CmdWeb is cql web command. -var CmdWeb = &Command{ - UsageLine: "cql web [-config file] [-tmp-path path] [-bg-log-level level] address", - Short: "start a SQLChain web explorer", +// CmdExplorer is cql explorer command. +var CmdExplorer = &Command{ + UsageLine: "cql explorer [-config file] [-tmp-path path] [-bg-log-level level] address", + Short: "start a SQLChain explorer explorer", Long: ` -Web command serves a SQLChain web explorer. +Explorer command serves a SQLChain web explorer. e.g. 
- cql web 127.0.0.1:8546 + cql explorer 127.0.0.1:8546 `, } func init() { - CmdWeb.Run = runWeb + CmdExplorer.Run = runExplorer - addCommonFlags(CmdWeb) - addBgServerFlag(CmdWeb) + addCommonFlags(CmdExplorer) + addBgServerFlag(CmdExplorer) } -func startWebServer(webAddr string) func() { +func startExplorerServer(explorerAddr string) func() { var err error - webService, webHTTPServer, err = observer.StartObserver(webAddr, Version) + explorerService, explorerHTTPServer, err = observer.StartObserver(explorerAddr, Version) if err != nil { ConsoleLog.WithError(err).Error("start explorer failed") SetExitStatus(1) return nil } - ConsoleLog.Infof("web server started on %s", webAddr) + ConsoleLog.Infof("explorer server started on %s", explorerAddr) return func() { - _ = observer.StopObserver(webService, webHTTPServer) + _ = observer.StopObserver(explorerService, explorerHTTPServer) ConsoleLog.Info("explorer stopped") } } -func runWeb(cmd *Command, args []string) { +func runExplorer(cmd *Command, args []string) { configInit() bgServerInit() if len(args) != 1 { - ConsoleLog.Error("Web command need listen address as param") + ConsoleLog.Error("Explorer command need listen address as param") SetExitStatus(1) return } - webAddr = args[0] + explorerAddr = args[0] - cancelFunc := startWebServer(webAddr) + cancelFunc := startExplorerServer(explorerAddr) ExitIfErrors() defer cancelFunc() - ConsoleLog.Printf("Ctrl + C to stop web server on %s\n", webAddr) + ConsoleLog.Printf("Ctrl + C to stop explorer server on %s\n", explorerAddr) <-utils.WaitForExit() } diff --git a/cmd/cql/main.go b/cmd/cql/main.go index b6cd69930..72e2972fc 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -38,7 +38,7 @@ func init() { internal.CmdBalance, internal.CmdTransfer, internal.CmdGrant, - internal.CmdWeb, + internal.CmdExplorer, internal.CmdAdapter, internal.CmdVersion, internal.CmdHelp, From 4c5d3f86fda7ec3f9d9d59e7589180fee6d70e3a Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 20 Mar 2019 
12:31:56 +0800 Subject: [PATCH 136/244] Format cql help command tab --- cmd/cql/internal/help.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/cql/internal/help.go b/cmd/cql/internal/help.go index fb0122d3e..042ccda82 100644 --- a/cmd/cql/internal/help.go +++ b/cmd/cql/internal/help.go @@ -86,8 +86,8 @@ func runHelp(cmd *Command, args []string) { } //Not support command - MainUsage() SetExitStatus(2) + MainUsage() } // MainUsage prints cql base help @@ -111,8 +111,8 @@ Use "cql help " for more information about a command. continue } cmdName := cmd.Name() - if len(cmd.Name()) < 8 { - cmdName += "\t" + for len(cmdName) < 10 { + cmdName += " " } helpMsg += "\t" + cmdName + "\t" + cmd.Short + "\n" } From b5c005a9d314c1961381f949810e3636470ddee0 Mon Sep 17 00:00:00 2001 From: laodouya Date: Wed, 20 Mar 2019 12:50:27 +0800 Subject: [PATCH 137/244] Fix cql explorer unit test. --- sqlchain/observer/observation_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sqlchain/observer/observation_test.go b/sqlchain/observer/observation_test.go index 53d154a64..284b3a919 100644 --- a/sqlchain/observer/observation_test.go +++ b/sqlchain/observer/observation_test.go @@ -489,7 +489,7 @@ func TestFullProcess(t *testing.T) { observerCmd, err = utils.RunCommandNB( FJ(baseDir, "./bin/cql.test"), []string{"-test.coverprofile", FJ(baseDir, "./cmd/cql/observer.cover.out"), - "web", + "explorer", "-config", FJ(testWorkingDir, "./observation/node_observer/config.yaml"), "-no-password", "-bg-log-level", "debug", @@ -722,7 +722,7 @@ func TestFullProcess(t *testing.T) { observerCmd, err = utils.RunCommandNB( FJ(baseDir, "./bin/cql.test"), []string{"-test.coverprofile", FJ(baseDir, "./cmd/cql/observer.cover.out"), - "web", + "explorer", "-config", FJ(testWorkingDir, "./observation/node_observer/config.yaml"), "-no-password", "-bg-log-level", "debug", From 207db598adfbd2ac1b30d8e0354d788cc2aee310 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: 
Mon, 18 Mar 2019 16:38:40 +0800 Subject: [PATCH 138/244] Fix bug in dbms updatePermission call --- worker/dbms.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/worker/dbms.go b/worker/dbms.go index 9ea0622d8..9004bf683 100644 --- a/worker/dbms.go +++ b/worker/dbms.go @@ -295,10 +295,11 @@ func (dbms *DBMS) UpdatePermission(dbID proto.DatabaseID, user proto.AccountAddr } else { exist := false for _, u := range profile.Users { - u.Address = user - u.Permission = permStat.Permission - u.Status = permStat.Status - exist = true + if u.Address == user { + u.Permission = permStat.Permission + u.Status = permStat.Status + exist = true + } } if !exist { profile.Users = append(profile.Users, &types.SQLChainUser{ From c2d1e3dc578155aee201fb6b870b574cd1d90fee Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 18 Mar 2019 16:39:13 +0800 Subject: [PATCH 139/244] Allow permission revocation in transaction processing --- blockproducer/metastate.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/blockproducer/metastate.go b/blockproducer/metastate.go index 2473d899b..c21c68e49 100644 --- a/blockproducer/metastate.go +++ b/blockproducer/metastate.go @@ -825,13 +825,6 @@ func (s *metaState) updatePermission(tx *types.UpdatePermission) (err error) { }).WithError(ErrDatabaseNotFound).Error("unexpected error in updatePermission") return ErrDatabaseNotFound } - if !tx.Permission.IsValid() { - log.WithFields(log.Fields{ - "permission": tx.Permission, - "dbID": tx.TargetSQLChain.DatabaseID(), - }).WithError(ErrInvalidPermission).Error("unexpected error in updatePermission") - return ErrInvalidPermission - } // check whether sender has super privilege and find targetUser numOfSuperUsers := 0 From e6f964dbbe0d309c362eca8261b8a6835b9279bf Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 18 Mar 2019 17:04:09 +0800 Subject: [PATCH 140/244] Update faucet to support various new web operations --- cmd/cql-faucet/api.go | 404 +++++++++++++++++++++++----- 
cmd/cql-faucet/config.go | 34 +-- cmd/cql-faucet/errors.go | 24 +- cmd/cql-faucet/main.go | 14 +- cmd/cql-faucet/persistence.go | 214 +++------------ cmd/cql-faucet/resolver.go | 70 ----- cmd/cql-faucet/rpc.go | 30 --- cmd/cql-faucet/verifier.go | 452 -------------------------------- cmd/cql-faucet/verifier_test.go | 68 ----- 9 files changed, 390 insertions(+), 920 deletions(-) delete mode 100644 cmd/cql-faucet/resolver.go delete mode 100644 cmd/cql-faucet/rpc.go delete mode 100644 cmd/cql-faucet/verifier.go delete mode 100644 cmd/cql-faucet/verifier_test.go diff --git a/cmd/cql-faucet/api.go b/cmd/cql-faucet/api.go index 9fca5ad0a..f3e16c478 100644 --- a/cmd/cql-faucet/api.go +++ b/cmd/cql-faucet/api.go @@ -20,142 +20,418 @@ import ( "encoding/json" "fmt" "net/http" + "net/url" "regexp" "time" + "github.com/gorilla/handlers" "github.com/gorilla/mux" + "github.com/pingcap/errors" + pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" + "github.com/CovenantSQL/CovenantSQL/client" + "github.com/CovenantSQL/CovenantSQL/crypto" + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/kms" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/route" + "github.com/CovenantSQL/CovenantSQL/rpc" + "github.com/CovenantSQL/CovenantSQL/types" "github.com/CovenantSQL/CovenantSQL/utils/log" ) const ( - argAddress = "address" - argMediaURL = "media_url" - argApplicationID = "id" + argAccount = "account" + argEmail = "email" + argDatabase = "db" + argTx = "tx" ) var ( - apiTimeout = time.Second * 10 - regexAddress = regexp.MustCompile("^[a-zA-Z0-9]{64}$") - regexMediaURL = regexp.MustCompile("^(http|ftp|https)://([\\w\\-_]+(?:(?:\\.[\\w\\-_]+)+))([\\w\\-\\.,@?^=%&:/~\\+#]*[\\w\\-\\@?^=%&/~\\+#])?$") - regexApplicationID = regexp.MustCompile("^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-4[a-fA-F0-9]{3}-[8|9|aA|bB][a-fA-F0-9]{3}-[a-fA-F0-9]{12}$") + apiTimeout = 
time.Minute * 10 + regexAccount = regexp.MustCompile("^[a-zA-Z0-9]{64}$") ) +func jsonContentType(next http.Handler) http.Handler { + return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + // test if request is post + if r.Method == http.MethodPost && + r.Header.Get("Content-Type") == "application/json" && + r.Body != nil { + // parse json and set to form in request + var d map[string]interface{} + + if err := json.NewDecoder(r.Body).Decode(&d); err != nil { + // decode failed + log.WithError(err).Warning("decode request failed") + } else { + // fill data to new form + r.Form = make(url.Values) + + for k, v := range d { + r.Form.Set(k, fmt.Sprintf("%v", v)) + } + + r.PostForm = r.Form + } + } + + next.ServeHTTP(rw, r) + }) +} + func sendResponse(code int, success bool, msg interface{}, data interface{}, rw http.ResponseWriter) { msgStr := "ok" if msg != nil { msgStr = fmt.Sprint(msg) } - // cors support - rw.Header().Set("Access-Control-Allow-Origin", "*") rw.WriteHeader(code) - json.NewEncoder(rw).Encode(map[string]interface{}{ + _ = json.NewEncoder(rw).Encode(map[string]interface{}{ "status": msgStr, "success": success, "data": data, }) } -func corsHandler(rw http.ResponseWriter, r *http.Request) { - rw.Header().Set("Access-Control-Allow-Origin", "*") - rw.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS, PUT, DELETE") - rw.Header().Set("Access-Control-Allow-Headers", "Accept, Content-Type, Content-Length, Accept-Encoding, Authorization") - rw.WriteHeader(http.StatusOK) - rw.Write([]byte{}) +type service struct { + p *Persistence + addr proto.AccountAddress } -type tokenDispenser struct { - p *Persistence +func (d *service) parseAccountAddress(account string) (addr proto.AccountAddress, err error) { + var h *hash.Hash + + if h, err = hash.NewHashFromStr(account); err != nil { + return + } + + addr = proto.AccountAddress(*h) + return } -func (d *tokenDispenser) poll(rw http.ResponseWriter, r *http.Request) { +func (d *service) 
applyToken(rw http.ResponseWriter, r *http.Request) { // get args - applicationID := r.FormValue(argApplicationID) - address := r.FormValue(argAddress) + var ( + account = r.FormValue(argAccount) + email = r.FormValue(argEmail) + err error + applicationID string + txHash hash.Hash + ) // validate args - if !regexAddress.MatchString(address) { - sendResponse(http.StatusBadRequest, false, ErrInvalidAddress.Error(), nil, rw) + if !regexAccount.MatchString(account) { + // error + sendResponse(http.StatusBadRequest, false, ErrInvalidAccount.Error(), nil, rw) return } - if !regexApplicationID.MatchString(applicationID) { - sendResponse(http.StatusBadRequest, false, ErrInvalidApplicationID.Error(), nil, rw) + // check limits + if err = d.p.checkAccountLimit(account); err != nil { + sendResponse(http.StatusTooManyRequests, false, err.Error(), nil, rw) return } - if r, err := d.p.queryState(address, applicationID); err != nil { - // error - sendResponse(http.StatusBadRequest, false, err.Error(), nil, rw) - } else { - // build response - sendResponse(http.StatusOK, true, nil, map[string]interface{}{ - "id": r.applicationID, - "state": int(r.state), - "state_desc": r.state.String(), - "reason": r.failReason, - }, rw) + if err = d.p.checkEmailLimit(email); err != nil { + sendResponse(http.StatusTooManyRequests, false, err.Error(), nil, rw) + return + } + + // account address + if accountAddr, err := d.parseAccountAddress(account); err != nil { + sendResponse(http.StatusBadRequest, false, ErrInvalidAccount.Error(), nil, rw) + return + } else if txHash, err = client.TransferToken(accountAddr, uint64(d.p.tokenAmount), types.Particle); err != nil { + // send token + sendResponse(http.StatusInternalServerError, false, err.Error(), nil, rw) + return + } + + // add record + if applicationID, err = d.p.addRecord(account, email); err != nil { + sendResponse(http.StatusInternalServerError, false, err.Error(), nil, rw) + return } + sendResponse(http.StatusOK, true, nil, 
map[string]interface{}{ + "id": applicationID, + "tx": txHash.String(), + }, rw) + return } -func (d *tokenDispenser) application(rw http.ResponseWriter, r *http.Request) { +func (d *service) getBalance(rw http.ResponseWriter, r *http.Request) { // get args - address := r.FormValue(argAddress) - mediaURL := r.FormValue(argMediaURL) + account := r.FormValue(argAccount) - // validate args - if !regexAddress.MatchString(address) { + if !regexAccount.MatchString(account) { // error - sendResponse(http.StatusBadRequest, false, ErrInvalidAddress.Error(), nil, rw) + sendResponse(http.StatusBadRequest, false, ErrInvalidAccount.Error(), nil, rw) return } - if !regexMediaURL.MatchString(mediaURL) { - // error - sendResponse(http.StatusBadRequest, false, ErrInvalidURL, nil, rw) + // get account balance + var ( + req = new(types.QueryAccountTokenBalanceReq) + resp = new(types.QueryAccountTokenBalanceResp) + err error + ) + + if req.Addr, err = d.parseAccountAddress(account); err != nil { + sendResponse(http.StatusBadRequest, false, ErrInvalidAccount.Error(), nil, rw) + return + } + + if err = rpc.RequestBP(route.MCCQueryAccountTokenBalance.String(), req, resp); err != nil { + sendResponse(http.StatusInternalServerError, false, err.Error(), nil, rw) + return + } + + sendResponse(http.StatusOK, true, nil, map[string]interface{}{"balance": resp.Balance}, rw) +} + +func (d *service) createDB(rw http.ResponseWriter, r *http.Request) { + // get args + account := r.FormValue(argAccount) + + if !regexAccount.MatchString(account) { + sendResponse(http.StatusBadRequest, false, ErrInvalidAccount.Error(), nil, rw) + return + } + + var ( + addr proto.AccountAddress + txCreateHash hash.Hash + txCreateState pi.TransactionState + dsn string + dbID proto.DatabaseID + dbAccountAddr proto.AccountAddress + err error + cfg *client.Config + txUpdatePermHash hash.Hash + ) + + if addr, err = d.parseAccountAddress(account); err != nil { + sendResponse(http.StatusBadRequest, false, 
ErrInvalidAccount.Error(), nil, rw) + return + } + + meta := client.ResourceMeta{} + meta.Node = 1 + + if txCreateHash, dsn, err = client.Create(meta); err != nil { + sendResponse(http.StatusInternalServerError, false, err.Error(), nil, rw) + return + } + + if cfg, err = client.ParseDSN(dsn); err != nil { + sendResponse(http.StatusInternalServerError, false, err.Error(), nil, rw) + return + } + + dbID = proto.DatabaseID(cfg.DatabaseID) + + if txCreateState, err = client.WaitTxConfirmation(r.Context(), txCreateHash); err != nil { + sendResponse(http.StatusInternalServerError, false, err.Error(), nil, rw) + return + } else if txCreateState != pi.TransactionStateConfirmed { + sendResponse(http.StatusInternalServerError, false, "create database failed", nil, rw) + return + } + + if dbAccountAddr, err = dbID.AccountAddress(); err != nil { + sendResponse(http.StatusInternalServerError, false, err.Error(), nil, rw) + return + } + + // update permission, add current user as admin + if txUpdatePermHash, err = client.UpdatePermission( + addr, dbAccountAddr, types.UserPermissionFromRole(types.Admin)); err != nil { + sendResponse(http.StatusInternalServerError, false, err.Error(), nil, rw) + return + } + + sendResponse(http.StatusOK, true, nil, map[string]interface{}{ + "tx_create": txCreateHash.String(), + "tx_update_permission": txUpdatePermHash.String(), + "db": dbID, + }, rw) +} + +func (d *service) getDBBalance(rw http.ResponseWriter, r *http.Request) { + // get args + account := r.FormValue(argAccount) + dbID := r.FormValue(argDatabase) + + if !regexAccount.MatchString(account) { + sendResponse(http.StatusBadRequest, false, ErrInvalidAccount.Error(), nil, rw) + return + } + + var ( + addr proto.AccountAddress + req = new(types.QuerySQLChainProfileReq) + resp = new(types.QuerySQLChainProfileResp) + err error + ) + + if addr, err = d.parseAccountAddress(account); err != nil { + sendResponse(http.StatusBadRequest, false, ErrInvalidAccount.Error(), nil, rw) + return + } + + 
req.DBID = proto.DatabaseID(dbID) + + if err = rpc.RequestBP(route.MCCQuerySQLChainProfile.String(), req, resp); err != nil { + sendResponse(http.StatusInternalServerError, false, err.Error(), nil, rw) return } - if applicationID, err := d.p.enqueueApplication(address, mediaURL); err != nil { - var status = http.StatusBadRequest - if err == ErrAddressQuotaExceeded || err == ErrAccountQuotaExceeded { - status = http.StatusTooManyRequests - } else if err == ErrEnqueueApplication { - status = http.StatusInternalServerError + for _, user := range resp.Profile.Users { + if user.Address == addr { + sendResponse(http.StatusOK, true, nil, map[string]interface{}{ + "deposit": user.Deposit, + "arrears": user.Arrears, + "advance_payment": user.AdvancePayment, + }, rw) + return } - sendResponse(status, false, err.Error(), nil, rw) - } else { - sendResponse(http.StatusOK, true, nil, map[string]interface{}{ - "id": applicationID, - }, rw) } - return + sendResponse(http.StatusBadRequest, false, ErrInvalidDatabase.Error(), nil, rw) +} + +func (d *service) privatizeDB(rw http.ResponseWriter, r *http.Request) { + // get args + account := r.FormValue(argAccount) + rawDBID := r.FormValue(argDatabase) + + if !regexAccount.MatchString(account) { + sendResponse(http.StatusBadRequest, false, ErrInvalidAccount.Error(), nil, rw) + return + } + + if !regexAccount.MatchString(rawDBID) { + sendResponse(http.StatusBadRequest, false, ErrInvalidDatabase.Error(), nil, rw) + return + } + + var ( + addr proto.AccountAddress + dbID = proto.DatabaseID(rawDBID) + dbAccountAddr proto.AccountAddress + req = new(types.QuerySQLChainProfileReq) + resp = new(types.QuerySQLChainProfileResp) + err error + txHash hash.Hash + ) + + if addr, err = d.parseAccountAddress(account); err != nil { + sendResponse(http.StatusBadRequest, false, ErrInvalidAccount.Error(), nil, rw) + return + } + + req.DBID = dbID + + if err = rpc.RequestBP(route.MCCQuerySQLChainProfile.String(), req, resp); err != nil { + 
sendResponse(http.StatusInternalServerError, false, ErrInvalidDatabase.Error(), nil, rw) + return + } + + // check current account existence + found := false + + for _, user := range resp.Profile.Users { + if user.Address == addr && user.Permission.HasSuperPermission() { + found = true + break + } + } + + if !found { + sendResponse(http.StatusBadRequest, false, ErrInvalidDatabase.Error(), nil, rw) + return + } + + if dbAccountAddr, err = dbID.AccountAddress(); err != nil { + sendResponse(http.StatusInternalServerError, false, err.Error(), nil, rw) + return + } + + if txHash, err = client.UpdatePermission(d.addr, dbAccountAddr, types.UserPermissionFromRole(types.Void)); err != nil { + sendResponse(http.StatusInternalServerError, false, err.Error(), nil, rw) + return + } + + sendResponse(http.StatusOK, true, nil, map[string]interface{}{"tx": txHash}, rw) } -func startAPI(v *Verifier, p *Persistence, listenAddr string) (server *http.Server, err error) { +func (d *service) waitTx(rw http.ResponseWriter, r *http.Request) { + // get args + tx := r.FormValue("tx") + + var ( + txHash *hash.Hash + err error + txState pi.TransactionState + ) + + if txHash, err = hash.NewHashFromStr(tx); err != nil { + sendResponse(http.StatusBadRequest, false, err.Error(), nil, rw) + return + } + + if txState, err = client.WaitTxConfirmation(r.Context(), *txHash); err != nil { + sendResponse(http.StatusInternalServerError, false, err.Error(), nil, rw) + return + } + + sendResponse(http.StatusOK, false, nil, map[string]interface{}{"state": txState.String()}, rw) +} + +func startAPI(p *Persistence, listenAddr string) (server *http.Server, err error) { router := mux.NewRouter() router.HandleFunc("/", func(rw http.ResponseWriter, r *http.Request) { sendResponse(http.StatusOK, true, nil, nil, rw) }).Methods("GET") - dispenser := &tokenDispenser{ - p: p, + var ( + addr proto.AccountAddress + pk *asymmetric.PublicKey + ) + + if pk, err = kms.GetLocalPublicKey(); err != nil { + err = 
errors.Wrapf(err, "get faucet account address failed") + return + } else if addr, err = crypto.PubKeyHash(pk); err != nil { + err = errors.Wrapf(err, "convert account address failed") + return + } + + service := &service{ + p: p, + addr: addr, } v1Router := router.PathPrefix("/v1").Subrouter() - v1Router.HandleFunc("/faucet", dispenser.application).Methods("POST") - v1Router.HandleFunc("/faucet", dispenser.poll).Methods("GET") - v1Router.HandleFunc("/faucet", corsHandler).Methods("OPTIONS") + v1Router.Use(jsonContentType) + v1Router.HandleFunc("/apply_token", service.applyToken).Methods("POST") + v1Router.HandleFunc("/account_balance", service.getBalance).Methods("GET", "POST") + v1Router.HandleFunc("/db_balance", service.getDBBalance).Methods("GET", "POST") + v1Router.HandleFunc("/create_database", service.getDBBalance).Methods("POST") + v1Router.HandleFunc("/privatize", service.privatizeDB).Methods("POST") + v1Router.HandleFunc("/wait_tx", service.waitTx).Methods("GET", "POST") server = &http.Server{ Addr: listenAddr, WriteTimeout: apiTimeout, ReadTimeout: apiTimeout, IdleTimeout: apiTimeout, - Handler: router, + Handler: handlers.CORS( + handlers.AllowedHeaders([]string{"Content-Type"}), + )(router), } go func() { diff --git a/cmd/cql-faucet/config.go b/cmd/cql-faucet/config.go index 00e0cb2ed..1f4205a84 100644 --- a/cmd/cql-faucet/config.go +++ b/cmd/cql-faucet/config.go @@ -18,25 +18,21 @@ package main import ( "io/ioutil" - "time" yaml "gopkg.in/yaml.v2" "github.com/CovenantSQL/CovenantSQL/utils/log" ) -// Config defines the configurable options for faucet application backend. +// Config defines the configurable options for faucet applyToken backend. 
type Config struct { // faucet server related - ListenAddr string `yaml:"ListenAddr"` - URLRequired string `yaml:"URLRequired"` // can be a part of a valid url - ContentRequired []string `yaml:"ContentRequired"` - FaucetAmount int64 `yaml:"FaucetAmount"` - DatabaseID string `yaml:"DatabaseID"` // database id for persistence - LocalDatabase bool `yaml:"UseLocalDatabase"` // use local sqlite3 database for persistence - AddressDailyQuota uint `yaml:"AddressDailyQuota"` - AccountDailyQuota uint `yaml:"AccountDailyQuota"` - VerificationInterval time.Duration `yaml:"VerificationInterval"` + ListenAddr string `yaml:"ListenAddr"` + FaucetAmount int64 `yaml:"FaucetAmount"` + DatabaseID string `yaml:"DatabaseID"` // database id for persistence + LocalDatabase bool `yaml:"UseLocalDatabase"` // use local sqlite3 database for persistence + AddressDailyQuota uint `yaml:"AddressDailyQuota"` + AccountDailyQuota uint `yaml:"AccountDailyQuota"` } type confWrapper struct { @@ -72,21 +68,15 @@ func LoadConfig(configPath string) (config *Config, err error) { return } - if config.URLRequired == "" && len(config.ContentRequired) == 0 { - err = ErrInvalidFaucetConfig - log.Error("at least one URL/Content config for faucet application is required") - return - } - if config.DatabaseID == "" { err = ErrInvalidFaucetConfig - log.Error("a database id is required for faucet application persistence") + log.Error("a database id is required for faucet applyToken persistence") return } if config.FaucetAmount <= 0 { err = ErrInvalidFaucetConfig - log.Error("a positive faucet amount is required for every application") + log.Error("a positive faucet amount is required for every applyToken") return } @@ -103,11 +93,5 @@ func LoadConfig(configPath string) (config *Config, err error) { return } - if config.VerificationInterval.Nanoseconds() <= 0 { - log.Warning("a valid VerificationInterval is required, 30 seconds assumed") - - config.VerificationInterval = 30 * time.Second - } - return } diff --git 
a/cmd/cql-faucet/errors.go b/cmd/cql-faucet/errors.go index 0e8f8e609..2b0c83684 100644 --- a/cmd/cql-faucet/errors.go +++ b/cmd/cql-faucet/errors.go @@ -21,22 +21,16 @@ import "errors" var ( // user errors - // ErrInvalidURL represents the invalid media url error. - ErrInvalidURL = errors.New("INVALID_URL") - // ErrInvalidAddress represents address is not a valid test net address. - ErrInvalidAddress = errors.New("INVALID_ADDRESS") - // ErrInvalidApplicationID represents the application id provided is invalid. - ErrInvalidApplicationID = errors.New("INVALID_APPLICATION_ID") - // ErrAccountQuotaExceeded represents the applicant has exceeded the account daily application quota. + // ErrInvalidAccount represents account is not a valid account. + ErrInvalidAccount = errors.New("INVALID_ADDRESS") + // ErrInvalidDatabase represents database id is not valid. + ErrInvalidDatabase = errors.New("INVALID_DATABASE") + // ErrAccountQuotaExceeded represents the applicant has exceeded the account daily applyToken quota. ErrAccountQuotaExceeded = errors.New("ACCOUNT_QUOTA_EXCEEDED") - // ErrAddressQuotaExceeded represents the applicant has exceeded the address daily application quota. - ErrAddressQuotaExceeded = errors.New("ADDRESS_QUOTA_EXCEEDED") - // ErrEnqueueApplication represents failing to enqueue the application request. - ErrEnqueueApplication = errors.New("ENQUEUE_FAILED") - // ErrRequiredContentNotExists represents invalid application which contains no advertising content. - ErrRequiredContentNotExists = errors.New("NO_REQUIRED_CONTENT") - // ErrRequiredURLNotExists represents invalid application which contains no advertising url. - ErrRequiredURLNotExists = errors.New("NO_REQUIRED_LINK") + // ErrEmailQuotaExceeded represents the applicant has exceeded the account daily applyToken quota. + ErrEmailQuotaExceeded = errors.New("EMAIL_QUOTA_EXCEEDED") + // ErrEnqueueApplication represents failing to enqueue the applyToken request. 
+ ErrEnqueueApplication = errors.New("ADD_RECORD_FAILED") // system errors diff --git a/cmd/cql-faucet/main.go b/cmd/cql-faucet/main.go index 6debac8fd..4d86be5bf 100644 --- a/cmd/cql-faucet/main.go +++ b/cmd/cql-faucet/main.go @@ -86,18 +86,9 @@ func main() { return } - // init verifier - var v *Verifier - if v, err = NewVerifier(cfg, p); err != nil { - return - } - - // start verifier - go v.run() - // init faucet api var server *http.Server - if server, err = startAPI(v, p, cfg.ListenAddr); err != nil { + if server, err = startAPI(p, cfg.ListenAddr); err != nil { return } @@ -105,9 +96,6 @@ func main() { <-utils.WaitForExit() - // stop verifier - v.stop() - // stop faucet api ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() diff --git a/cmd/cql-faucet/persistence.go b/cmd/cql-faucet/persistence.go index 7b9ef9d5f..b48f6c5a6 100644 --- a/cmd/cql-faucet/persistence.go +++ b/cmd/cql-faucet/persistence.go @@ -32,39 +32,6 @@ import ( _ "github.com/CovenantSQL/go-sqlite3-encrypt" ) -// State defines the token application request state. -type State int - -const ( - // StateApplication represents application request initial state. - StateApplication State = iota - // StateVerified represents the application request has already been verified. - StateVerified - // StateDispensed represents the application request has been fulfilled and tokens are dispensed. - StateDispensed - // StateFailed represents the application is invalid or maybe quota exceeded. - StateFailed - // StateUnknown represents invalid state - StateUnknown -) - -func (s State) String() string { - switch s { - case StateApplication: - return "StateApplication" - case StateVerified: - return "StateVerified" - case StateDispensed: - return "StateDispensed" - case StateFailed: - return "StateFailed" - case StateUnknown: - return "StateUnknown" - } - - return "" -} - // Persistence defines the persistence api for faucet service. 
type Persistence struct { db *sql.DB @@ -75,34 +42,28 @@ type Persistence struct { // applicationRecord defines single record for verification. type applicationRecord struct { - rowID int64 - applicationID string - platform string - address string - mediaURL string - account string - state State - tokenAmount int64 // covenantsql could store uint64 value, use int64 instead - failReason string + id string + rowID int64 + account string + email string + tokenAmount int64 // covenantsql could not store uint64 value, use int64 instead + createTime time.Time } func (r *applicationRecord) asMap() (result map[string]interface{}) { result = make(map[string]interface{}) + result["id"] = r.id result["rowID"] = r.rowID - result["applicationID"] = r.applicationID - result["platform"] = r.platform - result["address"] = r.address - result["mediaURL"] = r.mediaURL result["account"] = r.account - result["state"] = r.state.String() + result["email"] = r.email result["tokenAmount"] = r.tokenAmount - result["failReason"] = r.failReason + result["createTime"] = r.createTime.String() return } -// NewPersistence returns a new application persistence api. +// NewPersistence returns a new applyToken persistence api. func NewPersistence(faucetCfg *Config) (p *Persistence, err error) { p = &Persistence{ accountDailyQuota: faucetCfg.AccountDailyQuota, @@ -135,28 +96,23 @@ func NewPersistence(faucetCfg *Config) (p *Persistence, err error) { func (p *Persistence) initDB() (err error) { _, err = p.db.ExecContext(context.Background(), `CREATE TABLE IF NOT EXISTS faucet_records ( - id string unique, - platform string, - account string, - url string, - address string, - state int, + id text unique, + account text, + email text, amount bigint, - reason string, ctime datetime )`) return } -func (p *Persistence) checkAccountLimit(platform string, account string) (err error) { - // TODO, consider cache the limits in memory? 
+func (p *Persistence) checkAccountLimit(account string) (err error) { timeOfDayStart := time.Now().UTC().Format("2006-01-02 00:00:00") // account limit check row := p.db.QueryRowContext(context.Background(), `SELECT COUNT(1) AS cnt FROM faucet_records - WHERE ctime >= ? AND platform = ? AND account = ? AND state IN (?, ?, ?)`, - timeOfDayStart, platform, account, StateApplication, StateVerified, StateDispensed) + WHERE ctime >= ? AND account = ?`, + timeOfDayStart, account) var result uint @@ -167,25 +123,21 @@ func (p *Persistence) checkAccountLimit(platform string, account string) (err er if result >= p.accountDailyQuota { // quota exceeded - log.WithFields(log.Fields{ - "account": account, - "platform": platform, - }).Error("daily account quota exceeded") + log.WithField("account", account).Error("daily account quota exceeded") return ErrAccountQuotaExceeded } return } -func (p *Persistence) checkAddressLimit(address string) (err error) { - // TODO, consider cache the limits in memory? +func (p *Persistence) checkEmailLimit(email string) (err error) { timeOfDayStart := time.Now().UTC().Format("2006-01-02 00:00:00") // account limit check row := p.db.QueryRowContext(context.Background(), `SELECT COUNT(1) AS cnt FROM faucet_records - WHERE ctime >= ? AND address = ? AND state IN (?, ?, ?)`, - timeOfDayStart, address, StateApplication, StateVerified, StateDispensed) + WHERE ctime >= ? AND email = ?`, + timeOfDayStart, email) var result uint @@ -196,142 +148,38 @@ func (p *Persistence) checkAddressLimit(address string) (err error) { if result >= p.addressDailyQuota { // quota exceeded - log.WithFields(log.Fields{ - "address": address, - }).Error("daily address quota exceeded") - return ErrAddressQuotaExceeded + log.WithField("email", email).Error("daily email quota exceeded") + return ErrEmailQuotaExceeded } return } -// enqueueApplication record a new token application to CovenantSQL database. 
-func (p *Persistence) enqueueApplication(address string, mediaURL string) (applicationID string, err error) { - // resolve account name in address - var meta urlMeta - meta, err = extractPlatformInURL(mediaURL) - if err != nil { - log.WithFields(log.Fields{ - "address": address, - "mediaURL": mediaURL, - }).Errorf("enqueue application with invalid url: %v", err) - return - } - - // check limits - if err = p.checkAccountLimit(meta.platform, meta.account); err != nil { - return - } - if err = p.checkAddressLimit(address); err != nil { - return - } - +// addRecord record a new token applyToken to CovenantSQL database. +func (p *Persistence) addRecord(account string, email string) (applicationID string, err error) { // generate uuid applicationID = uuid.Must(uuid.NewV4()).String() + now := time.Now().UTC().Format("2006-01-02 15:04:05") // enqueue _, err = p.db.ExecContext(context.Background(), `INSERT INTO faucet_records ( id, - platform, account, - url, - address, - state, + email, amount, - reason, ctime - ) VALUES (?, ?, ?, ?, ?, ?, ?, '', CURRENT_TIMESTAMP)`, - applicationID, meta.platform, meta.account, mediaURL, address, StateApplication, p.tokenAmount) + ) VALUES (?, ?, ?, ?, ?)`, + applicationID, account, email, p.tokenAmount, now) if err != nil { log.WithFields(log.Fields{ - "address": address, - "mediaURL": mediaURL, - }).Errorf("enqueue application failed: %v", err) + "account": account, + "email": email, + }).Errorf("enqueue applyToken failed: %v", err) err = ErrEnqueueApplication } return } - -// queryState returns faucet application state. -func (p *Persistence) queryState(address string, applicationID string) (record *applicationRecord, err error) { - row := p.db.QueryRowContext(context.Background(), - `SELECT id, rowid, platform, address, url, account, state, amount, reason FROM faucet_records WHERE - address = ? AND id = ? 
LIMIT 1`, address, applicationID) - - record = &applicationRecord{} - err = row.Scan(&record.applicationID, &record.rowID, &record.platform, &record.address, &record.mediaURL, - &record.account, &record.state, &record.tokenAmount, &record.failReason) - - return -} - -// getRecords fetch records need to be processed. -func (p *Persistence) getRecords(startRowID int64, platform string, state State, limitCount int) (records []*applicationRecord, err error) { - var rows *sql.Rows - - args := make([]interface{}, 0) - baseSQL := "SELECT id, rowid, platform, address, url, account, state, amount FROM faucet_records WHERE 1=1 " - - if startRowID > 0 { - baseSQL += " AND rowid >= ? " - args = append(args, startRowID) - } - if platform != "" { - baseSQL += " AND platform = ? " - args = append(args, platform) - } - if state != StateUnknown { - baseSQL += " AND state = ? " - args = append(args, state) - } - if limitCount > 0 { - baseSQL += " LIMIT ?" - args = append(args, limitCount) - } - - rows, err = p.db.QueryContext(context.Background(), baseSQL, args...) - - for rows.Next() { - r := &applicationRecord{} - - if err = rows.Scan(&r.applicationID, &r.rowID, &r.platform, &r.address, &r.mediaURL, - &r.account, &r.state, &r.tokenAmount); err != nil { - return - } - - records = append(records, r) - } - - return -} - -// updateRecord updates application record. -func (p *Persistence) updateRecord(record *applicationRecord) (err error) { - _, err = p.db.ExecContext(context.Background(), - `UPDATE faucet_records SET - id = ?, - platform = ?, - address = ?, - url = ?, - account = ?, - state = ?, - reason = ?, - amount = ? 
- WHERE rowid = ?`, - record.applicationID, - record.platform, - record.address, - record.mediaURL, - record.account, - int(record.state), - record.failReason, - record.tokenAmount, - record.rowID, - ) - - return -} diff --git a/cmd/cql-faucet/resolver.go b/cmd/cql-faucet/resolver.go deleted file mode 100644 index 8bfabcf89..000000000 --- a/cmd/cql-faucet/resolver.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package main - -import ( - "net/url" - "strings" -) - -const ( - platformFacebook = "facebook" - platformTwitter = "twitter" - platformWeibo = "weibo" -) - -type urlMeta struct { - platform string - account string -} - -func extractPlatformInURL(mediaURL string) (meta urlMeta, err error) { - if !strings.HasPrefix(mediaURL, "http") { - mediaURL = "http://" + mediaURL - } - - u, err := url.Parse(mediaURL) - if strings.Contains(u.Hostname(), "facebook") { - // facebook - meta.platform = platformFacebook - pathSegs := strings.Split(u.Path, "/") - // account in first path seg - if len(pathSegs) >= 2 { - meta.account = pathSegs[1] - } - } else if strings.Contains(u.Hostname(), "twitter") { - // twitter - meta.platform = platformTwitter - pathSegs := strings.Split(u.Path, "/") - // account in first path seg - if len(pathSegs) >= 2 { - meta.account = pathSegs[1] - } - } else if strings.Contains(u.Hostname(), "weibo") { - // weibo - meta.platform = platformWeibo - pathSegs := strings.Split(u.Path, "/") - // account in first path seg - if len(pathSegs) >= 2 { - meta.account = pathSegs[1] - } - } else { - err = ErrInvalidURL - } - - return -} diff --git a/cmd/cql-faucet/rpc.go b/cmd/cql-faucet/rpc.go deleted file mode 100644 index 6598df633..000000000 --- a/cmd/cql-faucet/rpc.go +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package main - -import ( - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/rpc" -) - -func requestBP(method string, req interface{}, resp interface{}) (err error) { - var bp proto.NodeID - if bp, err = rpc.GetCurrentBP(); err != nil { - return err - } - return rpc.NewCaller().CallNode(bp, method, req, resp) -} diff --git a/cmd/cql-faucet/verifier.go b/cmd/cql-faucet/verifier.go deleted file mode 100644 index bb7090d2c..000000000 --- a/cmd/cql-faucet/verifier.go +++ /dev/null @@ -1,452 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package main - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - "net/url" - "regexp" - "strings" - "sync" - "time" - - "github.com/CovenantSQL/xurls" - "github.com/dyatlov/go-opengraph/opengraph" - - "github.com/CovenantSQL/CovenantSQL/crypto" - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/route" - pt "github.com/CovenantSQL/CovenantSQL/types" - "github.com/CovenantSQL/CovenantSQL/utils/log" -) - -var ( - regexpTextContent = regexp.MustCompile("(?i)\"text\"\\s*:\\s*(\".+\")\\s*,\\s*") - medClient = &http.Client{} - locClient = &http.Client{ - CheckRedirect: func(_ *http.Request, _ []*http.Request) error { - return http.ErrUseLastResponse - }, - } -) - -const ( - uaPC = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.9 Safari/537.36" - uaMobile = "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1" - uaCurl = "curl/7.54.0" - retryCount = 10 - retryTime = time.Second - verificationPerRound = 100 - dispensePerRound = 100 -) - -// Verifier defines the social media post content verifier. -type Verifier struct { - // settings - interval time.Duration - lastVerified int64 - lastDispensed int64 - contentRequired []string - urlRequired string - vaultAddress proto.AccountAddress - privateKey *asymmetric.PrivateKey - publicKey *asymmetric.PublicKey - - // persistence - p *Persistence - - stopCh chan struct{} -} - -// NewVerifier returns a new verifier instance. 
-func NewVerifier(cfg *Config, p *Persistence) (v *Verifier, err error) { - v = &Verifier{ - interval: cfg.VerificationInterval, - lastVerified: 0, - lastDispensed: 0, - contentRequired: cfg.ContentRequired, - urlRequired: cfg.URLRequired, - p: p, - stopCh: make(chan struct{}), - } - - if v.publicKey, err = kms.GetLocalPublicKey(); err != nil { - return - } - - if v.privateKey, err = kms.GetLocalPrivateKey(); err != nil { - return - } - - // generate source account address - if v.vaultAddress, err = crypto.PubKeyHash(v.publicKey); err != nil { - return - } - - log.WithField("vault", v.vaultAddress.String()).Info("init verifier") - - return -} - -func (v *Verifier) run() { - for { - log.Info("begin verification iteration") - - // fetch records - v.verify() - - // dispense - v.dispense() - - log.Info("end verification iteration") - - select { - case <-time.After(v.interval): - case <-v.stopCh: - return - } - } -} - -func (v *Verifier) stop() { - select { - case <-v.stopCh: - default: - close(v.stopCh) - } -} - -func (v *Verifier) verify() { - wg := &sync.WaitGroup{} - ch := make(chan int64, 3) - runTask := func(wg *sync.WaitGroup, ch chan int64, f func() (int64, error)) { - defer wg.Done() - verified, err := f() - if err != nil { - log.WithError(err).Warning("verify application failed") - ch <- verified - } - } - - wg.Add(1) - go runTask(wg, ch, v.verifyFacebook) - wg.Add(1) - go runTask(wg, ch, v.verifyTwitter) - wg.Add(1) - go runTask(wg, ch, v.verifyWeibo) - - wg.Wait() - close(ch) - - for verified := range ch { - if verified >= v.lastVerified { - v.lastVerified = verified - } - } -} - -func (v *Verifier) verifyFacebook() (verified int64, err error) { - var records []*applicationRecord - if records, err = v.p.getRecords(v.lastVerified, platformFacebook, StateApplication, verificationPerRound); err != nil { - return - } - - // check records - return v.doVerify(records, verifyFacebook) -} - -func (v *Verifier) verifyTwitter() (verified int64, err error) { - var 
records []*applicationRecord - if records, err = v.p.getRecords(v.lastVerified, platformTwitter, StateApplication, verificationPerRound); err != nil { - return - } - - // check records - return v.doVerify(records, verifyTwitter) -} - -func (v *Verifier) verifyWeibo() (verified int64, err error) { - var records []*applicationRecord - if records, err = v.p.getRecords(v.lastVerified, platformWeibo, StateApplication, verificationPerRound); err != nil { - return - } - - // check records - return v.doVerify(records, verifyWeibo) -} - -func (v *Verifier) dispense() (err error) { - var records []*applicationRecord - if records, err = v.p.getRecords(v.lastDispensed, "", StateVerified, dispensePerRound); err != nil { - return - } - - // dispense - for _, record := range records { - if err = v.dispenseOne(record); err != nil { - return - } - } - - return -} - -func (v *Verifier) dispenseOne(r *applicationRecord) (err error) { - balanceReq := &pt.QueryAccountTokenBalanceReq{} - balanceRes := &pt.QueryAccountTokenBalanceResp{} - balanceReq.Addr = v.vaultAddress - balanceReq.TokenType = pt.Particle - - // get current balance - if err = requestBP(route.MCCQueryAccountTokenBalance.String(), balanceReq, balanceRes); err != nil { - log.WithError(err).Warning("get account balance failed") - } else { - log.WithField("balance", balanceRes.Balance).Info("get account balance") - } - - // allocate nonce - nonceReq := &pt.NextAccountNonceReq{} - nonceResp := &pt.NextAccountNonceResp{} - nonceReq.Addr = v.vaultAddress - - if err = requestBP(route.MCCNextAccountNonce.String(), nonceReq, nonceResp); err != nil { - // allocate nonce failed - log.WithError(err).Warning("allocate nonce for transaction failed") - return - } - - // decode target account address - var targetAddress proto.AccountAddress - - req := &pt.AddTxReq{TTL: 1} - resp := &pt.AddTxResp{} - req.Tx = pt.NewTransfer( - &pt.TransferHeader{ - Sender: v.vaultAddress, - Receiver: targetAddress, - Nonce: nonceResp.Nonce, - Amount: 
uint64(r.tokenAmount), - }, - ) - if err = req.Tx.Sign(v.privateKey); err != nil { - // sign failed? - return - } - - if err = requestBP(route.MCCAddTx.String(), req, resp); err != nil { - // add transaction failed, try again - log.WithError(err).Warning("send transaction failed") - - return - } - - // save dispense result - r.state = StateDispensed - - if err = v.p.updateRecord(r); err != nil { - // failed - return - } - - log.WithFields(log.Fields(r.asMap())).Info("dispensed application record") - - return -} - -func (v *Verifier) doVerify(records []*applicationRecord, verifyFunc func(string, []string, string) error) (verified int64, err error) { - for _, r := range records { - if err = verifyFunc(r.mediaURL, v.contentRequired, v.urlRequired); err != nil { - r.failReason = err.Error() - r.state = StateFailed - } else { - r.state = StateVerified - } - - if err = v.p.updateRecord(r); err != nil { - // failed - return - } - - log.WithFields(log.Fields(r.asMap())).Info("verified application record") - - verified = r.rowID - } - - return -} - -func verifyFacebook(mediaURL string, contentRequired []string, urlRequired string) (err error) { - var resp string - resp, err = makeRequest(mediaURL, uaPC, retryCount) - if err != nil { - return - } - og := opengraph.NewOpenGraph() - if err = og.ProcessHTML(strings.NewReader(resp)); err != nil { - return - } - - // description contains sharing content - if !containsOneOf(og.Description, contentRequired) { - return ErrRequiredContentNotExists - } - if !strings.Contains(og.Description, urlRequired) { - return ErrRequiredURLNotExists - } - - return nil -} - -func verifyTwitter(mediaURL string, contentRequired []string, urlRequired string) (err error) { - var resp string - resp, err = makeRequest(mediaURL, uaPC, retryCount) - if err != nil { - return - } - og := opengraph.NewOpenGraph() - if err = og.ProcessHTML(strings.NewReader(resp)); err != nil { - return - } - - // description contains sharing content - if 
!containsOneOf(og.Description, contentRequired) { - return ErrRequiredContentNotExists - } - - // check url - if err = containsURL(og.Description, urlRequired, retryCount); err != nil { - return err - } - - return nil -} - -func verifyWeibo(mediaURL string, contentRequired []string, urlRequired string) (err error) { - var resp string - resp, err = makeRequest(mediaURL, uaMobile, retryCount) - if err != nil { - return - } - // extract text fields - matches := regexpTextContent.FindStringSubmatch(resp) - if len(matches) <= 1 { - // parser err - return ErrRequiredContentNotExists - } - - // unquote json - var textContent string - if err = json.Unmarshal([]byte(matches[1]), &textContent); err != nil { - return - } - - // test - if !containsOneOf(textContent, contentRequired) { - return ErrRequiredContentNotExists - } - if !strings.Contains(textContent, urlRequired) { - return ErrRequiredURLNotExists - } - - return nil -} - -func containsOneOf(content string, contentRequired []string) bool { - log.WithFields(log.Fields{ - "provided": content, - "required": contentRequired, - }).Info("matching content") - for _, v := range contentRequired { - if strings.Contains(content, v) { - return true - } - } - return false -} - -func containsURL(content string, url string, retry int) (err error) { - // extract all urls in string and send test request - urls := xurls.Strict().FindAllString(content, -1) - - for _, shortedURL := range urls { - if strings.Contains(shortedURL, url) { - return nil - } - - if redirectURL, err := locationRequest(shortedURL, uaCurl, retry); err == nil { - if strings.Contains(redirectURL, url) { - return nil - } - } - } - - return ErrRequiredURLNotExists -} - -func makeRequest(reqURL string, ua string, retry int) (response string, err error) { - var req *http.Request - req, err = http.NewRequest("GET", reqURL, bytes.NewReader([]byte{})) - req.Header.Add("User-Agent", ua) - - for i := retry; i >= 0; i-- { - var resp *http.Response - resp, err = 
medClient.Do(req) - - if err == nil { - defer resp.Body.Close() - var resBytes []byte - if resBytes, err = ioutil.ReadAll(resp.Body); err == nil { - response = string(resBytes) - return - } - } - - time.Sleep(retryTime) - } - - return - -} - -func locationRequest(reqURL string, ua string, retry int) (redirectURL string, err error) { - var req *http.Request - req, err = http.NewRequest("HEAD", reqURL, bytes.NewReader([]byte{})) - req.Header.Add("User-Agent", ua) - - for i := retry; i >= 0; i-- { - var resp *http.Response - resp, err = locClient.Do(req) - - if err == nil { - defer resp.Body.Close() - var urlObj *url.URL - if urlObj, err = resp.Location(); err == nil { - redirectURL = urlObj.String() - return - } - } - - time.Sleep(retryTime) - } - - return -} diff --git a/cmd/cql-faucet/verifier_test.go b/cmd/cql-faucet/verifier_test.go deleted file mode 100644 index 4c19e673d..000000000 --- a/cmd/cql-faucet/verifier_test.go +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package main - -import ( - "testing" - - . 
"github.com/smartystreets/goconvey/convey" -) - -func TestVerifyFacebook(t *testing.T) { - Convey("", t, func() { - var err error - err = verifyFacebook("https://www.facebook.com/hupili/posts/1700877176661446", - []string{"xxx", "Initium Media"}, "https://github.com/initiumlab/beijinguprooted") - So(err, ShouldBeNil) - err = verifyFacebook("facebook.com/dualipaofficial/posts/1832797603472815", - []string{"xxx", "ELECTRICITY"}, "http://www.baidu.com") - So(err, ShouldNotBeNil) - err = verifyFacebook("facebook.com/dualipaofficial/posts/1832797603472815", - []string{"xxx", "哈哈"}, "http://smarturl.it/SilkCityElectricity/youtube") - So(err, ShouldNotBeNil) - }) -} - -func TestVerifyTwitter(t *testing.T) { - Convey("", t, func() { - var err error - err = verifyTwitter("https://twitter.com/tualatrix/status/1040460103898394624", - []string{"xxx", "好奇心日报"}, "http://m.qdaily.com") - So(err, ShouldBeNil) - err = verifyTwitter("https://twitter.com/Fenng/status/1040487918995791873", - []string{"xxx", "阿里巴巴"}, "http://www.baidu.com") - So(err, ShouldNotBeNil) - err = verifyTwitter("https://twitter.com/Fenng/status/1040487918995791873", - []string{"xxx", "百度"}, "https://twitter.com") - So(err, ShouldNotBeNil) - }) -} - -func TestVerifyWeibo(t *testing.T) { - Convey("", t, func() { - var err error - err = verifyWeibo("https://weibo.com/2104296457/GzhcXuPNB", - []string{"xxx", "Mavic"}, "https://www.chiphell.com") - So(err, ShouldBeNil) - err = verifyWeibo("https://weibo.com/2104296457/Gz8vO2gOc", - []string{"xxx", "卡西欧"}, "http://www.baidu.com") - So(err, ShouldNotBeNil) - err = verifyWeibo("https://weibo.com/2104296457/Gz8vO2gOc", - []string{"xxx", "哈哈"}, "https://www.chiphell.com") - So(err, ShouldNotBeNil) - }) -} From c1773461068478e5b2a834eba4e6e4bb3d25f9e3 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 18 Mar 2019 21:14:20 +0800 Subject: [PATCH 141/244] Fix typo of errors package --- cmd/cql-faucet/api.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/cmd/cql-faucet/api.go b/cmd/cql-faucet/api.go index f3e16c478..1d23fb5a2 100644 --- a/cmd/cql-faucet/api.go +++ b/cmd/cql-faucet/api.go @@ -26,7 +26,7 @@ import ( "github.com/gorilla/handlers" "github.com/gorilla/mux" - "github.com/pingcap/errors" + "github.com/pkg/errors" pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" "github.com/CovenantSQL/CovenantSQL/client" From 2e3817d51e3ed8032680ff9faf56a48a51f2a484 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 18 Mar 2019 21:25:31 +0800 Subject: [PATCH 142/244] Support overriding listen addr in faucet commandline --- cmd/cql-faucet/config.go | 6 +++++- cmd/cql-faucet/main.go | 4 +++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/cmd/cql-faucet/config.go b/cmd/cql-faucet/config.go index 1f4205a84..b43b2a48c 100644 --- a/cmd/cql-faucet/config.go +++ b/cmd/cql-faucet/config.go @@ -40,7 +40,7 @@ type confWrapper struct { } // LoadConfig load the common covenantsql client config again for extra faucet config. -func LoadConfig(configPath string) (config *Config, err error) { +func LoadConfig(listenAddr string, configPath string) (config *Config, err error) { var configBytes []byte if configBytes, err = ioutil.ReadFile(configPath); err != nil { log.WithError(err).Error("read config file failed") @@ -62,6 +62,10 @@ func LoadConfig(configPath string) (config *Config, err error) { config = configWrapper.Faucet // validate config + if listenAddr != "" { + config.ListenAddr = listenAddr + } + if config.ListenAddr == "" { err = ErrInvalidFaucetConfig log.Error("ListenAddr is not defined in faucet config") diff --git a/cmd/cql-faucet/main.go b/cmd/cql-faucet/main.go index 4d86be5bf..f150dbcb0 100644 --- a/cmd/cql-faucet/main.go +++ b/cmd/cql-faucet/main.go @@ -35,12 +35,14 @@ const name = "cql-faucet" var ( version = "unknown" + listenAddr string configFile string password string showVersion bool ) func init() { + flag.StringVar(&listenAddr, "listen", "", "API listen addr (will override settings 
in config file") flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Configuration file for covenantsql") flag.StringVar(&password, "password", "", "Master key password for covenantsql") flag.BoolVar(&asymmetric.BypassSignature, "bypass-signature", false, @@ -73,7 +75,7 @@ func main() { // load faucet config from same config file var cfg *Config - if cfg, err = LoadConfig(configFile); err != nil { + if cfg, err = LoadConfig(listenAddr, configFile); err != nil { log.WithError(err).Error("read faucet config failed") os.Exit(-1) return From 043148f5265891d7c748246cabd9746d22dccee4 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 18 Mar 2019 21:38:03 +0800 Subject: [PATCH 143/244] Return token amount in faucet api --- cmd/cql-faucet/api.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cmd/cql-faucet/api.go b/cmd/cql-faucet/api.go index 1d23fb5a2..7aaa217c7 100644 --- a/cmd/cql-faucet/api.go +++ b/cmd/cql-faucet/api.go @@ -155,8 +155,9 @@ func (d *service) applyToken(rw http.ResponseWriter, r *http.Request) { } sendResponse(http.StatusOK, true, nil, map[string]interface{}{ - "id": applicationID, - "tx": txHash.String(), + "id": applicationID, + "tx": txHash.String(), + "amount": d.p.tokenAmount, }, rw) return From 2c9ee840ecba6fcd400c89b4a2dcae3edfb19791 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 18 Mar 2019 21:45:15 +0800 Subject: [PATCH 144/244] Use argTx instead of constant literal --- cmd/cql-faucet/api.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/cql-faucet/api.go b/cmd/cql-faucet/api.go index 7aaa217c7..d8ea57f33 100644 --- a/cmd/cql-faucet/api.go +++ b/cmd/cql-faucet/api.go @@ -371,7 +371,7 @@ func (d *service) privatizeDB(rw http.ResponseWriter, r *http.Request) { func (d *service) waitTx(rw http.ResponseWriter, r *http.Request) { // get args - tx := r.FormValue("tx") + tx := r.FormValue(argTx) var ( txHash *hash.Hash From 39a3378124b2331d713197413ac4b9cfd286c98c Mon Sep 17 00:00:00 2001 From: 
Qi Xiao Date: Mon, 18 Mar 2019 21:48:53 +0800 Subject: [PATCH 145/244] Add missing log to faucet persistence init error --- cmd/cql-faucet/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/cql-faucet/main.go b/cmd/cql-faucet/main.go index f150dbcb0..5a7ac72cf 100644 --- a/cmd/cql-faucet/main.go +++ b/cmd/cql-faucet/main.go @@ -84,7 +84,7 @@ func main() { // init persistence var p *Persistence if p, err = NewPersistence(cfg); err != nil { - log.Errorf("") + log.WithError(err).Error("init persistence storage failed") return } From 19a750e79efe62cb0bc3ef2386b0216ef00773b4 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 18 Mar 2019 21:57:19 +0800 Subject: [PATCH 146/244] Add keystore in conf to docker ignore list --- .dockerignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.dockerignore b/.dockerignore index 5a71055bc..34284bb55 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,3 +1,4 @@ test/ bin/cql* +conf/testnet*/*.keystore *.cover.out From 91701c658e04ab1465c08f9530ac4810cef34497 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Mon, 18 Mar 2019 22:22:20 +0800 Subject: [PATCH 147/244] Fix typo in faucet api callbacks --- cmd/cql-faucet/api.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/cql-faucet/api.go b/cmd/cql-faucet/api.go index d8ea57f33..c3364125c 100644 --- a/cmd/cql-faucet/api.go +++ b/cmd/cql-faucet/api.go @@ -421,7 +421,7 @@ func startAPI(p *Persistence, listenAddr string) (server *http.Server, err error v1Router.HandleFunc("/apply_token", service.applyToken).Methods("POST") v1Router.HandleFunc("/account_balance", service.getBalance).Methods("GET", "POST") v1Router.HandleFunc("/db_balance", service.getDBBalance).Methods("GET", "POST") - v1Router.HandleFunc("/create_database", service.getDBBalance).Methods("POST") + v1Router.HandleFunc("/create_database", service.createDB).Methods("POST") v1Router.HandleFunc("/privatize", service.privatizeDB).Methods("POST") v1Router.HandleFunc("/wait_tx", 
service.waitTx).Methods("GET", "POST") From 5b450adce06d0e01d295ca65c8f7ad8260a9fd85 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Wed, 20 Mar 2019 11:53:20 +0800 Subject: [PATCH 148/244] Add optional node count support for database creation --- cmd/cql-faucet/api.go | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/cmd/cql-faucet/api.go b/cmd/cql-faucet/api.go index c3364125c..4fa78920a 100644 --- a/cmd/cql-faucet/api.go +++ b/cmd/cql-faucet/api.go @@ -22,6 +22,7 @@ import ( "net/http" "net/url" "regexp" + "strconv" "time" "github.com/gorilla/handlers" @@ -42,10 +43,11 @@ import ( ) const ( - argAccount = "account" - argEmail = "email" - argDatabase = "db" - argTx = "tx" + argAccount = "account" + argEmail = "email" + argDatabase = "db" + argTx = "tx" + argNodeCount = "node_count" ) var ( @@ -196,12 +198,20 @@ func (d *service) getBalance(rw http.ResponseWriter, r *http.Request) { func (d *service) createDB(rw http.ResponseWriter, r *http.Request) { // get args account := r.FormValue(argAccount) + rawNodeCount := r.FormValue(argNodeCount) + nodeCount := uint16(1) if !regexAccount.MatchString(account) { sendResponse(http.StatusBadRequest, false, ErrInvalidAccount.Error(), nil, rw) return } + if rawNodeCount != "" { + if tempNodeCount, _ := strconv.Atoi(rawNodeCount); tempNodeCount > 0 { + nodeCount = uint16(tempNodeCount) + } + } + var ( addr proto.AccountAddress txCreateHash hash.Hash @@ -220,7 +230,7 @@ func (d *service) createDB(rw http.ResponseWriter, r *http.Request) { } meta := client.ResourceMeta{} - meta.Node = 1 + meta.Node = nodeCount if txCreateHash, dsn, err = client.Create(meta); err != nil { sendResponse(http.StatusInternalServerError, false, err.Error(), nil, rw) From 1e99e6e7c420bfc6a962859c0a66d72e3dab570a Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Wed, 20 Mar 2019 14:50:13 +0800 Subject: [PATCH 149/244] Fix update permission test case nonce no --- blockproducer/metastate_test.go | 7 ++++--- 1 file changed, 4 
insertions(+), 3 deletions(-) diff --git a/blockproducer/metastate_test.go b/blockproducer/metastate_test.go index 80620e5a0..44a9be0b8 100644 --- a/blockproducer/metastate_test.go +++ b/blockproducer/metastate_test.go @@ -1037,10 +1037,11 @@ func TestMetaState(t *testing.T) { err = up.Sign(privKey1) So(err, ShouldBeNil) err = ms.apply(&up) - So(errors.Cause(err), ShouldEqual, ErrInvalidPermission) + So(err, ShouldBeNil) // test permission update // addr1(admin) update addr3 as admin up.TargetUser = addr3 + up.Nonce += 1 up.Permission = types.UserPermissionFromRole(types.Admin) err = up.Sign(privKey1) So(err, ShouldBeNil) @@ -1073,7 +1074,7 @@ func TestMetaState(t *testing.T) { err = ms.apply(&up) So(errors.Cause(err), ShouldEqual, ErrNoSuperUserLeft) // addr1(read) update addr3(admin) fail - up.Nonce = cd1.Nonce + 2 + up.Nonce = cd1.Nonce + 3 err = up.Sign(privKey1) So(err, ShouldBeNil) err = ms.apply(&up) @@ -1313,7 +1314,7 @@ func TestMetaState(t *testing.T) { invalidIk3 := &types.IssueKeys{ IssueKeysHeader: types.IssueKeysHeader{ TargetSQLChain: dbAccount, - Nonce: 3, + Nonce: 4, }, } err = invalidIk3.Sign(privKey1) From 3c6b3e2a4c7d6b8fc37daba7edae8005923dc073 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Wed, 20 Mar 2019 15:02:18 +0800 Subject: [PATCH 150/244] Golint issues --- blockproducer/metastate_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/blockproducer/metastate_test.go b/blockproducer/metastate_test.go index 44a9be0b8..c290fc14d 100644 --- a/blockproducer/metastate_test.go +++ b/blockproducer/metastate_test.go @@ -1041,7 +1041,7 @@ func TestMetaState(t *testing.T) { // test permission update // addr1(admin) update addr3 as admin up.TargetUser = addr3 - up.Nonce += 1 + up.Nonce++ up.Permission = types.UserPermissionFromRole(types.Admin) err = up.Sign(privKey1) So(err, ShouldBeNil) From dd7f080b8a713c577226499fffbff258599cf4f1 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Thu, 21 Mar 2019 14:51:06 +0800 Subject: [PATCH 
151/244] Add missing break to users permission update --- worker/dbms.go | 1 + 1 file changed, 1 insertion(+) diff --git a/worker/dbms.go b/worker/dbms.go index 9004bf683..266632c41 100644 --- a/worker/dbms.go +++ b/worker/dbms.go @@ -299,6 +299,7 @@ func (dbms *DBMS) UpdatePermission(dbID proto.DatabaseID, user proto.AccountAddr u.Permission = permStat.Permission u.Status = permStat.Status exist = true + break } } if !exist { From 6c3f566e90f97b41e2dfb6c9f01d03f1969ddb36 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Thu, 21 Mar 2019 16:04:05 +0800 Subject: [PATCH 152/244] Use sqlite for dht key store --- cmd/cql-minerd/integration_test.go | 2 +- crypto/kms/pubkeystore.go | 188 ++++++++++++++++------------- crypto/kms/pubkeystore_test.go | 106 +++++++++++++--- 3 files changed, 195 insertions(+), 101 deletions(-) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index a99a40984..38046c5c3 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -801,7 +801,7 @@ func benchMiner(b *testing.B, minerCount uint16) { // create meta := client.ResourceMeta{ ResourceMeta: types.ResourceMeta{ - Node: minerCount, + Node: minerCount, UseEventualConsistency: benchEventualConsistency, }, } diff --git a/crypto/kms/pubkeystore.go b/crypto/kms/pubkeystore.go index 9c574b6ad..c18b1df60 100644 --- a/crypto/kms/pubkeystore.go +++ b/crypto/kms/pubkeystore.go @@ -17,13 +17,13 @@ package kms import ( + "database/sql" "os" "path/filepath" "runtime" "strings" "sync" - bolt "github.com/coreos/bbolt" "github.com/pkg/errors" "github.com/CovenantSQL/CovenantSQL/conf" @@ -33,19 +33,14 @@ import ( "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" + xs "github.com/CovenantSQL/CovenantSQL/xenomint/sqlite" ) // PublicKeyStore holds db and bucket name. 
type PublicKeyStore struct { - db *bolt.DB - bucket []byte + db *xs.SQLite3 } -const ( - // kmsBucketName is the boltdb bucket name - kmsBucketName = "kms" -) - var ( // pks holds the singleton instance pks *PublicKeyStore @@ -61,6 +56,19 @@ var ( BP *conf.BPInfo ) +var ( + initTableSQL = `CREATE TABLE IF NOT EXISTS "kms" ( + "id" TEXT, + "node" BLOB, + UNIQUE ("id") + )` + deleteAllSQL = `DELETE FROM "kms"` + deleteRecordSQL = `DELETE FROM "kms" WHERE "id" = ?` + setRecordSQL = `INSERT OR REPLACE INTO "kms" ("id", "node") VALUES(?, ?)` + getRecordSQL = `SELECT "node" FROM "kms" WHERE "id" = ? LIMIT 1` + getAllNodeIDSQL = `SELECT "id" FROM "kms"` +) + func init() { //HACK(auxten) if we were running go test if strings.HasSuffix(os.Args[0], ".test") || @@ -96,8 +104,6 @@ func InitBP() { var ( // ErrPKSNotInitialized indicates public keystore not initialized ErrPKSNotInitialized = errors.New("public keystore not initialized") - // ErrBucketNotInitialized indicates bucket not initialized - ErrBucketNotInitialized = errors.New("bucket not initialized") // ErrNilNode indicates input node is nil ErrNilNode = errors.New("nil node") // ErrKeyNotFound indicates key not found @@ -114,32 +120,32 @@ func InitPublicKeyStore(dbPath string, initNodes []proto.Node) (err error) { pksLock.Lock() InitBP() - var bdb *bolt.DB - bdb, err = bolt.Open(dbPath, 0600, nil) - if err != nil { - log.WithError(err).Error("InitPublicKeyStore failed") - pksLock.Unlock() - return - } + var strg *xs.SQLite3 + + if strg, err = func() (strg *xs.SQLite3, err error) { + // test if the keystore is a valid sqlite database + // if so, truncate and upgrade to new version - name := []byte(kmsBucketName) - err = bdb.Update(func(tx *bolt.Tx) error { - if _, err := tx.CreateBucketIfNotExists(name); err != nil { - log.WithError(err).Error("could not create bucket") - return err + if err = removeFileIfIsNotSQLite(dbPath); err != nil { + return } - return nil // return from Update func - }) - if err != nil { - 
log.WithError(err).Error("InitPublicKeyStore failed") + if strg, err = xs.NewSqlite(dbPath); err != nil { + return + } + if _, err = strg.Writer().Exec(initTableSQL); err != nil { + return + } + + return + }(); err != nil { pksLock.Unlock() + log.WithError(err).Error("InitPublicKeyStore failed") return } // pks is the singleton instance pks = &PublicKeyStore{ - db: bdb, - bucket: name, + db: strg, } pksLock.Unlock() @@ -173,20 +179,19 @@ func GetNodeInfo(id proto.NodeID) (nodeInfo *proto.Node, err error) { return nil, ErrPKSNotInitialized } - err = pks.db.View(func(tx *bolt.Tx) error { - bucket := tx.Bucket(pks.bucket) - if bucket == nil { - return ErrBucketNotInitialized - } - byteVal := bucket.Get([]byte(id)) - if byteVal == nil { - return ErrKeyNotFound + if err = func() (err error) { + var rawNodeInfo []byte + if err = pks.db.Writer().QueryRow(getRecordSQL, string(id)).Scan(&rawNodeInfo); err != nil { + if errors.Cause(err) == sql.ErrNoRows { + err = ErrKeyNotFound + } + return } - err = utils.DecodeMsgPack(byteVal, &nodeInfo) + err = utils.DecodeMsgPack(rawNodeInfo, &nodeInfo) log.Debugf("get node info: %#v", nodeInfo) - return err // return from View func - }) - if err != nil { + + return + }(); err != nil { err = errors.Wrap(err, "get node info failed") } return @@ -198,19 +203,25 @@ func GetAllNodeID() (nodeIDs []proto.NodeID, err error) { return nil, ErrPKSNotInitialized } - err = pks.db.View(func(tx *bolt.Tx) error { - bucket := tx.Bucket(pks.bucket) - if bucket == nil { - return ErrBucketNotInitialized + if err = func() (err error) { + var rows *sql.Rows + if rows, err = pks.db.Writer().Query(getAllNodeIDSQL); err != nil { + return } - err := bucket.ForEach(func(k, v []byte) error { - nodeIDs = append(nodeIDs, proto.NodeID(k)) - return nil - }) - return err // return from View func - }) - if err != nil { + defer rows.Close() + + for rows.Next() { + var rawNodeID string + if err = rows.Scan(&rawNodeID); err != nil { + return + } + + nodeIDs = 
append(nodeIDs, proto.NodeID(rawNodeID)) + } + + return + }(); err != nil { err = errors.Wrap(err, "get all node id failed") } return @@ -266,15 +277,9 @@ func setNode(nodeInfo *proto.Node) (err error) { } log.Debugf("set node: %#v", nodeInfo) - err = pks.db.Update(func(tx *bolt.Tx) error { - bucket := tx.Bucket(pks.bucket) - if bucket == nil { - return ErrBucketNotInitialized - } - return bucket.Put([]byte(nodeInfo.ID), nodeBuf.Bytes()) - }) + _, err = pks.db.Writer().Exec(setRecordSQL, string(nodeInfo.ID), nodeBuf.Bytes()) if err != nil { - err = errors.Wrap(err, "get node info failed") + err = errors.Wrap(err, "set node info failed") } return @@ -288,13 +293,7 @@ func DelNode(id proto.NodeID) (err error) { return ErrPKSNotInitialized } - err = pks.db.Update(func(tx *bolt.Tx) error { - bucket := tx.Bucket(pks.bucket) - if bucket == nil { - return ErrBucketNotInitialized - } - return bucket.Delete([]byte(id)) - }) + _, err = pks.db.Writer().Exec(deleteRecordSQL, string(id)) if err != nil { err = errors.Wrap(err, "del node failed") } @@ -306,15 +305,11 @@ func removeBucket() (err error) { pksLock.Lock() defer pksLock.Unlock() if pks != nil { - err = pks.db.Update(func(tx *bolt.Tx) error { - return tx.DeleteBucket([]byte(kmsBucketName)) - }) + _, err = pks.db.Writer().Exec(deleteAllSQL) if err != nil { err = errors.Wrap(err, "remove bucket failed") return } - // ks.bucket == nil means bucket not exist - pks.bucket = nil } return } @@ -323,18 +318,45 @@ func removeBucket() (err error) { func ResetBucket() error { // cause we are going to reset the bucket, the return of removeBucket // is not useful - removeBucket() - pksLock.Lock() - defer pksLock.Unlock() - bucketName := []byte(kmsBucketName) - err := pks.db.Update(func(tx *bolt.Tx) error { - _, err := tx.CreateBucketIfNotExists(bucketName) - return err - }) - pks.bucket = bucketName - if err != nil { - err = errors.Wrap(err, "reset bucket failed") + return removeBucket() +} + +func removeFileIfIsNotSQLite(filename 
string) (err error) { + var ( + f *os.File + fileHeader [6]byte + ) + if f, err = os.Open(filename); err != nil && os.IsNotExist(err) { + // file not exists + err = nil + return + } else if err != nil { + // may be no read permission + return } - return err + if _, err = f.Read(fileHeader[:]); err != nil { + // read file failed + _ = f.Close() + return + } + + if string(fileHeader[:]) == "SQLite" { + // valid sqlite file + err = nil + _ = f.Close() + return + } + + _ = f.Close() + + // backup and remove file + bakFile := filename + ".bak" + if _, err = os.Stat(bakFile); err != nil && os.IsNotExist(err) { + err = nil + _ = os.Rename(filename, filename+".bak") + } + _ = os.Remove(filename) + + return } diff --git a/crypto/kms/pubkeystore_test.go b/crypto/kms/pubkeystore_test.go index ece85af53..011442594 100644 --- a/crypto/kms/pubkeystore_test.go +++ b/crypto/kms/pubkeystore_test.go @@ -17,6 +17,7 @@ package kms import ( + "io/ioutil" "os" "reflect" "testing" @@ -62,7 +63,6 @@ func TestDB(t *testing.T) { os.Remove(dbFile) defer os.Remove(dbFile) InitPublicKeyStore(dbFile, []proto.Node{*BPNode}) - So(pks.bucket, ShouldNotBeNil) nodeInfo, err := GetNodeInfo(BP.NodeID) log.Debugf("nodeInfo %v", nodeInfo) @@ -120,33 +120,105 @@ func TestDB(t *testing.T) { So(pubk, ShouldBeNil) So(errors.Cause(err), ShouldEqual, ErrKeyNotFound) - err = removeBucket() + err = ResetBucket() So(err, ShouldBeNil) - pubk, err = GetPublicKey(proto.NodeID("not exist")) + pubk, err = GetPublicKey(proto.NodeID("2222")) So(pubk, ShouldBeNil) - So(errors.Cause(err), ShouldEqual, ErrBucketNotInitialized) - - err = setNode(node1) - So(errors.Cause(err), ShouldEqual, ErrBucketNotInitialized) - - err = DelNode(proto.NodeID("2222")) - So(errors.Cause(err), ShouldEqual, ErrBucketNotInitialized) + So(errors.Cause(err), ShouldEqual, ErrKeyNotFound) IDs, err = GetAllNodeID() So(IDs, ShouldBeNil) - So(errors.Cause(err), ShouldEqual, ErrBucketNotInitialized) + So(err, ShouldBeNil) + }) +} - err = 
ResetBucket() +func TestInvalidKeystoreFileRecover(t *testing.T) { + Convey("invalid file recover", t, func() { + os.Remove(dbFile + ".bak") + os.Remove(dbFile) + defer os.Remove(dbFile + ".bak") + defer os.Remove(dbFile) + pks = nil + var err error + err = ioutil.WriteFile(dbFile, []byte("UNKNOWN_DATA_MUST_NOT_BE_A_SQLITE_DATABASE"), 0600) + So(err, ShouldBeNil) + st, err := os.Stat(dbFile) + So(err, ShouldBeNil) + So(st.IsDir(), ShouldBeFalse) + err = InitPublicKeyStore(dbFile, nil) So(err, ShouldBeNil) + // backup should exists + st, err = os.Stat(dbFile + ".bak") + So(err, ShouldBeNil) + So(st.IsDir(), ShouldBeFalse) + }) - pubk, err = GetPublicKey(proto.NodeID("2222")) - So(pubk, ShouldBeNil) - So(errors.Cause(err), ShouldEqual, ErrKeyNotFound) + Convey("backup keystore file should not be overwritten if exists", t, func() { + backupFile := dbFile + ".bak" + os.Remove(backupFile) + os.Remove(dbFile) + defer os.Remove(backupFile) + defer os.Remove(dbFile) - IDs, err = GetAllNodeID() - So(IDs, ShouldBeNil) + pks = nil + + var err error + err = ioutil.WriteFile(dbFile, []byte("backup_1"), 0600) + So(err, ShouldBeNil) + err = ioutil.WriteFile(backupFile, []byte("backup_2"), 0600) + So(err, ShouldBeNil) + st, err := os.Stat(dbFile) + So(err, ShouldBeNil) + So(st.IsDir(), ShouldBeFalse) + st, err = os.Stat(backupFile) + So(err, ShouldBeNil) + So(st.IsDir(), ShouldBeFalse) + err = InitPublicKeyStore(dbFile, nil) + So(err, ShouldBeNil) + // backup should not be overwritten + backupData, err := ioutil.ReadFile(backupFile) + So(err, ShouldBeNil) + So(backupData, ShouldResemble, []byte("backup_2")) + }) + + Convey("sqlite keystore should not be truncated", t, func() { + os.Remove(dbFile) + defer os.Remove(dbFile) + + _, pubKey1, _ := asymmetric.GenSecp256k1KeyPair() + node1 := &proto.Node{ + ID: proto.NodeID("1111"), + Addr: "", + PublicKey: pubKey1, + Nonce: cpuminer.Uint256{}, + } + + var err error + err = InitPublicKeyStore(dbFile, nil) + So(err, ShouldBeNil) + + // 
set node + setNode(node1) + // get node + node, err := GetNodeInfo(node1.ID) + So(node, ShouldNotBeNil) + So(err, ShouldBeNil) + + // clear and init again + pks = nil + err = InitPublicKeyStore(dbFile, nil) + So(err, ShouldBeNil) + + // get again + node, err = GetNodeInfo(node1.ID) + So(node, ShouldNotBeNil) So(err, ShouldBeNil) + + // backup file should not exists + _, err = os.Stat(dbFile + ".bak") + So(err, ShouldNotBeNil) + So(os.IsNotExist(err), ShouldBeTrue) }) } From fe9c2886310f992e13e5f4bc8c12071aabc00f08 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Thu, 21 Mar 2019 16:07:33 +0800 Subject: [PATCH 153/244] Update gitignore for keystore temp files including sqlite wal/shm --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 848756ec3..8aaec41ab 100644 --- a/.gitignore +++ b/.gitignore @@ -10,7 +10,7 @@ idminer coverage.txt analysisVendor.png vendor/**/.gitignore -*.keystore +*.keystore* node_*/ kayak_test *.conf From 625e974a6b6206fa2f3c0948307ccc7316ac880c Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Thu, 21 Mar 2019 16:19:11 +0800 Subject: [PATCH 154/244] Remove current keystore file only when backup already exists --- crypto/kms/pubkeystore.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crypto/kms/pubkeystore.go b/crypto/kms/pubkeystore.go index c18b1df60..3e788c4bc 100644 --- a/crypto/kms/pubkeystore.go +++ b/crypto/kms/pubkeystore.go @@ -355,8 +355,9 @@ func removeFileIfIsNotSQLite(filename string) (err error) { if _, err = os.Stat(bakFile); err != nil && os.IsNotExist(err) { err = nil _ = os.Rename(filename, filename+".bak") + } else { + _ = os.Remove(filename) } - _ = os.Remove(filename) return } From 12dfa79e3914a0b7fce7238efcffaa3f41f749b4 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Thu, 21 Mar 2019 16:29:40 +0800 Subject: [PATCH 155/244] Add kms public keystore recycle function for leak test --- crypto/kms/pubkeystore.go | 10 ++++++++++ crypto/kms/pubkeystore_test.go 
| 3 +++ worker/db_test.go | 1 + 3 files changed, 14 insertions(+) diff --git a/crypto/kms/pubkeystore.go b/crypto/kms/pubkeystore.go index 3e788c4bc..848a71f59 100644 --- a/crypto/kms/pubkeystore.go +++ b/crypto/kms/pubkeystore.go @@ -321,6 +321,16 @@ func ResetBucket() error { return removeBucket() } +// ClosePublicKeyStore closes the public key store. +func ClosePublicKeyStore() { + pksLock.Lock() + defer pksLock.Unlock() + if pks != nil { + _ = pks.db.Close() + pks = nil + } +} + func removeFileIfIsNotSQLite(filename string) (err error) { var ( f *os.File diff --git a/crypto/kms/pubkeystore_test.go b/crypto/kms/pubkeystore_test.go index 011442594..d7101936d 100644 --- a/crypto/kms/pubkeystore_test.go +++ b/crypto/kms/pubkeystore_test.go @@ -130,6 +130,9 @@ func TestDB(t *testing.T) { IDs, err = GetAllNodeID() So(IDs, ShouldBeNil) So(err, ShouldBeNil) + + // test close, this close is for leak test purpose + ClosePublicKeyStore() }) } diff --git a/worker/db_test.go b/worker/db_test.go index 9fa895340..3debf8569 100644 --- a/worker/db_test.go +++ b/worker/db_test.go @@ -433,6 +433,7 @@ func TestInitFailed(t *testing.T) { func TestDatabaseRecycle(t *testing.T) { defer leaktest.Check(t)() + defer kms.ClosePublicKeyStore() // test init/shutdown/destroy // test goroutine status From 180d97d7bbb30fc3e838274aad0dbfbb74392d5b Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Wed, 20 Mar 2019 10:37:57 +0800 Subject: [PATCH 156/244] Fix invalid format directive --- worker/dbms.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/worker/dbms.go b/worker/dbms.go index 266632c41..fa65144d2 100644 --- a/worker/dbms.go +++ b/worker/dbms.go @@ -543,17 +543,17 @@ func (dbms *DBMS) checkPermission(addr proto.AccountAddress, switch queryType { case types.ReadQuery: if !permStat.Permission.HasReadPermission() { - err = errors.Wrapf(ErrPermissionDeny, "cannot read, permission: %d", permStat.Permission) + err = errors.Wrapf(ErrPermissionDeny, "cannot read, permission: 
%v", permStat.Permission) return } case types.WriteQuery: if !permStat.Permission.HasWritePermission() { - err = errors.Wrapf(ErrPermissionDeny, "cannot write, permission: %d", permStat.Permission) + err = errors.Wrapf(ErrPermissionDeny, "cannot write, permission: %v", permStat.Permission) return } default: err = errors.Wrapf(ErrInvalidPermission, - "invalid permission, permission: %d", permStat.Permission) + "invalid permission, permission: %v", permStat.Permission) return } From d7a614b2e3019ba9a44a3731855e1092d475ffcf Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Thu, 21 Mar 2019 16:34:47 +0800 Subject: [PATCH 157/244] Close opened public key store before init --- crypto/kms/pubkeystore.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crypto/kms/pubkeystore.go b/crypto/kms/pubkeystore.go index 848a71f59..b198ee690 100644 --- a/crypto/kms/pubkeystore.go +++ b/crypto/kms/pubkeystore.go @@ -117,6 +117,9 @@ var ( func InitPublicKeyStore(dbPath string, initNodes []proto.Node) (err error) { //testFlag := flag.Lookup("test") //log.Debugf("%#v %#v", testFlag, testFlag.Value) + // close already opened public key store + ClosePublicKeyStore() + pksLock.Lock() InitBP() From 4c65bae38c5d7af83f7eb9140281a51ba82d7bfa Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Thu, 21 Mar 2019 16:51:35 +0800 Subject: [PATCH 158/244] Fix corner case in keystore unit test --- consistent/consistent_test.go | 2 +- consistent/load_test.go | 5 +++-- crypto/kms/pubkeystore_test.go | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/consistent/consistent_test.go b/consistent/consistent_test.go index e2e476bdc..2dfabb451 100644 --- a/consistent/consistent_test.go +++ b/consistent/consistent_test.go @@ -34,7 +34,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/utils/log" ) -const testStorePath = "./test.store" +const testStorePath = "./test.keystore" var o sync.Once diff --git a/consistent/load_test.go b/consistent/load_test.go index d37fbcada..5fb2aadc5 100644 --- 
a/consistent/load_test.go +++ b/consistent/load_test.go @@ -27,8 +27,8 @@ import ( "github.com/CovenantSQL/CovenantSQL/utils" ) -const testStorePath1 = "./test.store1" -const testStorePath2 = "./test.store2" +const testStorePath1 = "./test1.keystore" +const testStorePath2 = "./test2.keystore" func TestSaveDHT(t *testing.T) { kms.Unittest = true @@ -43,6 +43,7 @@ func TestSaveDHT(t *testing.T) { So(len(x.circle), ShouldEqual, x.NumberOfReplicas*2) So(len(x.sortedHashes), ShouldEqual, x.NumberOfReplicas*2) So(sort.IsSorted(x.sortedHashes), ShouldBeTrue) + kms.ClosePublicKeyStore() utils.CopyFile(testStorePath1, testStorePath2) }) } diff --git a/crypto/kms/pubkeystore_test.go b/crypto/kms/pubkeystore_test.go index d7101936d..04dfc2cc7 100644 --- a/crypto/kms/pubkeystore_test.go +++ b/crypto/kms/pubkeystore_test.go @@ -33,7 +33,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/utils/log" ) -const dbFile = ".test.db" +const dbFile = ".test.keystore" func TestDB(t *testing.T) { log.SetLevel(log.DebugLevel) From cc9fbfcc2151cd0af5d51d6253e56d7be569520d Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Thu, 21 Mar 2019 17:09:59 +0800 Subject: [PATCH 159/244] Fix error on empty or file length shorted than sqlite magic files --- crypto/kms/pubkeystore.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/crypto/kms/pubkeystore.go b/crypto/kms/pubkeystore.go index b198ee690..2ab4e1d0c 100644 --- a/crypto/kms/pubkeystore.go +++ b/crypto/kms/pubkeystore.go @@ -18,6 +18,7 @@ package kms import ( "database/sql" + "io" "os" "path/filepath" "runtime" @@ -348,7 +349,7 @@ func removeFileIfIsNotSQLite(filename string) (err error) { return } - if _, err = f.Read(fileHeader[:]); err != nil { + if _, err = f.Read(fileHeader[:]); err != nil && errors.Cause(err) != io.EOF { // read file failed _ = f.Close() return From d272202baad1ea13141e4fe745c7d24ef3657a57 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Thu, 21 Mar 2019 17:33:42 +0800 Subject: [PATCH 160/244] Fix twopc import 
cycle bug --- twopc/twopc_test.go | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/twopc/twopc_test.go b/twopc/twopc_test.go index fa0accf04..87a6862d1 100644 --- a/twopc/twopc_test.go +++ b/twopc/twopc_test.go @@ -14,12 +14,13 @@ * limitations under the License. */ -package twopc +package twopc_test import ( "context" "errors" "fmt" + "github.com/CovenantSQL/CovenantSQL/twopc" "net" "os" "strconv" @@ -49,7 +50,7 @@ const ( ) var ( - nodes []Worker + nodes []twopc.Worker policy TestPolicy pass = "DU>p~[/dd2iImUs*" ) @@ -215,7 +216,7 @@ func (r *RaftNodeRPCServer) RPCRollback(req *RaftRollbackReq, resp *RaftRollback return nil } -func (r *RaftNode) Prepare(ctx context.Context, wb WriteBatch) (err error) { +func (r *RaftNode) Prepare(ctx context.Context, wb twopc.WriteBatch) (err error) { log.WithFields(log.Fields{ "addr": r.addr, "phase": "prepare", @@ -269,7 +270,7 @@ func (r *RaftNode) Prepare(ctx context.Context, wb WriteBatch) (err error) { return err } -func (r *RaftNode) Commit(ctx context.Context, wb WriteBatch) (result interface{}, err error) { +func (r *RaftNode) Commit(ctx context.Context, wb twopc.WriteBatch) (result interface{}, err error) { log.Debugf("executing 2pc: addr = %s, phase = commit", r.addr) defer log.Debugf("2pc result: addr = %s, phase = commit, result = %v", r.addr, err) @@ -318,7 +319,7 @@ func (r *RaftNode) Commit(ctx context.Context, wb WriteBatch) (result interface{ return } -func (r *RaftNode) Rollback(ctx context.Context, wb WriteBatch) (err error) { +func (r *RaftNode) Rollback(ctx context.Context, wb twopc.WriteBatch) (err error) { log.Debugf("executing 2pc: addr = %s, phase = rollback", r.addr) defer log.Debugf("2pc result: addr = %s, phase = rollback, result = %v", r.addr, err) @@ -368,7 +369,7 @@ func (r *RaftNode) Rollback(ctx context.Context, wb WriteBatch) (err error) { func testSetup() (err error) { log.SetLevel(log.DebugLevel) - nodes = make([]Worker, 10) + nodes = 
make([]twopc.Worker, 10) for index := 0; index < 10; index++ { nodes[index], err = NewRaftNode() @@ -414,7 +415,7 @@ func TestMain(m *testing.M) { } func TestTwoPhaseCommit(t *testing.T) { - c := NewCoordinator(&Options{timeout: 5 * time.Second}) + c := twopc.NewCoordinator(twopc.NewOptions(5 * time.Second)) testNodeReset() @@ -461,29 +462,30 @@ func TestTwoPhaseCommit_WithHooks(t *testing.T) { beforeRollbackError := errors.New("before rollback error") policy = AllGood - c := NewCoordinator(&Options{ - timeout: 5 * time.Second, - beforePrepare: func(cxt context.Context) error { + c := twopc.NewCoordinator(twopc.NewOptionsWithCallback( + 5*time.Second, + func(cxt context.Context) error { if errorBeforePrepare { return beforePrepareError } return nil - }, - beforeCommit: func(ctx context.Context) error { + }, // before prepare + func(ctx context.Context) error { if errorBeforeCommit { return beforeCommitError } return nil - }, - beforeRollback: func(ctx context.Context) error { + }, // before commit + func(ctx context.Context) error { if errorBeforeRollback { return beforeRollbackError } return nil - }}) + }, // before rollback + nil)) testNodeReset() From eea91b545cc40ded8174a4fe9c1396b29c9ded38 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Thu, 21 Mar 2019 18:34:46 +0800 Subject: [PATCH 161/244] Recycle keystore file in unit test --- client/helper_test.go | 4 +- cmd/cql-minerd/node.go | 4 +- cmd/cqld/bench_test.go | 8 +- consistent/consistent_test.go | 154 ++++++++++++++++----------------- consistent/load_test.go | 9 +- crypto/kms/pubkeystore_test.go | 49 +++++++---- metric/rpc_test.go | 4 +- route/acl_test.go | 6 +- route/service_test.go | 11 ++- rpc/client_test.go | 2 +- rpc/leak_test.go | 2 +- rpc/rpcutil_test.go | 33 ++++--- rpc/server_test.go | 10 +-- twopc/twopc_test.go | 3 +- utils/remove.go | 34 ++++++++ utils/remove_test.go | 48 ++++++++++ worker/chainbusservice_test.go | 4 +- worker/helper_test.go | 4 +- 18 files changed, 240 insertions(+), 149 
deletions(-) create mode 100644 utils/remove.go create mode 100644 utils/remove_test.go diff --git a/client/helper_test.go b/client/helper_test.go index 0485488b2..f0c1eb034 100644 --- a/client/helper_test.go +++ b/client/helper_test.go @@ -201,9 +201,9 @@ func initNode() (cleanupFunc func(), tempDir string, server *rpc.Server, err err // init conf _, testFile, _, _ := runtime.Caller(0) pubKeyStoreFile := filepath.Join(tempDir, PubKeyStorePath+"_dht") - os.Remove(pubKeyStoreFile) + utils.RemoveAll(pubKeyStoreFile + "*") clientPubKeyStoreFile := filepath.Join(tempDir, PubKeyStorePath+"_c") - os.Remove(clientPubKeyStoreFile) + utils.RemoveAll(clientPubKeyStoreFile + "*") dupConfFile := filepath.Join(tempDir, "config.yaml") confFile := filepath.Join(filepath.Dir(testFile), "../test/node_standalone/config.yaml") if err = utils.DupConf(confFile, dupConfFile); err != nil { diff --git a/cmd/cql-minerd/node.go b/cmd/cql-minerd/node.go index 390ebd04b..d4572b5e0 100644 --- a/cmd/cql-minerd/node.go +++ b/cmd/cql-minerd/node.go @@ -18,7 +18,6 @@ package main import ( "fmt" - "os" "syscall" "time" @@ -28,6 +27,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/route" "github.com/CovenantSQL/CovenantSQL/rpc" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" ) @@ -69,7 +69,7 @@ func initNode() (server *rpc.Server, err error) { } func createServer(privateKeyPath, pubKeyStorePath string, masterKey []byte, listenAddr string) (server *rpc.Server, err error) { - os.Remove(pubKeyStorePath) + utils.RemoveAll(pubKeyStorePath + "*") server = rpc.NewServer() if err != nil { diff --git a/cmd/cqld/bench_test.go b/cmd/cqld/bench_test.go index 3bbd21fd0..df4d35504 100644 --- a/cmd/cqld/bench_test.go +++ b/cmd/cqld/bench_test.go @@ -63,7 +63,7 @@ func start3BPs() { var cmd *utils.CMD os.Remove(FJ(testWorkingDir, "./node_0/chain.db")) os.Remove(FJ(testWorkingDir, "./node_0/dht.db")) - 
os.Remove(FJ(testWorkingDir, "./node_0/public.keystore")) + utils.RemoveAll(FJ(testWorkingDir, "./node_0/public.keystore*")) if cmd, err = utils.RunCommandNB( FJ(baseDir, "./bin/cqld.test"), []string{"-config", FJ(testWorkingDir, "./node_0/config.yaml"), @@ -77,7 +77,7 @@ func start3BPs() { } os.Remove(FJ(testWorkingDir, "./node_1/chain.db")) os.Remove(FJ(testWorkingDir, "./node_1/dht.db")) - os.Remove(FJ(testWorkingDir, "./node_1/public.keystore")) + utils.RemoveAll(FJ(testWorkingDir, "./node_1/public.keystore*")) if cmd, err = utils.RunCommandNB( FJ(baseDir, "./bin/cqld.test"), []string{"-config", FJ(testWorkingDir, "./node_1/config.yaml"), @@ -91,7 +91,7 @@ func start3BPs() { } os.Remove(FJ(testWorkingDir, "./node_2/chain.db")) os.Remove(FJ(testWorkingDir, "./node_2/dht.db")) - os.Remove(FJ(testWorkingDir, "./node_2/public.keystore")) + utils.RemoveAll(FJ(testWorkingDir, "./node_2/public.keystore*")) if cmd, err = utils.RunCommandNB( FJ(baseDir, "./bin/cqld.test"), []string{"-config", FJ(testWorkingDir, "./node_2/config.yaml"), @@ -104,7 +104,7 @@ func start3BPs() { log.Errorf("start node failed: %v", err) } os.Remove(FJ(testWorkingDir, "./node_c/dht.db")) - os.Remove(FJ(testWorkingDir, "./node_c/public.keystore")) + utils.RemoveAll(FJ(testWorkingDir, "./node_c/public.keystore*")) } func stopNodes() { diff --git a/consistent/consistent_test.go b/consistent/consistent_test.go index 2dfabb451..3b000c358 100644 --- a/consistent/consistent_test.go +++ b/consistent/consistent_test.go @@ -18,7 +18,6 @@ package consistent import ( "math/rand" - "os" "runtime" "sort" "strconv" @@ -31,6 +30,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" . 
"github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" ) @@ -41,7 +41,7 @@ var o sync.Once func init() { o.Do(func() { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") x, _ := InitConsistent(testStorePath, new(KMSStorage), false) if x == nil { log.Fatal("InitConsistent failed") @@ -68,10 +68,10 @@ func NewNodeFromString(id string) Node { func TestNew(t *testing.T) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") if x == nil { t.Error("expected obj") } @@ -79,11 +79,11 @@ func TestNew(t *testing.T) { func TestAdd(t *testing.T) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Add(NewNodeFromString("0000000000000000000000000000000000000000000000000000000000000000")) CheckNum(len(x.circle), x.NumberOfReplicas, t) CheckNum(len(x.sortedHashes), x.NumberOfReplicas, t) @@ -100,11 +100,11 @@ func TestAdd(t *testing.T) { func TestRemove(t *testing.T) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Add(NewNodeFromString("0000000000000000000000000000000000000000000000000000000000000000")) x.Remove("0000000000000000000000000000000000000000000000000000000000000000") CheckNum(len(x.circle), 0, t) @@ -113,11 +113,11 @@ func TestRemove(t *testing.T) { func TestRemoveNonExisting(t *testing.T) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ 
:= InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Add(NewNodeFromString("0000000000000000000000000000000000000000000000000000000000000000")) x.Remove("0000000000000000000000000000000000000000000000000000000000000000hijk") CheckNum(len(x.circle), x.NumberOfReplicas, t) @@ -125,11 +125,11 @@ func TestRemoveNonExisting(t *testing.T) { func TestGetEmpty(t *testing.T) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") _, err := x.GetNeighbor("asdfsadfsadf") if err == nil { t.Error("expected error") @@ -141,11 +141,11 @@ func TestGetEmpty(t *testing.T) { func TestGetSingle(t *testing.T) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Add(NewNodeFromString("0000000000000000000000000000000000000000000000000000000000000000")) f := func(s string) bool { y, err := x.GetNeighbor(s) @@ -162,11 +162,11 @@ func TestGetSingle(t *testing.T) { } func TestConsistent_GetNode(t *testing.T) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() nodeID := "40f26f9c816577adcb271734fec72c7640f26f9c816577adcb271734fec72c76" x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Add(NewNodeFromString(nodeID)) f := func(s string) bool { _, err := x.GetNode(s) @@ -197,11 +197,11 @@ var gmtests = []gtest{ func TestGetMultiple(t *testing.T) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, 
new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Add(NewNodeFromString("0000000000000000000000000000000000000000000000000000000000000000")) x.Add(NewNodeFromString("1111111111111111111111111111111111111111111111111111111111111111")) x.Add(NewNodeFromString("2222222222222222222222222222222222222222222222222222222222222222")) @@ -218,11 +218,11 @@ func TestGetMultiple(t *testing.T) { func TestGetMultipleQuick(t *testing.T) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Add(NewNodeFromString("0000000000000000000000000000000000000000000000000000000000000000")) x.Add(NewNodeFromString("1111111111111111111111111111111111111111111111111111111111111111")) x.Add(NewNodeFromString("2222222222222222222222222222222222222222222222222222222222222222")) @@ -254,11 +254,11 @@ var rtestsAfter = []gtest{ func TestGetMultipleRemove(t *testing.T) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Add(NewNodeFromString("0000000000000000000000000000000000000000000000000000000000000000")) x.Add(NewNodeFromString("1111111111111111111111111111111111111111111111111111111111111111")) x.Add(NewNodeFromString("2222222222222222222222222222222222222222222222222222222222222222")) @@ -285,11 +285,11 @@ func TestGetMultipleRemove(t *testing.T) { func TestGetMultipleRemoveQuick(t *testing.T) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") 
x.Add(NewNodeFromString("0000000000000000000000000000000000000000000000000000000000000000")) x.Add(NewNodeFromString("1111111111111111111111111111111111111111111111111111111111111111")) x.Add(NewNodeFromString("2222222222222222222222222222222222222222222222222222222222222222")) @@ -310,11 +310,11 @@ func TestGetMultipleRemoveQuick(t *testing.T) { func TestGetTwo(t *testing.T) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Add(NewNodeFromString("0000000000000000000000000000000000000000000000000000000000000000")) x.Add(NewNodeFromString("1111111111111111111111111111111111111111111111111111111111111111")) x.Add(NewNodeFromString("2222222222222222222222222222222222222222222222222222222222222222")) @@ -335,11 +335,11 @@ func TestGetTwo(t *testing.T) { func TestGetTwoEmpty(t *testing.T) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") _, _, err := x.GetTwoNeighbors("9999999999999999999999999999999999999999999999999999999999999999") if err != ErrEmptyCircle { t.Fatal(err) @@ -348,11 +348,11 @@ func TestGetTwoEmpty(t *testing.T) { func TestGetTwoQuick(t *testing.T) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Add(NewNodeFromString("0000000000000000000000000000000000000000000000000000000000000000")) x.Add(NewNodeFromString("1111111111111111111111111111111111111111111111111111111111111111")) x.Add(NewNodeFromString("2222222222222222222222222222222222222222222222222222222222222222")) @@ 
-384,11 +384,11 @@ func TestGetTwoQuick(t *testing.T) { func TestGetTwoOnlyTwoQuick(t *testing.T) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Add(NewNodeFromString("0000000000000000000000000000000000000000000000000000000000000000")) x.Add(NewNodeFromString("1111111111111111111111111111111111111111111111111111111111111111")) f := func(s string) bool { @@ -419,11 +419,11 @@ func TestGetTwoOnlyTwoQuick(t *testing.T) { func TestGetTwoOnlyOneInCircle(t *testing.T) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Add(NewNodeFromString("0000000000000000000000000000000000000000000000000000000000000000")) a, b, err := x.GetTwoNeighbors("9999999999999999999999999999999999999999999999999999999999999999") if err != nil { @@ -442,11 +442,11 @@ func TestGetTwoOnlyOneInCircle(t *testing.T) { func TestGetN(t *testing.T) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Add(NewNodeFromString("0000000000000000000000000000000000000000000000000000000000000000")) x.Add(NewNodeFromString("1111111111111111111111111111111111111111111111111111111111111111")) x.Add(NewNodeFromString("2222222222222222222222222222222222222222222222222222222222222222")) @@ -471,11 +471,11 @@ func TestGetN(t *testing.T) { func TestGetNFilterRole(t *testing.T) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), 
false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") n := NewNodeFromString("0000000000000000000000000000000000000000000000000000000000000000") n.Role = Leader x.Add(n) @@ -509,11 +509,11 @@ func TestGetNFilterRole(t *testing.T) { func TestGetNLess(t *testing.T) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Add(NewNodeFromString("0000000000000000000000000000000000000000000000000000000000000000")) x.Add(NewNodeFromString("1111111111111111111111111111111111111111111111111111111111111111")) x.Add(NewNodeFromString("2222222222222222222222222222222222222222222222222222222222222222")) @@ -534,11 +534,11 @@ func TestGetNLess(t *testing.T) { func TestGetNMore(t *testing.T) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Add(NewNodeFromString("0000000000000000000000000000000000000000000000000000000000000000")) x.Add(NewNodeFromString("1111111111111111111111111111111111111111111111111111111111111111")) x.Add(NewNodeFromString("2222222222222222222222222222222222222222222222222222222222222222")) @@ -562,11 +562,11 @@ func TestGetNMore(t *testing.T) { func TestGetNEmpty(t *testing.T) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") members, err := x.GetNeighbors("9999999", 5) if err != ErrEmptyCircle { t.Fatal(err) @@ -578,11 +578,11 @@ func TestGetNEmpty(t *testing.T) { func TestGetNQuick(t *testing.T) { kms.Unittest = true - os.Remove(testStorePath) + 
utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Add(NewNodeFromString("0000000000000000000000000000000000000000000000000000000000000000")) x.Add(NewNodeFromString("1111111111111111111111111111111111111111111111111111111111111111")) x.Add(NewNodeFromString("2222222222222222222222222222222222222222222222222222222222222222")) @@ -617,11 +617,11 @@ func TestGetNQuick(t *testing.T) { func TestGetNLessQuick(t *testing.T) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Add(NewNodeFromString("0000000000000000000000000000000000000000000000000000000000000000")) x.Add(NewNodeFromString("1111111111111111111111111111111111111111111111111111111111111111")) x.Add(NewNodeFromString("2222222222222222222222222222222222222222222222222222222222222222")) @@ -656,11 +656,11 @@ func TestGetNLessQuick(t *testing.T) { func TestGetNMoreQuick(t *testing.T) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Add(NewNodeFromString("0000000000000000000000000000000000000000000000000000000000000000")) x.Add(NewNodeFromString("1111111111111111111111111111111111111111111111111111111111111111")) x.Add(NewNodeFromString("2222222222222222222222222222222222222222222222222222222222222222")) @@ -695,11 +695,11 @@ func TestGetNMoreQuick(t *testing.T) { func TestSet(t *testing.T) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer 
os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Add(NewNodeFromString("0000")) x.Add(NewNodeFromString("1111")) x.Add(NewNodeFromString("2222")) @@ -778,11 +778,11 @@ func mallocNum(f func()) uint64 { func BenchmarkAllocations(b *testing.B) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") //kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Add(NewNodeFromString("stays")) b.ResetTimer() allocSize := allocBytes(func() { @@ -796,11 +796,11 @@ func BenchmarkAllocations(b *testing.B) { func BenchmarkMalloc(b *testing.B) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Add(NewNodeFromString("stays")) b.ResetTimer() mallocs := mallocNum(func() { @@ -814,11 +814,11 @@ func BenchmarkMalloc(b *testing.B) { func BenchmarkCycle(b *testing.B) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Add(NewNodeFromString("nothing")) b.ResetTimer() for i := 0; i < b.N; i++ { @@ -829,11 +829,11 @@ func BenchmarkCycle(b *testing.B) { func BenchmarkCycleLarge(b *testing.B) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") for i := 0; i < 10; i++ { x.Add(NewNodeFromString("start" + strconv.Itoa(i))) } @@ -846,11 +846,11 @@ func BenchmarkCycleLarge(b *testing.B) { func BenchmarkGet(b *testing.B) { kms.Unittest = true - 
os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Add(NewNodeFromString("nothing")) b.ResetTimer() for i := 0; i < b.N; i++ { @@ -860,11 +860,11 @@ func BenchmarkGet(b *testing.B) { func BenchmarkGetLarge(b *testing.B) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") for i := 0; i < 10; i++ { x.Add(NewNodeFromString("start" + strconv.Itoa(i))) } @@ -876,11 +876,11 @@ func BenchmarkGetLarge(b *testing.B) { func BenchmarkGetN(b *testing.B) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Add(NewNodeFromString("nothing")) b.ResetTimer() for i := 0; i < b.N; i++ { @@ -890,11 +890,11 @@ func BenchmarkGetN(b *testing.B) { func BenchmarkGetNLarge(b *testing.B) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") for i := 0; i < 10; i++ { x.Add(NewNodeFromString("start" + strconv.Itoa(i))) } @@ -906,11 +906,11 @@ func BenchmarkGetNLarge(b *testing.B) { func BenchmarkGetTwo(b *testing.B) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Add(NewNodeFromString("nothing")) b.ResetTimer() for i := 0; i < b.N; i++ { @@ -920,11 +920,11 
@@ func BenchmarkGetTwo(b *testing.B) { func BenchmarkGetTwoLarge(b *testing.B) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") for i := 0; i < 10; i++ { x.Add(NewNodeFromString("start" + strconv.Itoa(i))) } @@ -941,11 +941,11 @@ func TestAddCollision(t *testing.T) { const s1 = "111111" const s2 = "222222" kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Add(NewNodeFromString(s1)) x.Add(NewNodeFromString(s2)) elt1, err := x.GetNeighbor("111111") @@ -954,7 +954,7 @@ func TestAddCollision(t *testing.T) { } y, _ := InitConsistent(testStorePath+"2", new(KMSStorage), false) - defer os.Remove(testStorePath + "2") + defer utils.RemoveAll(testStorePath + "*") // add elements in opposite order y.Add(NewNodeFromString(s2)) y.Add(NewNodeFromString(s1)) @@ -970,11 +970,11 @@ func TestAddCollision(t *testing.T) { func TestConcurrentGetSet(t *testing.T) { kms.Unittest = true - os.Remove(testStorePath) + utils.RemoveAll(testStorePath + "*") kms.ResetBucket() x, _ := InitConsistent(testStorePath, new(KMSStorage), false) - defer os.Remove(testStorePath) + defer utils.RemoveAll(testStorePath + "*") x.Set([]Node{NewNodeFromString("0000"), NewNodeFromString("1111"), NewNodeFromString("2222"), NewNodeFromString("3333"), NewNodeFromString("4444")}) var wg sync.WaitGroup diff --git a/consistent/load_test.go b/consistent/load_test.go index 5fb2aadc5..b771498bc 100644 --- a/consistent/load_test.go +++ b/consistent/load_test.go @@ -17,7 +17,6 @@ package consistent import ( - "os" "sort" "testing" @@ -32,8 +31,8 @@ const testStorePath2 = "./test2.keystore" func TestSaveDHT(t *testing.T) { kms.Unittest = 
true - os.Remove(testStorePath1) - os.Remove(testStorePath2) + utils.RemoveAll(testStorePath1 + "*") + utils.RemoveAll(testStorePath2 + "*") //kms.ResetBucket() Convey("save DHT", t, func() { @@ -52,8 +51,8 @@ func TestLoadDHT(t *testing.T) { Convey("load existing DHT", t, func() { kms.Unittest = true x, _ := InitConsistent(testStorePath2, new(KMSStorage), false) - defer os.Remove(testStorePath1) - defer os.Remove(testStorePath2) + defer utils.RemoveAll(testStorePath1 + "*") + defer utils.RemoveAll(testStorePath2 + "*") // with BP node, there should be 3 nodes So(len(x.circle), ShouldEqual, x.NumberOfReplicas*2) So(len(x.sortedHashes), ShouldEqual, x.NumberOfReplicas*2) diff --git a/crypto/kms/pubkeystore_test.go b/crypto/kms/pubkeystore_test.go index 04dfc2cc7..1f6c5e041 100644 --- a/crypto/kms/pubkeystore_test.go +++ b/crypto/kms/pubkeystore_test.go @@ -36,7 +36,6 @@ import ( const dbFile = ".test.keystore" func TestDB(t *testing.T) { - log.SetLevel(log.DebugLevel) privKey1, pubKey1, _ := asymmetric.GenSecp256k1KeyPair() privKey2, pubKey2, _ := asymmetric.GenSecp256k1KeyPair() node1 := &proto.Node{ @@ -59,9 +58,9 @@ func TestDB(t *testing.T) { } Convey("Init db", t, func() { - pks = nil - os.Remove(dbFile) - defer os.Remove(dbFile) + ClosePublicKeyStore() + utils.RemoveAll(dbFile + "*") + defer utils.RemoveAll(dbFile + "*") InitPublicKeyStore(dbFile, []proto.Node{*BPNode}) nodeInfo, err := GetNodeInfo(BP.NodeID) @@ -138,11 +137,9 @@ func TestDB(t *testing.T) { func TestInvalidKeystoreFileRecover(t *testing.T) { Convey("invalid file recover", t, func() { - os.Remove(dbFile + ".bak") - os.Remove(dbFile) - defer os.Remove(dbFile + ".bak") - defer os.Remove(dbFile) - pks = nil + ClosePublicKeyStore() + utils.RemoveAll(dbFile + "*") + defer utils.RemoveAll(dbFile + "*") var err error err = ioutil.WriteFile(dbFile, []byte("UNKNOWN_DATA_MUST_NOT_BE_A_SQLITE_DATABASE"), 0600) So(err, ShouldBeNil) @@ -158,13 +155,10 @@ func TestInvalidKeystoreFileRecover(t *testing.T) { 
}) Convey("backup keystore file should not be overwritten if exists", t, func() { + ClosePublicKeyStore() backupFile := dbFile + ".bak" - os.Remove(backupFile) - os.Remove(dbFile) - defer os.Remove(backupFile) - defer os.Remove(dbFile) - - pks = nil + utils.RemoveAll(dbFile + "*") + defer utils.RemoveAll(dbFile + "*") var err error err = ioutil.WriteFile(dbFile, []byte("backup_1"), 0600) @@ -186,8 +180,9 @@ func TestInvalidKeystoreFileRecover(t *testing.T) { }) Convey("sqlite keystore should not be truncated", t, func() { - os.Remove(dbFile) - defer os.Remove(dbFile) + ClosePublicKeyStore() + utils.RemoveAll(dbFile + "*") + defer utils.RemoveAll(dbFile + "*") _, pubKey1, _ := asymmetric.GenSecp256k1KeyPair() node1 := &proto.Node{ @@ -209,7 +204,7 @@ func TestInvalidKeystoreFileRecover(t *testing.T) { So(err, ShouldBeNil) // clear and init again - pks = nil + ClosePublicKeyStore() err = InitPublicKeyStore(dbFile, nil) So(err, ShouldBeNil) @@ -223,11 +218,27 @@ func TestInvalidKeystoreFileRecover(t *testing.T) { So(err, ShouldNotBeNil) So(os.IsNotExist(err), ShouldBeTrue) }) + + Convey("test empty file", t, func() { + ClosePublicKeyStore() + utils.RemoveAll(dbFile + "*") + defer utils.RemoveAll(dbFile + "*") + + err := ioutil.WriteFile(dbFile, []byte{}, 0600) + So(err, ShouldBeNil) + + err = InitPublicKeyStore(dbFile, nil) + So(err, ShouldBeNil) + + st, err := os.Stat(dbFile + ".bak") + So(err, ShouldBeNil) + So(st.Size(), ShouldEqual, 0) + }) } func TestErrorPath(t *testing.T) { Convey("can not init db", t, func() { - pks = nil + ClosePublicKeyStore() err := InitPublicKeyStore("/path/not/exist", nil) So(pks, ShouldBeNil) So(err, ShouldNotBeNil) diff --git a/metric/rpc_test.go b/metric/rpc_test.go index 34e3a2258..1efe21791 100644 --- a/metric/rpc_test.go +++ b/metric/rpc_test.go @@ -17,7 +17,6 @@ package metric import ( - "os" "testing" "time" @@ -29,13 +28,14 @@ import ( "github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" 
"github.com/CovenantSQL/CovenantSQL/rpc" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" ) const PubKeyStorePath = "./public.keystore" func TestCollectClient_UploadMetrics(t *testing.T) { - defer os.Remove(PubKeyStorePath) + defer utils.RemoveAll(PubKeyStorePath + "*") log.SetLevel(log.DebugLevel) addr := "127.0.0.1:0" masterKey := []byte("abc") diff --git a/route/acl_test.go b/route/acl_test.go index 608adbcea..4ba96e230 100644 --- a/route/acl_test.go +++ b/route/acl_test.go @@ -18,7 +18,6 @@ package route import ( "fmt" - "os" "path/filepath" "runtime" "sync" @@ -29,6 +28,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" ) @@ -36,8 +36,8 @@ const PubKeyStorePath = "./acl.keystore" func TestIsPermitted(t *testing.T) { log.SetLevel(log.DebugLevel) - os.Remove(PubKeyStorePath) - defer os.Remove(PubKeyStorePath) + utils.RemoveAll(PubKeyStorePath + "*") + defer utils.RemoveAll(PubKeyStorePath + "*") _, testFile, _, _ := runtime.Caller(0) confFile := filepath.Join(filepath.Dir(testFile), "../test/node_0/config.yaml") diff --git a/route/service_test.go b/route/service_test.go index 86e8d7fb9..0a16330e4 100644 --- a/route/service_test.go +++ b/route/service_test.go @@ -20,7 +20,6 @@ import ( "fmt" "net" "net/rpc" - "os" "strings" "testing" "time" @@ -35,11 +34,11 @@ import ( "github.com/CovenantSQL/CovenantSQL/utils/log" ) -const DHTStorePath = "./DHTStore" +const DHTStorePath = "./DHTStore.keystore" func TestDHTService_FindNeighbor_FindNode(t *testing.T) { - os.Remove(DHTStorePath) - defer os.Remove(DHTStorePath + "1") + utils.RemoveAll(DHTStorePath + "*") + defer utils.RemoveAll(DHTStorePath + "*") log.SetLevel(log.DebugLevel) addr := "127.0.0.1:0" dht, _ := NewDHTService(DHTStorePath+"1", new(consistent.KMSStorage), false) @@ -202,8 +201,8 
@@ func TestDHTService_FindNeighbor_FindNode(t *testing.T) { } func TestDHTService_Ping(t *testing.T) { - os.Remove(DHTStorePath) - defer os.Remove(DHTStorePath) + utils.RemoveAll(DHTStorePath + "*") + defer utils.RemoveAll(DHTStorePath + "*") log.SetLevel(log.DebugLevel) addr := "127.0.0.1:0" diff --git a/rpc/client_test.go b/rpc/client_test.go index eafebc5e3..26ebff9a5 100644 --- a/rpc/client_test.go +++ b/rpc/client_test.go @@ -62,7 +62,7 @@ func TestDial(t *testing.T) { //func TestDialToNode(t *testing.T) { // Convey("DialToNode error case", t, func() { -// defer os.Remove(publicKeyStore) +// defer utils.RemoveAll(publicKeyStore + "*") // defer os.Remove(privateKey) // c, err := DialToNode(kms.BP.NodeID, nil, false) // So(c, ShouldBeNil) diff --git a/rpc/leak_test.go b/rpc/leak_test.go index 9e93bc9f1..e059bfa9c 100644 --- a/rpc/leak_test.go +++ b/rpc/leak_test.go @@ -39,7 +39,7 @@ func TestSessionPool_SessionBroken(t *testing.T) { t.Errorf("load config from %s failed: %s", FJ(testWorkingDir, "./leak/client.yaml"), err) } log.Debugf("GConf: %##v", conf.GConf) - _ = os.Remove(conf.GConf.PubKeyStoreFile) + utils.RemoveAll(conf.GConf.PubKeyStoreFile + "*") _ = os.Remove(FJ(testWorkingDir, "./leak/leader/dht.db")) _ = os.Remove(FJ(testWorkingDir, "./leak/leader/dht.db-shm")) _ = os.Remove(FJ(testWorkingDir, "./leak/leader/dht.db-wal")) diff --git a/rpc/rpcutil_test.go b/rpc/rpcutil_test.go index a60669fcb..57963d432 100644 --- a/rpc/rpcutil_test.go +++ b/rpc/rpcutil_test.go @@ -20,7 +20,6 @@ import ( "context" "fmt" "io/ioutil" - "os" "path/filepath" "runtime" "strings" @@ -46,10 +45,10 @@ const ( func TestCaller_CallNode(t *testing.T) { log.SetLevel(log.FatalLevel) - os.Remove(PubKeyStorePath) - defer os.Remove(PubKeyStorePath) - os.Remove(publicKeyStore) - defer os.Remove(publicKeyStore) + utils.RemoveAll(PubKeyStorePath + "*") + defer utils.RemoveAll(PubKeyStorePath + "*") + utils.RemoveAll(publicKeyStore + "*") + defer utils.RemoveAll(publicKeyStore + "*") _, 
testFile, _, _ := runtime.Caller(0) confFile := filepath.Join(filepath.Dir(testFile), "../test/node_standalone/config.yaml") @@ -166,10 +165,10 @@ func TestCaller_CallNode(t *testing.T) { func TestNewPersistentCaller(t *testing.T) { log.SetLevel(log.FatalLevel) - os.Remove(PubKeyStorePath) - defer os.Remove(PubKeyStorePath) - os.Remove(publicKeyStore) - defer os.Remove(publicKeyStore) + utils.RemoveAll(PubKeyStorePath + "*") + defer utils.RemoveAll(PubKeyStorePath + "*") + utils.RemoveAll(publicKeyStore + "*") + defer utils.RemoveAll(publicKeyStore + "*") var d string var err error @@ -305,10 +304,10 @@ func TestNewPersistentCaller(t *testing.T) { func BenchmarkPersistentCaller_CallKayakLog(b *testing.B) { log.SetLevel(log.FatalLevel) - os.Remove(PubKeyStorePath) - defer os.Remove(PubKeyStorePath) - os.Remove(publicKeyStore) - defer os.Remove(publicKeyStore) + utils.RemoveAll(PubKeyStorePath + "*") + defer utils.RemoveAll(PubKeyStorePath + "*") + utils.RemoveAll(publicKeyStore + "*") + defer utils.RemoveAll(publicKeyStore + "*") ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) defer cancel() @@ -381,10 +380,10 @@ func (s *fakeService) Call(req *FakeRequest, resp *interface{}) (err error) { func BenchmarkPersistentCaller_Call(b *testing.B) { log.SetLevel(log.InfoLevel) - os.Remove(PubKeyStorePath) - defer os.Remove(PubKeyStorePath) - os.Remove(publicKeyStore) - defer os.Remove(publicKeyStore) + utils.RemoveAll(PubKeyStorePath + "*") + defer utils.RemoveAll(PubKeyStorePath + "*") + utils.RemoveAll(publicKeyStore + "*") + defer utils.RemoveAll(publicKeyStore + "*") ctx, cancel := context.WithTimeout(context.Background(), time.Second*15) defer cancel() diff --git a/rpc/server_test.go b/rpc/server_test.go index a5d6f9574..1d33bf987 100644 --- a/rpc/server_test.go +++ b/rpc/server_test.go @@ -18,7 +18,6 @@ package rpc import ( "net" - "os" "testing" "time" @@ -29,6 +28,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/crypto/kms" 
"github.com/CovenantSQL/CovenantSQL/proto" "github.com/CovenantSQL/CovenantSQL/route" + "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" ) @@ -149,7 +149,7 @@ func TestIncCounterSimpleArgs(t *testing.T) { } func TestEncryptIncCounterSimpleArgs(t *testing.T) { - defer os.Remove(PubKeyStorePath) + defer utils.RemoveAll(PubKeyStorePath + "*") log.SetLevel(log.FatalLevel) addr := "127.0.0.1:0" masterKey := []byte("abc") @@ -187,7 +187,7 @@ func TestEncryptIncCounterSimpleArgs(t *testing.T) { } func TestETLSBug(t *testing.T) { - defer os.Remove(PubKeyStorePath) + defer utils.RemoveAll(PubKeyStorePath + "*") log.SetLevel(log.FatalLevel) addr := "127.0.0.1:0" masterKey := []byte("abc") @@ -233,8 +233,8 @@ func TestETLSBug(t *testing.T) { } func TestEncPingFindNeighbor(t *testing.T) { - os.Remove(PubKeyStorePath) - defer os.Remove(PubKeyStorePath) + utils.RemoveAll(PubKeyStorePath + "*") + defer utils.RemoveAll(PubKeyStorePath + "*") log.SetLevel(log.FatalLevel) addr := "127.0.0.1:0" masterKey := []byte("abc") diff --git a/twopc/twopc_test.go b/twopc/twopc_test.go index 87a6862d1..f4729a4fa 100644 --- a/twopc/twopc_test.go +++ b/twopc/twopc_test.go @@ -20,7 +20,6 @@ import ( "context" "errors" "fmt" - "github.com/CovenantSQL/CovenantSQL/twopc" "net" "os" "strconv" @@ -28,6 +27,8 @@ import ( "testing" "time" + "github.com/CovenantSQL/CovenantSQL/twopc" + "github.com/CovenantSQL/CovenantSQL/crypto/etls" "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/utils/log" diff --git a/utils/remove.go b/utils/remove.go new file mode 100644 index 000000000..c650a6f45 --- /dev/null +++ b/utils/remove.go @@ -0,0 +1,34 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package utils + +import ( + "os" + "path/filepath" +) + +// RemoveAll removes files using glob. +func RemoveAll(pattern string) { + files, err := filepath.Glob(pattern) + if err != nil { + return + } + + for _, file := range files { + _ = os.RemoveAll(file) + } +} diff --git a/utils/remove_test.go b/utils/remove_test.go new file mode 100644 index 000000000..e9de6b487 --- /dev/null +++ b/utils/remove_test.go @@ -0,0 +1,48 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package utils + +import ( + "io/ioutil" + "os" + "testing" + + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestRemoveAll(t *testing.T) { + Convey("test remove files", t, func() { + var names []string + tempPattern := "_tempfile_test_never_duplicate_*" + f, err := ioutil.TempFile(".", tempPattern) + So(err, ShouldBeNil) + names = append(names, f.Name()) + _ = f.Close() + f, err = ioutil.TempFile(".", tempPattern) + So(err, ShouldBeNil) + names = append(names, f.Name()) + _ = f.Close() + + RemoveAll(tempPattern) + + for _, name := range names { + _, err := os.Stat(name) + So(err, ShouldNotBeNil) + So(os.IsNotExist(err), ShouldBeTrue) + } + }) +} diff --git a/worker/chainbusservice_test.go b/worker/chainbusservice_test.go index 4ea1d577e..fab879773 100644 --- a/worker/chainbusservice_test.go +++ b/worker/chainbusservice_test.go @@ -151,9 +151,9 @@ func initNodeChainBusService() (cleanupFunc func(), server *rpc.Server, err erro // init conf _, testFile, _, _ := runtime.Caller(0) pubKeyStoreFile := filepath.Join(d, PubKeyStorePath) - os.Remove(pubKeyStoreFile) + utils.RemoveAll(pubKeyStoreFile + "*") clientPubKeyStoreFile := filepath.Join(d, PubKeyStorePath+"_c") - os.Remove(clientPubKeyStoreFile) + utils.RemoveAll(clientPubKeyStoreFile + "*") dupConfFile := filepath.Join(d, "config.yaml") confFile := filepath.Join(filepath.Dir(testFile), "../test/node_standalone/config.yaml") if err = utils.DupConf(confFile, dupConfFile); err != nil { diff --git a/worker/helper_test.go b/worker/helper_test.go index b9a09a186..9810f098e 100644 --- a/worker/helper_test.go +++ b/worker/helper_test.go @@ -193,9 +193,9 @@ func initNode() (cleanupFunc func(), server *rpc.Server, err error) { // init conf _, testFile, _, _ := runtime.Caller(0) pubKeyStoreFile := filepath.Join(d, PubKeyStorePath) - os.Remove(pubKeyStoreFile) + utils.RemoveAll(pubKeyStoreFile + "*") clientPubKeyStoreFile := filepath.Join(d, PubKeyStorePath+"_c") - os.Remove(clientPubKeyStoreFile) + utils.RemoveAll(clientPubKeyStoreFile + "*") dupConfFile := filepath.Join(d, 
"config.yaml") confFile := filepath.Join(filepath.Dir(testFile), "../test/node_standalone/config.yaml") if err = utils.DupConf(confFile, dupConfFile); err != nil { From 9d3ba9bd1871075f8e33f232b2a269312a86d6a7 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Wed, 20 Mar 2019 15:00:12 +0800 Subject: [PATCH 162/244] Remove dangling observer source code --- cmd/cql-observer/node.go | 53 ---------------------------------------- 1 file changed, 53 deletions(-) delete mode 100644 cmd/cql-observer/node.go diff --git a/cmd/cql-observer/node.go b/cmd/cql-observer/node.go deleted file mode 100644 index c49738de7..000000000 --- a/cmd/cql-observer/node.go +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2019 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package main - -import ( - "fmt" - "syscall" - - "golang.org/x/crypto/ssh/terminal" - - "github.com/CovenantSQL/CovenantSQL/conf" - "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/route" - "github.com/CovenantSQL/CovenantSQL/utils/log" -) - -func initNode() (err error) { - var masterKey []byte - if !conf.GConf.IsTestMode { - fmt.Print("Type in Master key to continue:") - masterKey, err = terminal.ReadPassword(syscall.Stdin) - if err != nil { - fmt.Printf("Failed to read Master key: %v", err) - } - fmt.Println("") - } - - if err = kms.InitLocalKeyPair(conf.GConf.PrivateKeyFile, masterKey); err != nil { - log.WithError(err).Error("init local key pair failed") - return - } - - log.Info("init routes") - - // init kms routing - route.InitKMS(conf.GConf.PubKeyStoreFile) - - return -} From 3b174243f38825644b528707d0a0f59f671aa184 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Wed, 20 Mar 2019 16:50:30 +0800 Subject: [PATCH 163/244] Update observer api prefix --- sqlchain/observer/api.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/sqlchain/observer/api.go b/sqlchain/observer/api.go index 24a8ca4b2..71ae2ccb5 100644 --- a/sqlchain/observer/api.go +++ b/sqlchain/observer/api.go @@ -688,7 +688,8 @@ func startAPI(service *Service, listenAddr string, version string) (server *http api := &explorerAPI{ service: service, } - v1Router := router.PathPrefix(apiProxyPrefix + "/v1").Subrouter() + apiRouter := router.PathPrefix(apiProxyPrefix).Subrouter() + v1Router := apiRouter.PathPrefix("/v1").Subrouter() v1Router.HandleFunc("/ack/{db}/{hash}", api.GetAck).Methods("GET") v1Router.HandleFunc("/offset/{db}/{offset:[0-9]+}", func(writer http.ResponseWriter, request *http.Request) { @@ -700,9 +701,9 @@ func startAPI(service *Service, listenAddr string, version string) (server *http v1Router.HandleFunc("/count/{db}/{count:[0-9]+}", api.GetBlockByCount).Methods("GET") 
v1Router.HandleFunc("/height/{db}/{height:[0-9]+}", api.GetBlockByHeight).Methods("GET") v1Router.HandleFunc("/head/{db}", api.GetHighestBlock).Methods("GET") - v2Router := router.PathPrefix(apiProxyPrefix + "/v2").Subrouter() + v2Router := apiRouter.PathPrefix("/v2").Subrouter() v2Router.HandleFunc("/head/{db}", api.GetHighestBlockV2).Methods("GET") - v3Router := router.PathPrefix(apiProxyPrefix + "/v3").Subrouter() + v3Router := apiRouter.PathPrefix("/v3").Subrouter() v3Router.HandleFunc("/response/{db}/{hash}", api.GetResponse).Methods("GET") v3Router.HandleFunc("/block/{db}/{hash}", api.GetBlockV3).Methods("GET") v3Router.HandleFunc("/count/{db}/{count:[0-9]+}", api.GetBlockByCountV3).Methods("GET") From 2edbeeec746d13e167e666a8702d1c26e714503d Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Thu, 21 Mar 2019 10:58:29 +0800 Subject: [PATCH 164/244] Remove blank line in sqlchain/observer --- sqlchain/observer/worker.go | 1 - 1 file changed, 1 deletion(-) diff --git a/sqlchain/observer/worker.go b/sqlchain/observer/worker.go index 05d4388a6..918d0640e 100644 --- a/sqlchain/observer/worker.go +++ b/sqlchain/observer/worker.go @@ -53,7 +53,6 @@ func (w *subscribeWorker) run() { var nextTick time.Duration for { - select { case <-w.stopCh: return From 45aa36bd09ff0797d6be71f69463e965af24f05f Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Thu, 21 Mar 2019 11:04:32 +0800 Subject: [PATCH 165/244] Add mirror service --- cmd/cql/internal/mirror.go | 86 +++++++++++++ cmd/cql/main.go | 1 + sqlchain/mirror/server.go | 62 +++++++++ sqlchain/mirror/service.go | 258 +++++++++++++++++++++++++++++++++++++ 4 files changed, 407 insertions(+) create mode 100644 cmd/cql/internal/mirror.go create mode 100644 sqlchain/mirror/server.go create mode 100644 sqlchain/mirror/service.go diff --git a/cmd/cql/internal/mirror.go b/cmd/cql/internal/mirror.go new file mode 100644 index 000000000..aa4c2a6d6 --- /dev/null +++ b/cmd/cql/internal/mirror.go @@ -0,0 +1,86 @@ +/* + * Copyright 2019 The 
CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package internal + +import ( + "github.com/CovenantSQL/CovenantSQL/sqlchain/mirror" + "github.com/CovenantSQL/CovenantSQL/utils" +) + +var ( + mirrorDatabase string // mirror database id + mirrorAddr string // mirror server rpc addr + + mirrorService *mirror.Service +) + +// CmdMirror is cql mirror command. +var CmdMirror = &Command{ + UsageLine: "cql mirror [-config file] [-tmp-path path] [-bg-log-level level] database address", + Short: "start a SQLChain database mirror", + Long: ` +Mirror command subscribes database updates and serves a read-only database mirror. +e.g. 
+ cql mirror database_id 127.0.0.1:8546 +`, +} + +func init() { + CmdMirror.Run = runMirror + + addCommonFlags(CmdMirror) + addBgServerFlag(CmdMirror) +} + +func startMirrorServer(mirrorDatabase string, mirrorAddr string) func() { + var err error + mirrorService, err = mirror.StartMirror(mirrorDatabase, mirrorAddr) + if err != nil { + ConsoleLog.WithError(err).Error("start mirror failed") + SetExitStatus(1) + return nil + } + + ConsoleLog.Infof("mirror server started on %s", mirrorAddr) + // TODO(): print sample command for cql to connect + + return func() { + mirror.StopMirror(mirrorService) + ConsoleLog.Info("mirror stopped") + } +} + +func runMirror(cmd *Command, args []string) { + configInit() + bgServerInit() + + if len(args) != 2 { + ConsoleLog.Error("Mirror command need database and listen address as parameters") + SetExitStatus(1) + return + } + + mirrorDatabase = args[0] + mirrorAddr = args[1] + + cancelFunc := startMirrorServer(mirrorDatabase, mirrorAddr) + ExitIfErrors() + defer cancelFunc() + + ConsoleLog.Printf("Ctrl + C to stop mirror server on %s\n", mirrorAddr) + <-utils.WaitForExit() +} diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 72e2972fc..0b343d254 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -38,6 +38,7 @@ func init() { internal.CmdBalance, internal.CmdTransfer, internal.CmdGrant, + internal.CmdMirror, internal.CmdExplorer, internal.CmdAdapter, internal.CmdVersion, diff --git a/sqlchain/mirror/server.go b/sqlchain/mirror/server.go new file mode 100644 index 000000000..ca828d81b --- /dev/null +++ b/sqlchain/mirror/server.go @@ -0,0 +1,62 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package mirror + +import ( + "net" + + "github.com/pkg/errors" + + "github.com/CovenantSQL/CovenantSQL/rpc" +) + +func createServer(listenAddr string) (s *rpc.Server, err error) { + var l net.Listener + if l, err = net.Listen("tcp", listenAddr); err != nil { + err = errors.Wrap(err, "listen rpc server failed") + return + } + + s = rpc.NewServer() + s.SetListener(l) + + return +} + +// StartMirror starts the mirror server and the mirror database. +func StartMirror(database string, listenAddr string) (service *Service, err error) { + var server *rpc.Server + if server, err = createServer(listenAddr); err != nil { + return + } + + if service, err = NewService(database, server); err != nil { + return + } + + // start mirror + if err = service.start(); err != nil { + return + } + + return +} + +// StopMirror stops the mirror server. +func StopMirror(service *Service) { + service.stop() +} diff --git a/sqlchain/mirror/service.go b/sqlchain/mirror/service.go new file mode 100644 index 000000000..23f2766dd --- /dev/null +++ b/sqlchain/mirror/service.go @@ -0,0 +1,258 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package mirror + +import ( + "database/sql" + "fmt" + "io/ioutil" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/pkg/errors" + + "github.com/CovenantSQL/CovenantSQL/conf" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/route" + "github.com/CovenantSQL/CovenantSQL/rpc" + "github.com/CovenantSQL/CovenantSQL/types" + "github.com/CovenantSQL/CovenantSQL/utils/log" + "github.com/CovenantSQL/CovenantSQL/worker" + x "github.com/CovenantSQL/CovenantSQL/xenomint" + xs "github.com/CovenantSQL/CovenantSQL/xenomint/sqlite" +) + +const ( + progressFileSuffix = ".progress" + dbFileSuffix = ".db3" +) + +var ( + // ErrNotReadQuery represents invalid query type for mirror service to respond. + ErrNotReadQuery = errors.New("only read query is supported") +) + +// Service defines a database mirror service handler. +type Service struct { + server *rpc.Server + dbID proto.DatabaseID + upstream proto.NodeID + progress int32 + strg *xs.SQLite3 + st *x.State + stopCh chan struct{} + wg sync.WaitGroup +} + +// NewService returns new mirror service handler. 
+func NewService(database string, server *rpc.Server) (s *Service, err error) { + var ( + dbProgressPath = database + progressFileSuffix + dbPath = database + dbFileSuffix + progress int32 + ) + + // load current progress + if rawProgress, err := ioutil.ReadFile(dbProgressPath); err != nil { + // not exists + progress = 0 + log.WithError(err).Warning("read progress file failed") + } else if rawIntProgress, err := strconv.ParseUint(string(rawProgress), 10, 32); err != nil { + // progress not valid + progress = 0 + log.WithError(err).Warning("parse progress file failed") + } else { + progress = int32(rawIntProgress) + } + + s = &Service{ + server: server, + dbID: proto.DatabaseID(database), + progress: progress, + stopCh: make(chan struct{}), + } + + if s.strg, err = xs.NewSqlite(dbPath); err != nil { + err = errors.Wrap(err, "open database file failed") + return + } + + s.st = x.NewState(sql.LevelDefault, proto.NodeID(""), s.strg) + + // register myself + if err = server.RegisterService(route.DBRPCName, s); err != nil { + err = errors.Wrap(err, "register rpc failed") + return + } + + return +} + +func (s *Service) start() (err error) { + // query sqlchain profile for peers info + var ( + req = new(types.QuerySQLChainProfileReq) + resp = new(types.QuerySQLChainProfileResp) + ) + + req.DBID = s.dbID + + if err = rpc.RequestBP(route.MCCQuerySQLChainProfile.String(), req, resp); err != nil { + err = errors.Wrap(err, "get peers for database failed") + return + } else if len(resp.Profile.Miners) == 0 { + err = errors.New("get empty peers for database") + return + } + + s.upstream = resp.Profile.Miners[0].NodeID + + // start subscriptions + s.wg.Add(2) + go s.run() + go func() { + defer s.wg.Done() + s.server.Serve() + }() + + return +} + +func (s *Service) run() { + defer s.wg.Done() + + var nextTick time.Duration + + for { + select { + case <-s.stopCh: + return + case <-time.After(nextTick): + if err := s.pull(s.getProgress()); err != nil { + nextTick = 
conf.GConf.SQLChainPeriod + } else { + nextTick /= 10 + } + } + } +} + +func (s *Service) pull(count int32) (err error) { + var ( + req = new(worker.ObserverFetchBlockReq) + resp = new(worker.ObserverFetchBlockResp) + next int32 + ) + + defer func() { + lf := log.WithFields(log.Fields{ + "req_count": count, + "count": resp.Count, + }) + + if err != nil { + lf.WithError(err).Debug("sync block failed") + } else { + if resp.Block != nil { + lf = lf.WithField("block", resp.Block.BlockHash()) + } else { + lf = lf.WithField("block", nil) + } + + lf.WithField("next", next).Debug("sync block success") + } + }() + + req.DatabaseID = s.dbID + req.Count = count + + if err = rpc.NewCaller().CallNode(s.upstream, route.DBSObserverFetchBlock.String(), req, resp); err != nil { + return + } + + if resp.Block == nil { + err = errors.New("nil block, try later") + return + } + + if err = s.saveBlock(resp.Block); err != nil { + err = errors.Wrap(err, "save block failed") + return + } + + if count < 0 { + next = resp.Count + 1 + } else { + next = count + 1 + } + + if atomic.CompareAndSwapInt32(&s.progress, count, next) { + s.saveProgress() + } + + return +} + +func (s *Service) saveBlock(b *types.Block) (err error) { + // save block + return s.st.ReplayBlock(b) +} + +func (s *Service) getProgress() int32 { + return atomic.LoadInt32(&s.progress) +} + +func (s *Service) saveProgress() { + progressFile := string(s.dbID) + progressFileSuffix + _ = ioutil.WriteFile(progressFile, []byte(fmt.Sprintf("%d", s.getProgress())), 0644) +} + +func (s *Service) stop() { + if s.stopCh != nil { + select { + case <-s.stopCh: + default: + close(s.stopCh) + } + } + s.server.Stop() + s.wg.Wait() +} + +// Query mocks DBS.Query for mirrored database. 
+func (s *Service) Query(req *types.Request, res *types.Response) (err error) { + if req.Header.QueryType != types.ReadQuery { + // only read query is supported + err = ErrNotReadQuery + return + } + + var r *types.Response + if _, r, err = s.st.Query(req, false); err != nil { + return + } + *res = *r + return +} + +// Ack mocks DBS.Ack for mirrored database. +func (s *Service) Ack(ack *types.Ack, _ *types.AckResponse) (err error) { + // no-op + return +} From ac7cac14de351a25112da242b6065eedb93b5a96 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Thu, 21 Mar 2019 13:53:22 +0800 Subject: [PATCH 166/244] Use caller interface in client connection caller --- client/conn.go | 10 ++-- rpc/rawcaller.go | 130 +++++++++++++++++++++++++++++++++++++++++++++++ rpc/rpcutil.go | 10 ++++ 3 files changed, 145 insertions(+), 5 deletions(-) create mode 100644 rpc/rawcaller.go diff --git a/client/conn.go b/client/conn.go index e4b0a1ac6..e02e04679 100644 --- a/client/conn.go +++ b/client/conn.go @@ -55,7 +55,7 @@ type conn struct { type pconn struct { parent *conn ackCh chan *types.Ack - pCaller *rpc.PersistentCaller + pCaller rpc.PCaller } func newConn(cfg *Config) (c *conn, err error) { @@ -139,7 +139,7 @@ func (c *pconn) stopAckWorkers() { func (c *pconn) ackWorker() { var ( oneTime sync.Once - pc *rpc.PersistentCaller + pc rpc.PCaller err error ) @@ -150,10 +150,10 @@ ackWorkerLoop: break ackWorkerLoop } oneTime.Do(func() { - pc = rpc.NewPersistentCaller(c.pCaller.TargetID) + pc = c.pCaller.New() }) if err = ack.Sign(c.parent.privKey); err != nil { - log.WithField("target", pc.TargetID).WithError(err).Error("failed to sign ack") + log.WithField("target", pc.Target()).WithError(err).Error("failed to sign ack") continue } @@ -375,7 +375,7 @@ func (c *conn) sendQuery(ctx context.Context, queryType types.QueryType, queries "type": queryType.String(), "connID": connID, "seqNo": seqNo, - "target": uc.pCaller.TargetID, + "target": uc.pCaller.Target(), "source": c.localNodeID, 
}).WithError(err).Debug("send query") }() diff --git a/rpc/rawcaller.go b/rpc/rawcaller.go new file mode 100644 index 000000000..da735d793 --- /dev/null +++ b/rpc/rawcaller.go @@ -0,0 +1,130 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package rpc + +import ( + "io" + "net" + "net/rpc" + "strings" + "sync" + + "github.com/pkg/errors" +) + +// PCaller defines generic interface shared with PersistentCaller and RawCaller. +type PCaller interface { + Call(method string, request interface{}, reply interface{}) (err error) + Close() + Target() string + New() PCaller // returns new instance of current caller +} + +// RawCaller defines a raw rpc caller without any encryption. +type RawCaller struct { + targetAddr string + client *Client + sync.RWMutex +} + +// NewRawCaller creates the raw rpc caller to target node. 
+func NewRawCaller(targetAddr string) *RawCaller { + return &RawCaller{ + targetAddr: targetAddr, + } +} + +func (c *RawCaller) isClientValid() bool { + c.RLock() + defer c.RUnlock() + + return c.client != nil +} + +func (c *RawCaller) resetClient() (err error) { + c.Lock() + defer c.Unlock() + + if c.client != nil { + c.client.Close() + c.client = nil + } + + var conn net.Conn + if conn, err = net.Dial("tcp", c.targetAddr); err != nil { + err = errors.Wrapf(err, "dial to target %s failed", c.targetAddr) + return + } + + if c.client, err = InitClientConn(conn); err != nil { + c.client = nil + err = errors.Wrapf(err, "init client to target %s failed", c.targetAddr) + return + } + + return +} + +// Call issues client rpc call. +func (c *RawCaller) Call(method string, args interface{}, reply interface{}) (err error) { + if !c.isClientValid() { + if err = c.resetClient(); err != nil { + return + } + } + + c.RLock() + err = c.client.Call(method, args, reply) + c.RUnlock() + + if err != nil { + if err == io.EOF || + err == io.ErrUnexpectedEOF || + err == io.ErrClosedPipe || + err == rpc.ErrShutdown || + strings.Contains(strings.ToLower(err.Error()), "shut down") || + strings.Contains(strings.ToLower(err.Error()), "broken pipe") { + // if got EOF, retry once + reconnectErr := c.resetClient() + if reconnectErr != nil { + err = errors.Wrap(reconnectErr, "reconnect failed") + } + } + err = errors.Wrapf(err, "call %s failed", method) + } + return +} + +// Close release underlying connection resources. +func (c *RawCaller) Close() { + c.Lock() + defer c.Unlock() + if c.client != nil { + c.client.Close() + c.client = nil + } +} + +// Target returns the request target for logging purpose. +func (c *RawCaller) Target() string { + return c.targetAddr +} + +// New returns brand new caller. 
+func (c *RawCaller) New() PCaller { + return NewRawCaller(c.targetAddr) +} diff --git a/rpc/rpcutil.go b/rpc/rpcutil.go index 2c1e47d51..7f53cd972 100644 --- a/rpc/rpcutil.go +++ b/rpc/rpcutil.go @@ -68,6 +68,16 @@ func NewPersistentCaller(target proto.NodeID) *PersistentCaller { } } +// Target returns the request target for logging purpose. +func (c *PersistentCaller) Target() string { + return string(c.TargetID) +} + +// New returns brand new persistent caller. +func (c *PersistentCaller) New() PCaller { + return NewPersistentCaller(c.TargetID) +} + func (c *PersistentCaller) initClient(isAnonymous bool) (err error) { c.Lock() defer c.Unlock() From c2e4dd18b7ac4224f6e26f5d7517e265cce5aee1 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Thu, 21 Mar 2019 14:27:18 +0800 Subject: [PATCH 167/244] Add mirror server support for client package --- client/config.go | 8 ++++ client/conn.go | 81 +++++++++++++++++++++++--------------- sqlchain/mirror/service.go | 6 +++ 3 files changed, 63 insertions(+), 32 deletions(-) diff --git a/client/config.go b/client/config.go index b9bf5e936..0decccf26 100644 --- a/client/config.go +++ b/client/config.go @@ -25,6 +25,7 @@ import ( const ( paramUseLeader = "use_leader" paramUseFollower = "use_follower" + paramMirror = "mirror" ) // Config is a configuration parsed from a DSN string. @@ -40,6 +41,9 @@ type Config struct { // UseFollower use follower nodes to do queries UseFollower bool + + // Mirror option forces client to query from mirror server + Mirror string } // NewConfig creates a new config with default value. 
@@ -64,6 +68,9 @@ func (cfg *Config) FormatDSN() string { newQuery.Add(paramUseLeader, strconv.FormatBool(cfg.UseLeader)) } } + if cfg.Mirror != "" { + newQuery.Add(paramMirror, cfg.Mirror) + } u.RawQuery = newQuery.Encode() return u.String() @@ -90,6 +97,7 @@ func ParseDSN(dsn string) (cfg *Config, err error) { if !cfg.UseLeader && !cfg.UseFollower { cfg.UseLeader = true } + cfg.Mirror = q.Get(paramMirror) return cfg, nil } diff --git a/client/conn.go b/client/conn.go index e02e04679..351a69e80 100644 --- a/client/conn.go +++ b/client/conn.go @@ -84,39 +84,48 @@ func newConn(cfg *Config) (c *conn, err error) { return nil, errors.WithMessage(err, "cacheGetPeers failed") } - if cfg.UseLeader { + if cfg.Mirror != "" { c.leader = &pconn{ parent: c, - pCaller: rpc.NewPersistentCaller(peers.Leader), + pCaller: rpc.NewRawCaller(cfg.Mirror), + } + + // no ack workers required, mirror mode does not support ack worker + } else { + if cfg.UseLeader { + c.leader = &pconn{ + parent: c, + pCaller: rpc.NewPersistentCaller(peers.Leader), + } } - } - // choose a random follower node - if cfg.UseFollower && len(peers.Servers) > 1 { - for { - node := peers.Servers[randSource.Intn(len(peers.Servers))] - if node != peers.Leader { - c.follower = &pconn{ - parent: c, - pCaller: rpc.NewPersistentCaller(node), + // choose a random follower node + if cfg.UseFollower && len(peers.Servers) > 1 { + for { + node := peers.Servers[randSource.Intn(len(peers.Servers))] + if node != peers.Leader { + c.follower = &pconn{ + parent: c, + pCaller: rpc.NewPersistentCaller(node), + } + break } - break } } - } - if c.leader == nil && c.follower == nil { - return nil, errors.New("no follower peers found") - } + if c.leader == nil && c.follower == nil { + return nil, errors.New("no follower peers found") + } - if c.leader != nil { - if err := c.leader.startAckWorkers(2); err != nil { - return nil, errors.WithMessage(err, "leader startAckWorkers failed") + if c.leader != nil { + if err := 
c.leader.startAckWorkers(2); err != nil { + return nil, errors.WithMessage(err, "leader startAckWorkers failed") + } } - } - if c.follower != nil { - if err := c.follower.startAckWorkers(2); err != nil { - return nil, errors.WithMessage(err, "follower startAckWorkers failed") + if c.follower != nil { + if err := c.follower.startAckWorkers(2); err != nil { + return nil, errors.WithMessage(err, "follower startAckWorkers failed") + } } } @@ -133,7 +142,13 @@ func (c *pconn) startAckWorkers(workerCount int) (err error) { } func (c *pconn) stopAckWorkers() { - close(c.ackCh) + if c.ackCh != nil { + select { + case <-c.ackCh: + default: + close(c.ackCh) + } + } } func (c *pconn) ackWorker() { @@ -415,15 +430,17 @@ func (c *conn) sendQuery(ctx context.Context, queryType types.QueryType, queries // build ack func() { defer trace.StartRegion(ctx, "ackEnqueue").End() - uc.ackCh <- &types.Ack{ - Header: types.SignedAckHeader{ - AckHeader: types.AckHeader{ - Response: response.Header.ResponseHeader, - ResponseHash: response.Header.Hash(), - NodeID: c.localNodeID, - Timestamp: getLocalTime(), + if uc.ackCh != nil { + uc.ackCh <- &types.Ack{ + Header: types.SignedAckHeader{ + AckHeader: types.AckHeader{ + Response: response.Header.ResponseHeader, + ResponseHash: response.Header.Hash(), + NodeID: c.localNodeID, + Timestamp: getLocalTime(), + }, }, - }, + } } }() diff --git a/sqlchain/mirror/service.go b/sqlchain/mirror/service.go index 23f2766dd..683d28240 100644 --- a/sqlchain/mirror/service.go +++ b/sqlchain/mirror/service.go @@ -243,6 +243,12 @@ func (s *Service) Query(req *types.Request, res *types.Response) (err error) { return } + if req.Header.DatabaseID != s.dbID { + // database instance not matched + err = worker.ErrNotExists + return + } + var r *types.Response if _, r, err = s.st.Query(req, false); err != nil { return From bb47dc66f787d42ca2ae36c268d41257112515e7 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Thu, 21 Mar 2019 14:40:35 +0800 Subject: [PATCH 168/244] 
Support using mirror server in adapter --- cmd/cql/internal/adapter.go | 15 +++++++-------- cmd/cql/internal/console.go | 2 +- sqlchain/adapter/config/config.go | 3 ++- sqlchain/adapter/server.go | 5 ++++- sqlchain/adapter/storage/covenantsql.go | 11 ++++++++--- 5 files changed, 22 insertions(+), 14 deletions(-) diff --git a/cmd/cql/internal/adapter.go b/cmd/cql/internal/adapter.go index d54fad032..a0e2ca446 100644 --- a/cmd/cql/internal/adapter.go +++ b/cmd/cql/internal/adapter.go @@ -18,7 +18,6 @@ package internal import ( "context" - "net/http" "time" "github.com/CovenantSQL/CovenantSQL/sqlchain/adapter" @@ -26,14 +25,13 @@ import ( ) var ( - adapterAddr string // adapter listen addr - - adapterHTTPServer *http.Server + adapterAddr string // adapter listen addr + adapterUseMirrorAddr string ) // CmdAdapter is cql adapter command entity. var CmdAdapter = &Command{ - UsageLine: "cql adapter [-config file] [-tmp-path path] [-bg-log-level level] address", + UsageLine: "cql adapter [-config file] [-tmp-path path] [-bg-log-level level] [-mirror addr] address", Short: "start a SQLChain adapter", Long: ` Adapter command serves a SQLChain adapter @@ -44,13 +42,14 @@ e.g. 
func init() { CmdAdapter.Run = runAdapter + CmdAdapter.Flag.StringVar(&adapterUseMirrorAddr, "mirror", "", "mirror server for adapter to query") addCommonFlags(CmdAdapter) addBgServerFlag(CmdAdapter) } -func startAdapterServer(adapterAddr string) func() { - adapterHTTPServer, err := adapter.NewHTTPAdapter(adapterAddr, configFile) +func startAdapterServer(adapterAddr string, adapterUseMirrorAddr string) func() { + adapterHTTPServer, err := adapter.NewHTTPAdapter(adapterAddr, configFile, adapterUseMirrorAddr) if err != nil { ConsoleLog.WithError(err).Error("init adapter failed") SetExitStatus(1) @@ -84,7 +83,7 @@ func runAdapter(cmd *Command, args []string) { } adapterAddr = args[0] - cancelFunc := startAdapterServer(adapterAddr) + cancelFunc := startAdapterServer(adapterAddr, adapterUseMirrorAddr) ExitIfErrors() defer cancelFunc() diff --git a/cmd/cql/internal/console.go b/cmd/cql/internal/console.go index c125c233f..9e63e7bc3 100644 --- a/cmd/cql/internal/console.go +++ b/cmd/cql/internal/console.go @@ -365,7 +365,7 @@ func runConsole(cmd *Command, args []string) { } if adapterAddr != "" { - cancelFunc := startAdapterServer(adapterAddr) + cancelFunc := startAdapterServer(adapterAddr, "") defer cancelFunc() } diff --git a/sqlchain/adapter/config/config.go b/sqlchain/adapter/config/config.go index 8a89bcdb7..a78b40248 100644 --- a/sqlchain/adapter/config/config.go +++ b/sqlchain/adapter/config/config.go @@ -59,6 +59,7 @@ type Config struct { WriteCertificates []*x509.Certificate `yaml:"-"` // storage config + MirrorServer string `yaml:"Mirror"` // use mirror server for queries StorageDriver string `yaml:"StorageDriver"` // sqlite3 or covenantsql StorageRoot string `yaml:"StorageRoot"` StorageInstance storage.Storage `yaml:"-"` @@ -160,7 +161,7 @@ func LoadConfig(configPath string) (config *Config, err error) { // load storage switch config.StorageDriver { case "covenantsql": - config.StorageInstance = storage.NewCovenantSQLStorage() + config.StorageInstance = 
storage.NewCovenantSQLStorage(config.MirrorServer) case "sqlite3": storageRoot := filepath.Join(workingRoot, config.StorageRoot) if config.StorageInstance, err = storage.NewSQLite3Storage(storageRoot); err != nil { diff --git a/sqlchain/adapter/server.go b/sqlchain/adapter/server.go index 4c0611e3a..abfeb13d0 100644 --- a/sqlchain/adapter/server.go +++ b/sqlchain/adapter/server.go @@ -34,7 +34,7 @@ type HTTPAdapter struct { } // NewHTTPAdapter creates adapter to service. -func NewHTTPAdapter(listenAddr string, configFile string) (adapter *HTTPAdapter, err error) { +func NewHTTPAdapter(listenAddr string, configFile string, adapterUseMirrorAddr string) (adapter *HTTPAdapter, err error) { adapter = new(HTTPAdapter) // load config file @@ -46,6 +46,9 @@ func NewHTTPAdapter(listenAddr string, configFile string) (adapter *HTTPAdapter, if listenAddr != "" { cfg.ListenAddr = listenAddr } + if adapterUseMirrorAddr != "" { + cfg.MirrorServer = adapterUseMirrorAddr + } // init server handler := handlers.CORS( handlers.AllowedHeaders([]string{"Content-Type"}), diff --git a/sqlchain/adapter/storage/covenantsql.go b/sqlchain/adapter/storage/covenantsql.go index fb1772d9d..c92d1f365 100644 --- a/sqlchain/adapter/storage/covenantsql.go +++ b/sqlchain/adapter/storage/covenantsql.go @@ -23,11 +23,13 @@ import ( ) // CovenantSQLStorage defines the covenantsql database abstraction. -type CovenantSQLStorage struct{} +type CovenantSQLStorage struct { + mirrorServerAddr string +} // NewCovenantSQLStorage returns new covenantsql storage handler. 
-func NewCovenantSQLStorage() (s *CovenantSQLStorage) { - s = &CovenantSQLStorage{} +func NewCovenantSQLStorage(mirrorServerAddr string) (s *CovenantSQLStorage) { + s = &CovenantSQLStorage{mirrorServerAddr: mirrorServerAddr} return } @@ -116,6 +118,9 @@ func (s *CovenantSQLStorage) Exec(dbID string, query string, args ...interface{} func (s *CovenantSQLStorage) getConn(dbID string) (db *sql.DB, err error) { cfg := client.NewConfig() cfg.DatabaseID = dbID + if s.mirrorServerAddr != "" { + cfg.Mirror = s.mirrorServerAddr + } return sql.Open("covenantsql", cfg.FormatDSN()) } From 3a52377886bfc7afffce2f22c7fd8d418d814b28 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Fri, 22 Mar 2019 10:59:33 +0800 Subject: [PATCH 169/244] Remove test binary flag in unit test --- sqlchain/observer/config_test.go | 2 -- sqlchain/observer/observation_test.go | 2 -- 2 files changed, 4 deletions(-) diff --git a/sqlchain/observer/config_test.go b/sqlchain/observer/config_test.go index 30326a664..2b6bd150a 100644 --- a/sqlchain/observer/config_test.go +++ b/sqlchain/observer/config_test.go @@ -1,5 +1,3 @@ -// +build !testbinary - /* * Copyright 2018 The CovenantSQL Authors. * diff --git a/sqlchain/observer/observation_test.go b/sqlchain/observer/observation_test.go index 284b3a919..135249e7b 100644 --- a/sqlchain/observer/observation_test.go +++ b/sqlchain/observer/observation_test.go @@ -1,5 +1,3 @@ -// +build !testbinary - /* * Copyright 2018 The CovenantSQL Authors. 
* From 57156cf7c9631ac308eeb4c72881010534c67602 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Fri, 22 Mar 2019 10:59:56 +0800 Subject: [PATCH 170/244] Add observer suffix in test coverage file name --- sqlchain/observer/observation_test.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/sqlchain/observer/observation_test.go b/sqlchain/observer/observation_test.go index 135249e7b..265c42d82 100644 --- a/sqlchain/observer/observation_test.go +++ b/sqlchain/observer/observation_test.go @@ -89,7 +89,7 @@ func startNodes() { if cmd, err = utils.RunCommandNB( FJ(baseDir, "./bin/cqld.test"), []string{"-config", FJ(testWorkingDir, "./observation/node_0/config.yaml"), - "-test.coverprofile", FJ(baseDir, "./cmd/cql/leader.cover.out"), + "-test.coverprofile", FJ(baseDir, "./cmd/cql/leader-observer.cover.out"), }, "leader", testWorkingDir, logDir, false, ); err == nil { @@ -100,7 +100,7 @@ func startNodes() { if cmd, err = utils.RunCommandNB( FJ(baseDir, "./bin/cqld.test"), []string{"-config", FJ(testWorkingDir, "./observation/node_1/config.yaml"), - "-test.coverprofile", FJ(baseDir, "./cmd/cql/follower1.cover.out"), + "-test.coverprofile", FJ(baseDir, "./cmd/cql/follower1-observer.cover.out"), }, "follower1", testWorkingDir, logDir, false, ); err == nil { @@ -111,7 +111,7 @@ func startNodes() { if cmd, err = utils.RunCommandNB( FJ(baseDir, "./bin/cqld.test"), []string{"-config", FJ(testWorkingDir, "./observation/node_2/config.yaml"), - "-test.coverprofile", FJ(baseDir, "./cmd/cql/follower2.cover.out"), + "-test.coverprofile", FJ(baseDir, "./cmd/cql/follower2-observer.cover.out"), }, "follower2", testWorkingDir, logDir, false, ); err == nil { @@ -148,7 +148,7 @@ func startNodes() { if cmd, err = utils.RunCommandNB( FJ(baseDir, "./bin/cql-minerd.test"), []string{"-config", FJ(testWorkingDir, "./observation/node_miner_0/config.yaml"), - "-test.coverprofile", FJ(baseDir, "./cmd/cql/miner0.cover.out"), + "-test.coverprofile", FJ(baseDir, 
"./cmd/cql/miner0-observer.cover.out"), }, "miner0", testWorkingDir, logDir, false, ); err == nil { @@ -161,7 +161,7 @@ func startNodes() { if cmd, err = utils.RunCommandNB( FJ(baseDir, "./bin/cql-minerd.test"), []string{"-config", FJ(testWorkingDir, "./observation/node_miner_1/config.yaml"), - "-test.coverprofile", FJ(baseDir, "./cmd/cql/miner1.cover.out"), + "-test.coverprofile", FJ(baseDir, "./cmd/cql/miner1-observer.cover.out"), }, "miner1", testWorkingDir, logDir, false, ); err == nil { @@ -174,7 +174,7 @@ func startNodes() { if cmd, err = utils.RunCommandNB( FJ(baseDir, "./bin/cql-minerd.test"), []string{"-config", FJ(testWorkingDir, "./observation/node_miner_2/config.yaml"), - "-test.coverprofile", FJ(baseDir, "./cmd/cql/miner2.cover.out"), + "-test.coverprofile", FJ(baseDir, "./cmd/cql/miner2-observer.cover.out"), }, "miner2", testWorkingDir, logDir, false, ); err == nil { From f9faf11e6dabbe44706c03f4c5300f03fe46a8df Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Fri, 22 Mar 2019 13:21:59 +0800 Subject: [PATCH 171/244] Add mirror feature integration test --- sqlchain/mirror/mirror_test.go | 318 +++++++++++++++++++++++++++ sqlchain/mirror/service.go | 7 +- test/mirror/node_0/config.yaml | 125 +++++++++++ test/mirror/node_0/private.key | 2 + test/mirror/node_1/config.yaml | 125 +++++++++++ test/mirror/node_1/private.key | 2 + test/mirror/node_2/config.yaml | 125 +++++++++++ test/mirror/node_2/private.key | 2 + test/mirror/node_c/config.yaml | 109 +++++++++ test/mirror/node_c/private.key | Bin 0 -> 96 bytes test/mirror/node_miner_0/config.yaml | 105 +++++++++ test/mirror/node_miner_0/private.key | 1 + test/mirror/node_miner_1/config.yaml | 105 +++++++++ test/mirror/node_miner_1/private.key | 2 + test/mirror/node_miner_2/config.yaml | 105 +++++++++ test/mirror/node_miner_2/private.key | 1 + 16 files changed, 1131 insertions(+), 3 deletions(-) create mode 100644 sqlchain/mirror/mirror_test.go create mode 100644 test/mirror/node_0/config.yaml create mode 100644 
test/mirror/node_0/private.key create mode 100644 test/mirror/node_1/config.yaml create mode 100644 test/mirror/node_1/private.key create mode 100644 test/mirror/node_2/config.yaml create mode 100644 test/mirror/node_2/private.key create mode 100644 test/mirror/node_c/config.yaml create mode 100644 test/mirror/node_c/private.key create mode 100644 test/mirror/node_miner_0/config.yaml create mode 100644 test/mirror/node_miner_0/private.key create mode 100644 test/mirror/node_miner_1/config.yaml create mode 100644 test/mirror/node_miner_1/private.key create mode 100644 test/mirror/node_miner_2/config.yaml create mode 100644 test/mirror/node_miner_2/private.key diff --git a/sqlchain/mirror/mirror_test.go b/sqlchain/mirror/mirror_test.go new file mode 100644 index 000000000..540a5224f --- /dev/null +++ b/sqlchain/mirror/mirror_test.go @@ -0,0 +1,318 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package mirror + +import ( + "context" + "database/sql" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strconv" + "sync" + "syscall" + "testing" + "time" + + . 
"github.com/smartystreets/goconvey/convey" + + "github.com/CovenantSQL/CovenantSQL/client" + "github.com/CovenantSQL/CovenantSQL/conf" + "github.com/CovenantSQL/CovenantSQL/test" + "github.com/CovenantSQL/CovenantSQL/utils" + "github.com/CovenantSQL/CovenantSQL/utils/log" +) + +var ( + baseDir = utils.GetProjectSrcDir() + testWorkingDir = FJ(baseDir, "./test/") + logDir = FJ(testWorkingDir, "./log/") +) + +var nodeCmds []*utils.CMD + +var FJ = filepath.Join + +func startNodes() { + // wait for ports to be available + var err error + ctx := context.Background() + err = utils.WaitForPorts(ctx, "127.0.0.1", []int{ + 5120, + 5121, + 5122, + }, time.Millisecond*200) + + if err != nil { + log.Fatalf("wait for port ready timeout: %v", err) + } + + var cmd *utils.CMD + if cmd, err = utils.RunCommandNB( + FJ(baseDir, "./bin/cqld.test"), + []string{"-config", FJ(testWorkingDir, "./mirror/node_0/config.yaml"), + "-test.coverprofile", FJ(baseDir, "./cmd/cql/leader-mirror.cover.out"), + }, + "leader", testWorkingDir, logDir, false, + ); err == nil { + nodeCmds = append(nodeCmds, cmd) + } else { + log.Errorf("start node failed: %v", err) + } + if cmd, err = utils.RunCommandNB( + FJ(baseDir, "./bin/cqld.test"), + []string{"-config", FJ(testWorkingDir, "./mirror/node_1/config.yaml"), + "-test.coverprofile", FJ(baseDir, "./cmd/cql/follower1-mirror.cover.out"), + }, + "follower1", testWorkingDir, logDir, false, + ); err == nil { + nodeCmds = append(nodeCmds, cmd) + } else { + log.Errorf("start node failed: %v", err) + } + if cmd, err = utils.RunCommandNB( + FJ(baseDir, "./bin/cqld.test"), + []string{"-config", FJ(testWorkingDir, "./mirror/node_2/config.yaml"), + "-test.coverprofile", FJ(baseDir, "./cmd/cql/follower2-mirror.cover.out"), + }, + "follower2", testWorkingDir, logDir, false, + ); err == nil { + nodeCmds = append(nodeCmds, cmd) + } else { + log.Errorf("start node failed: %v", err) + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) + defer 
cancel() + err = utils.WaitToConnect(ctx, "127.0.0.1", []int{ + 5120, + 5121, + 5122, + }, time.Second) + if err != nil { + log.Fatalf("wait for port ready timeout: %v", err) + } + + ctx, cancel = context.WithTimeout(context.Background(), time.Second*30) + defer cancel() + err = utils.WaitForPorts(ctx, "127.0.0.1", []int{ + 5144, + 5145, + 5146, + }, time.Millisecond*200) + if err != nil { + log.Fatalf("wait for port ready timeout: %v", err) + } + + time.Sleep(10 * time.Second) + // start 3miners + os.RemoveAll(FJ(testWorkingDir, "./mirror/node_miner_0/data")) + if cmd, err = utils.RunCommandNB( + FJ(baseDir, "./bin/cql-minerd.test"), + []string{"-config", FJ(testWorkingDir, "./mirror/node_miner_0/config.yaml"), + "-test.coverprofile", FJ(baseDir, "./cmd/cql/miner0-mirror.cover.out"), + }, + "miner0", testWorkingDir, logDir, false, + ); err == nil { + nodeCmds = append(nodeCmds, cmd) + } else { + log.Errorf("start node failed: %v", err) + } + + os.RemoveAll(FJ(testWorkingDir, "./mirror/node_miner_1/data")) + if cmd, err = utils.RunCommandNB( + FJ(baseDir, "./bin/cql-minerd.test"), + []string{"-config", FJ(testWorkingDir, "./mirror/node_miner_1/config.yaml"), + "-test.coverprofile", FJ(baseDir, "./cmd/cql/miner1-mirror.cover.out"), + }, + "miner1", testWorkingDir, logDir, false, + ); err == nil { + nodeCmds = append(nodeCmds, cmd) + } else { + log.Errorf("start node failed: %v", err) + } + + os.RemoveAll(FJ(testWorkingDir, "./mirror/node_miner_2/data")) + if cmd, err = utils.RunCommandNB( + FJ(baseDir, "./bin/cql-minerd.test"), + []string{"-config", FJ(testWorkingDir, "./mirror/node_miner_2/config.yaml"), + "-test.coverprofile", FJ(baseDir, "./cmd/cql/miner2-mirror.cover.out"), + }, + "miner2", testWorkingDir, logDir, false, + ); err == nil { + nodeCmds = append(nodeCmds, cmd) + } else { + log.Errorf("start node failed: %v", err) + } +} + +func stopNodes() { + var wg sync.WaitGroup + + for _, nodeCmd := range nodeCmds { + wg.Add(1) + go func(thisCmd *utils.CMD) { + 
defer wg.Done() + thisCmd.Cmd.Process.Signal(syscall.SIGTERM) + thisCmd.Cmd.Wait() + grepRace := exec.Command("/bin/sh", "-c", "grep -a -A 50 'DATA RACE' "+thisCmd.LogPath) + out, _ := grepRace.Output() + if len(out) > 2 { + log.Fatal(string(out)) + } + }(nodeCmd) + } + + wg.Wait() +} + +func waitForMirrorComplete(ctx context.Context, dbID string, tick time.Duration, stableDuration time.Duration) (err error) { + progressFile := FJ(testWorkingDir, "./mirror/node_mirror/"+dbID+progressFileSuffix) + lastProgress := 0 + lastUpdate := time.Now() + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(tick): + progressData, _ := ioutil.ReadFile(progressFile) + progressCount, _ := strconv.Atoi(string(progressData)) + if progressCount > lastProgress { + lastUpdate = time.Now() + } + if progressCount > 5 || (progressCount > 0 && time.Now().Sub(lastUpdate) > stableDuration) { + // mirror synced + return + } + } + } +} + +func TestFullProcess(t *testing.T) { + log.SetLevel(log.DebugLevel) + + Convey("test mirror full process", t, func() { + var ( + err error + ) + + startNodes() + defer stopNodes() + + err = client.Init(FJ(testWorkingDir, "./mirror/node_c/config.yaml"), []byte("")) + So(err, ShouldBeNil) + + // wait bp chain service to start + ctx, ccl1 := context.WithTimeout(context.Background(), 1*time.Minute) + defer ccl1() + err = test.WaitBPChainService(ctx, 3*time.Second) + So(err, ShouldBeNil) + + // create + meta := client.ResourceMeta{} + meta.Node = 1 + _, dsn, err := client.Create(meta) + So(err, ShouldBeNil) + dsnCfg, err := client.ParseDSN(dsn) + So(err, ShouldBeNil) + + log.Infof("the created database dsn is %v", dsn) + + db, err := sql.Open("covenantsql", dsn) + So(err, ShouldBeNil) + defer db.Close() + + // wait for creation + ctx, ccl2 := context.WithTimeout(context.Background(), 5*time.Minute) + defer ccl2() + err = client.WaitDBCreation(ctx, dsn) + So(err, ShouldBeNil) + + _, err = db.Exec("CREATE TABLE test (test int)") + So(err, 
ShouldBeNil) + + _, err = db.Exec("INSERT INTO test VALUES(?)", 4) + So(err, ShouldBeNil) + + row := db.QueryRow("SELECT * FROM test LIMIT 1") + + var result int + err = row.Scan(&result) + So(err, ShouldBeNil) + So(result, ShouldEqual, 4) + + // run mirror node + utils.RemoveAll(FJ(testWorkingDir, "./mirror/node_mirror/"+dsnCfg.DatabaseID+"*")) + defer utils.RemoveAll(FJ(testWorkingDir, "./mirror/node_mirror/"+dsnCfg.DatabaseID+"*")) + + var mirrorCmd *utils.CMD + mirrorCmd, err = utils.RunCommandNB( + FJ(baseDir, "./bin/cql.test"), + []string{"-test.coverprofile", FJ(baseDir, "./cmd/cql/mirror.cover.out"), + "mirror", + "-config", FJ(testWorkingDir, "./mirror/node_mirror/config.yaml"), + "-no-password", + "-bg-log-level", "debug", + dsnCfg.DatabaseID, + "127.0.0.1:5663", + }, + "mirror", testWorkingDir, logDir, false, + ) + So(err, ShouldBeNil) + defer func() { + mirrorCmd.Cmd.Process.Signal(syscall.SIGTERM) + mirrorCmd.Cmd.Wait() + }() + + defer func() { + _ = mirrorCmd.Cmd.Process.Signal(os.Interrupt) + _ = mirrorCmd.Cmd.Wait() + }() + + err = utils.WaitToConnect(context.Background(), "127.0.0.1", []int{5663}, 200*time.Millisecond) + So(err, ShouldBeNil) + + time.Sleep(time.Second) + + // check subscription status, wait for 5 period + ctx, ccl3 := context.WithTimeout(context.Background(), 10*conf.GConf.SQLChainPeriod) + defer ccl3() + err = waitForMirrorComplete(ctx, dsnCfg.DatabaseID, conf.GConf.SQLChainPeriod, 2*conf.GConf.SQLChainPeriod) + So(err, ShouldBeNil) + + // mirror synced, query using mirror + dsnCfg.Mirror = "127.0.0.1:5663" + mirrorDSN := dsnCfg.FormatDSN() + log.Infof("the mirror dsn is %v", mirrorDSN) + + dbMirror, err := sql.Open("covenantsql", mirrorDSN) + So(err, ShouldBeNil) + defer dbMirror.Close() + + // test read query + row = dbMirror.QueryRow("SELECT * FROM test LIMIT 1") + err = row.Scan(&result) + So(err, ShouldBeNil) + So(result, ShouldEqual, 4) + + // test write query, must not success + _, err = dbMirror.Exec("INSERT INTO test 
VALUES(?)", 5) + So(err, ShouldNotBeNil) + }) +} diff --git a/sqlchain/mirror/service.go b/sqlchain/mirror/service.go index 683d28240..0f44b6b2e 100644 --- a/sqlchain/mirror/service.go +++ b/sqlchain/mirror/service.go @@ -20,6 +20,7 @@ import ( "database/sql" "fmt" "io/ioutil" + "path/filepath" "strconv" "sync" "sync/atomic" @@ -63,8 +64,8 @@ type Service struct { // NewService returns new mirror service handler. func NewService(database string, server *rpc.Server) (s *Service, err error) { var ( - dbProgressPath = database + progressFileSuffix - dbPath = database + dbFileSuffix + dbProgressPath = filepath.Join(conf.GConf.WorkingRoot, database+progressFileSuffix) + dbPath = filepath.Join(conf.GConf.WorkingRoot, database+dbFileSuffix) progress int32 ) @@ -219,7 +220,7 @@ func (s *Service) getProgress() int32 { } func (s *Service) saveProgress() { - progressFile := string(s.dbID) + progressFileSuffix + progressFile := filepath.Join(conf.GConf.WorkingRoot, string(s.dbID)+progressFileSuffix) _ = ioutil.WriteFile(progressFile, []byte(fmt.Sprintf("%d", s.getProgress())), 0644) } diff --git a/test/mirror/node_0/config.yaml b/test/mirror/node_0/config.yaml new file mode 100644 index 000000000..fed82017e --- /dev/null +++ b/test/mirror/node_0/config.yaml @@ -0,0 +1,125 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:5122" +ThisNodeID: "00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9" +QPS: 1000 +BillingBlockCount: 3600 +ChainBusPeriod: 1s +BPPeriod: 3s +BPTick: 1s +SQLChainPeriod: 3s +SQLChainTick: 1s +SQLChainTTL: 10 +MinProviderDeposit: 1000000 +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com + 
oJMRESz5E4gYzS/q6XDrvU1qMPYIjCWzJaOau8XNEZeqCYKD5ar0IRd8KqXXFJkqmVfRvMGPmM1x8fGAa2XhSA==: cloudflare.com +MinNodeIDDifficulty: 2 +DNSSeed: + EnforcedDNSSEC: false + DNSServers: + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 + +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z + BaseAccounts: + - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 + - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 + - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 + - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 + - Address: e4e1628477a17c969f3f915f4bc7c059c3fbcbaf37855bc55a811465ea2480af + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:5122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 
127.0.0.1:5121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:5120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:5144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:5145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:5146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/mirror/node_0/private.key b/test/mirror/node_0/private.key new file mode 100644 index 000000000..449618c0a --- /dev/null +++ b/test/mirror/node_0/private.key @@ -0,0 +1,2 @@ +WAð8#|TZԓ`mF}~e ʆ?~ *E%vpo*a߂ç_Bľ@8 +MC2 \ No newline at end of file diff --git a/test/mirror/node_1/config.yaml b/test/mirror/node_1/config.yaml new file mode 100644 index 000000000..c1e448194 --- /dev/null +++ b/test/mirror/node_1/config.yaml @@ -0,0 +1,125 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:5121" +ThisNodeID: 
"00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35" +QPS: 1000 +BillingBlockCount: 3600 +ChainBusPeriod: 1s +BPPeriod: 3s +BPTick: 1s +SQLChainPeriod: 3s +SQLChainTick: 1s +SQLChainTTL: 10 +MinProviderDeposit: 1000000 +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com + oJMRESz5E4gYzS/q6XDrvU1qMPYIjCWzJaOau8XNEZeqCYKD5ar0IRd8KqXXFJkqmVfRvMGPmM1x8fGAa2XhSA==: cloudflare.com +MinNodeIDDifficulty: 2 +DNSSeed: + EnforcedDNSSEC: false + DNSServers: + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 + +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z + BaseAccounts: + - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 + - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 + - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 + - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 + - Address: e4e1628477a17c969f3f915f4bc7c059c3fbcbaf37855bc55a811465ea2480af 
+ StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:5122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:5121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:5120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:5144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:5145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:5146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/mirror/node_1/private.key b/test/mirror/node_1/private.key new file mode 100644 index 000000000..449618c0a --- /dev/null +++ b/test/mirror/node_1/private.key @@ -0,0 +1,2 @@ +WAð8#|TZԓ`mF}~e ʆ?~ 
*E%vpo*a߂ç_Bľ@8 +MC2 \ No newline at end of file diff --git a/test/mirror/node_2/config.yaml b/test/mirror/node_2/config.yaml new file mode 100644 index 000000000..7b20dc22a --- /dev/null +++ b/test/mirror/node_2/config.yaml @@ -0,0 +1,125 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:5120" +ThisNodeID: "000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582" +QPS: 1000 +BillingBlockCount: 3600 +ChainBusPeriod: 1s +BPPeriod: 3s +BPTick: 1s +SQLChainPeriod: 3s +SQLChainTick: 1s +SQLChainTTL: 10 +MinProviderDeposit: 1000000 +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com + oJMRESz5E4gYzS/q6XDrvU1qMPYIjCWzJaOau8XNEZeqCYKD5ar0IRd8KqXXFJkqmVfRvMGPmM1x8fGAa2XhSA==: cloudflare.com +MinNodeIDDifficulty: 2 +DNSSeed: + EnforcedDNSSEC: false + DNSServers: + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 + +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z + BaseAccounts: + - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 + - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be + 
StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 + - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 + - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 + - Address: e4e1628477a17c969f3f915f4bc7c059c3fbcbaf37855bc55a811465ea2480af + StableCoinBalance: 1000000000 + CovenantCoinBalance: 1000000000 +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:5122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:5121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:5120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:5144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:5145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 
000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:5146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/mirror/node_2/private.key b/test/mirror/node_2/private.key new file mode 100644 index 000000000..449618c0a --- /dev/null +++ b/test/mirror/node_2/private.key @@ -0,0 +1,2 @@ +WAð8#|TZԓ`mF}~e ʆ?~ *E%vpo*a߂ç_Bľ@8 +MC2 \ No newline at end of file diff --git a/test/mirror/node_c/config.yaml b/test/mirror/node_c/config.yaml new file mode 100644 index 000000000..2222031a6 --- /dev/null +++ b/test/mirror/node_c/config.yaml @@ -0,0 +1,109 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:5120" +ThisNodeID: "00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d" +QPS: 1000 +BillingBlockCount: 3600 +ChainBusPeriod: 1s +BPPeriod: 3s +BPTick: 1s +SQLChainPeriod: 3s +SQLChainTick: 1s +SQLChainTTL: 10 +MinProviderDeposit: 1000000 +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com + oJMRESz5E4gYzS/q6XDrvU1qMPYIjCWzJaOau8XNEZeqCYKD5ar0IRd8KqXXFJkqmVfRvMGPmM1x8fGAa2XhSA==: cloudflare.com +MinNodeIDDifficulty: 2 +DNSSeed: + EnforcedDNSSEC: false + DNSServers: + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 + +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 
0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:5122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:5121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:5120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:5144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:5145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:5146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git 
a/test/mirror/node_c/private.key b/test/mirror/node_c/private.key new file mode 100644 index 0000000000000000000000000000000000000000..f563980c1fcd669303b1bee9c2172bf5a3519b8c GIT binary patch literal 96 zcmV-m0H6PF*slzHCqzPE3aw^kxJ?Q%G%ogw14*THn=7~eV;?h-t?#^t5W+6R^1DgL z$@60LgW8>L#Ft4anW%5%J6f5~?krWm@CHc~TLX=J0P-Na@n`wgY{PEN*;2omcYC0; Ca4_ls literal 0 HcmV?d00001 diff --git a/test/mirror/node_miner_0/config.yaml b/test/mirror/node_miner_0/config.yaml new file mode 100644 index 000000000..4968cf123 --- /dev/null +++ b/test/mirror/node_miner_0/config.yaml @@ -0,0 +1,105 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:5144" +ThisNodeID: "000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade" +QPS: 1000 +BillingBlockCount: 3600 +ChainBusPeriod: 1s +BPPeriod: 3s +BPTick: 1s +SQLChainPeriod: 3s +SQLChainTick: 1s +SQLChainTTL: 10 +MinProviderDeposit: 1000000 +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com + oJMRESz5E4gYzS/q6XDrvU1qMPYIjCWzJaOau8XNEZeqCYKD5ar0IRd8KqXXFJkqmVfRvMGPmM1x8fGAa2XhSA==: cloudflare.com +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z +Miner: + IsTestMode: true + RootDir: "./data" + 
MaxReqTimeGap: "2s" + ProvideServiceInterval: "3s" +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:5122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:5121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:5120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:5144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:5145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:5146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/mirror/node_miner_0/private.key b/test/mirror/node_miner_0/private.key new file mode 100644 index 000000000..12e7d3d80 --- /dev/null +++ b/test/mirror/node_miner_0/private.key @@ -0,0 +1 @@ 
+8s_/W-7IyH_DyTG*M9C#8p%x>SߪRLmPB>{:̜뢷|| \ No newline at end of file diff --git a/test/mirror/node_miner_1/config.yaml b/test/mirror/node_miner_1/config.yaml new file mode 100644 index 000000000..28a4cece9 --- /dev/null +++ b/test/mirror/node_miner_1/config.yaml @@ -0,0 +1,105 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:5145" +ThisNodeID: "000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5" +QPS: 1000 +BillingBlockCount: 3600 +ChainBusPeriod: 1s +BPPeriod: 3s +BPTick: 1s +SQLChainPeriod: 3s +SQLChainTick: 1s +SQLChainTTL: 10 +MinProviderDeposit: 1000000 +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com + oJMRESz5E4gYzS/q6XDrvU1qMPYIjCWzJaOau8XNEZeqCYKD5ar0IRd8KqXXFJkqmVfRvMGPmM1x8fGAa2XhSA==: cloudflare.com +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z +Miner: + IsTestMode: true + RootDir: "./data" + MaxReqTimeGap: "2s" + ProvideServiceInterval: "3s" +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:5122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 
00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:5121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:5120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:5144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:5145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:5146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/mirror/node_miner_1/private.key b/test/mirror/node_miner_1/private.key new file mode 100644 index 000000000..44e8915e6 --- /dev/null +++ b/test/mirror/node_miner_1/private.key @@ -0,0 +1,2 @@ +s]](o3R +D5*9C 7ZinƋSp*SS5^ޑax>Xо2#IxRw+Ŕ \ No newline at end of file diff --git a/test/mirror/node_miner_2/config.yaml b/test/mirror/node_miner_2/config.yaml new file mode 100644 index 000000000..45ab4520c --- /dev/null +++ b/test/mirror/node_miner_2/config.yaml @@ -0,0 +1,105 @@ +IsTestMode: true +WorkingRoot: "./" 
+PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:5146" +ThisNodeID: "000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8" +QPS: 1000 +BillingBlockCount: 3600 +ChainBusPeriod: 1s +BPPeriod: 3s +BPTick: 1s +SQLChainPeriod: 3s +SQLChainTick: 1s +SQLChainTTL: 10 +MinProviderDeposit: 1000000 +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com + oJMRESz5E4gYzS/q6XDrvU1qMPYIjCWzJaOau8XNEZeqCYKD5ar0IRd8KqXXFJkqmVfRvMGPmM1x8fGAa2XhSA==: cloudflare.com +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z +Miner: + IsTestMode: true + RootDir: "./data" + MaxReqTimeGap: "2s" + ProvideServiceInterval: "3s" +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:5122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:5121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + 
Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:5120 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d + Nonce: + a: 22403 + b: 0 + c: 0 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:5144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:5145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:5146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/mirror/node_miner_2/private.key b/test/mirror/node_miner_2/private.key new file mode 100644 index 000000000..adb437e75 --- /dev/null +++ b/test/mirror/node_miner_2/private.key @@ -0,0 +1 @@ +6 i.i%8pVVrLBKb: 1;(fF &y췥 RW3?CA;e"K2 \ No newline at end of file From 5fca901f1c259080ee0b5ad51711f1cd1ce3fd04 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Fri, 22 Mar 2019 13:40:36 +0800 Subject: [PATCH 172/244] Enable log to stderr for mirror server --- sqlchain/mirror/mirror_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sqlchain/mirror/mirror_test.go b/sqlchain/mirror/mirror_test.go index 540a5224f..2a9093e96 100644 --- a/sqlchain/mirror/mirror_test.go +++ b/sqlchain/mirror/mirror_test.go @@ -272,7 +272,7 @@ func TestFullProcess(t *testing.T) { dsnCfg.DatabaseID, "127.0.0.1:5663", }, - "mirror", testWorkingDir, 
logDir, false, + "mirror", testWorkingDir, logDir, true, ) So(err, ShouldBeNil) defer func() { From 1e4b7ce91debd8118290264c5fa0ebaa637903b3 Mon Sep 17 00:00:00 2001 From: auxten Date: Fri, 22 Mar 2019 13:50:37 +0800 Subject: [PATCH 173/244] Add non-padding test --- crypto/cipher_test.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/crypto/cipher_test.go b/crypto/cipher_test.go index c4de08cac..38a540dea 100644 --- a/crypto/cipher_test.go +++ b/crypto/cipher_test.go @@ -23,6 +23,7 @@ import ( . "github.com/smartystreets/goconvey/convey" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/utils/log" ) // Test 1: Encryption and decryption. @@ -153,4 +154,14 @@ func TestAddPKCSPadding(t *testing.T) { So(cleanData, ShouldResemble, data) So(err, ShouldBeNil) }) + Convey("non-padding", t, func() { + data := []byte("xxxxxxxxxxxxxxxx") + padData := AddPKCSPadding(data) + log.Infof("len %d after pkcs#7: %d", len(data), len(padData)) + cleanData, err := RemovePKCSPadding(padData) + + So(cleanData, ShouldResemble, data) + So(err, ShouldBeNil) + }) + } From f82a55ba4b01a498c31e6eb152135b0419e74499 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Fri, 22 Mar 2019 13:55:40 +0800 Subject: [PATCH 174/244] Add missing mirror config file --- test/mirror/node_mirror/config.yaml | 109 ++++++++++++++++++++++++++++ test/mirror/node_mirror/private.key | Bin 0 -> 96 bytes 2 files changed, 109 insertions(+) create mode 100644 test/mirror/node_mirror/config.yaml create mode 100644 test/mirror/node_mirror/private.key diff --git a/test/mirror/node_mirror/config.yaml b/test/mirror/node_mirror/config.yaml new file mode 100644 index 000000000..08aa15797 --- /dev/null +++ b/test/mirror/node_mirror/config.yaml @@ -0,0 +1,109 @@ +IsTestMode: true +WorkingRoot: "./" +PubKeyStoreFile: "public.keystore" +PrivateKeyFile: "private.key" +DHTFileName: "dht.db" +ListenAddr: "127.0.0.1:5120" +ThisNodeID: 
"0000003b5316520d2ecf1adac937301d00a019171d899cb3949a067593559acd" +QPS: 1000 +BillingBlockCount: 3600 +ChainBusPeriod: 1s +BPPeriod: 3s +BPTick: 1s +SQLChainPeriod: 3s +SQLChainTick: 1s +SQLChainTTL: 10 +MinProviderDeposit: 1000000 +ValidDNSKeys: + koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com + mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com + oJMRESz5E4gYzS/q6XDrvU1qMPYIjCWzJaOau8XNEZeqCYKD5ar0IRd8KqXXFJkqmVfRvMGPmM1x8fGAa2XhSA==: cloudflare.com +MinNodeIDDifficulty: 2 +DNSSeed: + EnforcedDNSSEC: false + DNSServers: + - 1.1.1.1 + - 202.46.34.74 + - 202.46.34.75 + - 202.46.34.76 + +BlockProducer: + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + ChainFileName: "chain.db" + BPGenesisInfo: + Version: 1 + BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 + Producer: 0000000000000000000000000000000000000000000000000000000000000001 + MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 + ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 + Timestamp: 2018-08-13T21:59:59.12Z +KnownNodes: +- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 + Nonce: + a: 313283 + b: 0 + c: 0 + d: 0 + Addr: 127.0.0.1:5122 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Leader +- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 + Nonce: + a: 478373 + b: 0 + c: 0 + d: 2305843009893772025 + Addr: 127.0.0.1:5121 + PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 + Nonce: + a: 259939 + b: 0 + c: 0 + d: 2305843012544226372 + Addr: 127.0.0.1:5120 + PublicKey: 
"02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" + Role: Follower +- ID: 0000003b5316520d2ecf1adac937301d00a019171d899cb3949a067593559acd + Nonce: + a: 829487 + b: 0 + c: 1485583208 + d: 0 + Addr: "" + PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 + Role: Client +- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade + Nonce: + a: 567323 + b: 0 + c: 0 + d: 3104982049 + Addr: 127.0.0.1:5144 + PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 + Role: Miner +- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 + Nonce: + a: 240524 + b: 0 + c: 0 + d: 2305843010430351476 + Addr: 127.0.0.1:5145 + PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 + Role: Miner +- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 + Nonce: + a: 606016 + b: 0 + c: 0 + d: 13835058056920509601 + Addr: 127.0.0.1:5146 + PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 + Role: Miner diff --git a/test/mirror/node_mirror/private.key b/test/mirror/node_mirror/private.key new file mode 100644 index 0000000000000000000000000000000000000000..f563980c1fcd669303b1bee9c2172bf5a3519b8c GIT binary patch literal 96 zcmV-m0H6PF*slzHCqzPE3aw^kxJ?Q%G%ogw14*THn=7~eV;?h-t?#^t5W+6R^1DgL z$@60LgW8>L#Ft4anW%5%J6f5~?krWm@CHc~TLX=J0P-Na@n`wgY{PEN*;2omcYC0; Ca4_ls literal 0 HcmV?d00001 From 4f01e3a537a73fbb7234eec117817011f1a1ec90 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Fri, 22 Mar 2019 13:55:46 +0800 Subject: [PATCH 175/244] Update cleanup db script --- cleanupDB.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cleanupDB.sh b/cleanupDB.sh index ff4110925..f77b5ca7e 100755 --- a/cleanupDB.sh +++ b/cleanupDB.sh @@ -6,6 +6,6 @@ cd ${PROJECT_DIR} && find . -name '*.db' -exec rm -vf {} \; cd ${PROJECT_DIR} && find . -name '*.db-shm' -exec rm -vf {} \; cd ${PROJECT_DIR} && find . 
-name '*.db-wal' -exec rm -vf {} \; cd ${PROJECT_DIR} && find . -name 'db.meta' -exec rm -vf {} \; -cd ${PROJECT_DIR} && find . -name 'public.keystore' -exec rm -vf {} \; -cd ${PROJECT_DIR} && find . -name '*.public.keystore' -exec rm -vf {} \; +cd ${PROJECT_DIR} && find . -name 'public.keystore*' -exec rm -vf {} \; +cd ${PROJECT_DIR} && find . -name '*.public.keystore*' -exec rm -vf {} \; cd ${PROJECT_DIR} && find . -type d -name '*.ldb' -prune -exec rm -vrf {} \; From 1e5489da202e939e9837b67ec06fc14bc4cd029d Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Fri, 22 Mar 2019 14:11:44 +0800 Subject: [PATCH 176/244] Use SIGINT instead of SIGTERM for integration test subprocess signal --- cmd/cql-minerd/integration_test.go | 3 +-- cmd/cqld/bench_test.go | 3 +-- cmd/cqld/cqld_test.go | 4 ++-- sqlchain/mirror/mirror_test.go | 5 ++--- sqlchain/observer/observation_test.go | 3 +-- 5 files changed, 7 insertions(+), 11 deletions(-) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index 38046c5c3..1b48c744e 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -32,7 +32,6 @@ import ( "strings" "sync" "sync/atomic" - "syscall" "testing" "time" @@ -340,7 +339,7 @@ func stopNodes() { wg.Add(1) go func(thisCmd *utils.CMD) { defer wg.Done() - thisCmd.Cmd.Process.Signal(syscall.SIGTERM) + thisCmd.Cmd.Process.Signal(os.Interrupt) thisCmd.Cmd.Wait() grepRace := exec.Command("/bin/sh", "-c", "grep -a -A 50 'DATA RACE' "+thisCmd.LogPath) out, _ := grepRace.Output() diff --git a/cmd/cqld/bench_test.go b/cmd/cqld/bench_test.go index df4d35504..058094076 100644 --- a/cmd/cqld/bench_test.go +++ b/cmd/cqld/bench_test.go @@ -25,7 +25,6 @@ import ( "os/exec" "path/filepath" "sync" - "syscall" "testing" "time" @@ -114,7 +113,7 @@ func stopNodes() { wg.Add(1) go func(thisCmd *utils.CMD) { defer wg.Done() - thisCmd.Cmd.Process.Signal(syscall.SIGTERM) + thisCmd.Cmd.Process.Signal(os.Interrupt) thisCmd.Cmd.Wait() grepRace := 
exec.Command("/bin/sh", "-c", "grep -a -A 50 'DATA RACE' "+thisCmd.LogPath) out, _ := grepRace.Output() diff --git a/cmd/cqld/cqld_test.go b/cmd/cqld/cqld_test.go index 08b1e5894..ca8814e0b 100644 --- a/cmd/cqld/cqld_test.go +++ b/cmd/cqld/cqld_test.go @@ -20,7 +20,7 @@ package main import ( "context" - "syscall" + "os" "testing" "time" @@ -70,7 +70,7 @@ func TestCQLD(t *testing.T) { time.Sleep(15 * time.Second) // Kill one BP follower - err = nodeCmds[2].Cmd.Process.Signal(syscall.SIGTERM) + err = nodeCmds[2].Cmd.Process.Signal(os.Interrupt) So(err, ShouldBeNil) time.Sleep(15 * time.Second) diff --git a/sqlchain/mirror/mirror_test.go b/sqlchain/mirror/mirror_test.go index 2a9093e96..d89b4188b 100644 --- a/sqlchain/mirror/mirror_test.go +++ b/sqlchain/mirror/mirror_test.go @@ -25,7 +25,6 @@ import ( "path/filepath" "strconv" "sync" - "syscall" "testing" "time" @@ -168,7 +167,7 @@ func stopNodes() { wg.Add(1) go func(thisCmd *utils.CMD) { defer wg.Done() - thisCmd.Cmd.Process.Signal(syscall.SIGTERM) + thisCmd.Cmd.Process.Signal(os.Interrupt) thisCmd.Cmd.Wait() grepRace := exec.Command("/bin/sh", "-c", "grep -a -A 50 'DATA RACE' "+thisCmd.LogPath) out, _ := grepRace.Output() @@ -276,7 +275,7 @@ func TestFullProcess(t *testing.T) { ) So(err, ShouldBeNil) defer func() { - mirrorCmd.Cmd.Process.Signal(syscall.SIGTERM) + mirrorCmd.Cmd.Process.Signal(os.Interrupt) mirrorCmd.Cmd.Wait() }() diff --git a/sqlchain/observer/observation_test.go b/sqlchain/observer/observation_test.go index 265c42d82..5cef357e9 100644 --- a/sqlchain/observer/observation_test.go +++ b/sqlchain/observer/observation_test.go @@ -28,7 +28,6 @@ import ( "path/filepath" "runtime/debug" "sync" - "syscall" "testing" "time" @@ -191,7 +190,7 @@ func stopNodes() { wg.Add(1) go func(thisCmd *utils.CMD) { defer wg.Done() - thisCmd.Cmd.Process.Signal(syscall.SIGTERM) + thisCmd.Cmd.Process.Signal(os.Interrupt) thisCmd.Cmd.Wait() grepRace := exec.Command("/bin/sh", "-c", "grep -a -A 50 'DATA RACE' 
"+thisCmd.LogPath) out, _ := grepRace.Output() From 9bd6f9f97a85ac50c717236bbdbf86d000685bd4 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Fri, 22 Mar 2019 14:28:25 +0800 Subject: [PATCH 177/244] Add integration test case coverage for bin/cql.test --- cmd/cql/internal/base.go | 3 ++- cmd/cql/main.go | 1 - cmd/cql/main_test.go | 10 ++++++++-- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/cmd/cql/internal/base.go b/cmd/cql/internal/base.go index 4d9b2a432..7ccf52fd7 100644 --- a/cmd/cql/internal/base.go +++ b/cmd/cql/internal/base.go @@ -98,7 +98,8 @@ func (c *Command) Runnable() bool { var atExitFuncs []func() -func atExit(f func()) { +// AtExit will register function to be executed before exit. +func AtExit(f func()) { atExitFuncs = append(atExitFuncs, f) } diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 0b343d254..8809408ab 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -47,7 +47,6 @@ func init() { } func main() { - internal.Version = version // set random diff --git a/cmd/cql/main_test.go b/cmd/cql/main_test.go index ef97d55c7..fa2c6b32a 100644 --- a/cmd/cql/main_test.go +++ b/cmd/cql/main_test.go @@ -18,9 +18,15 @@ package main -import "testing" +import ( + "testing" + + "github.com/CovenantSQL/CovenantSQL/cmd/cql/internal" +) func TestMain(m *testing.M) { - defer m.Run() + internal.AtExit(func() { + m.Run() + }) main() } From 697f7fb79e7e475d132f24f6a899f30e83a9011c Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Fri, 22 Mar 2019 14:45:40 +0800 Subject: [PATCH 178/244] Client dsn mirror option coverage --- client/config_test.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/client/config_test.go b/client/config_test.go index a9b49d0b5..be893317f 100644 --- a/client/config_test.go +++ b/client/config_test.go @@ -91,4 +91,13 @@ func TestConfig(t *testing.T) { UseFollower: true, }) }) + + Convey("test format and parse dsn with mirror option", t, func() { + cfg, err := ParseDSN("covenantsql://db?mirror=happy") + So(err, 
ShouldBeNil) + So(cfg.Mirror, ShouldEqual, "happy") + So(cfg.FormatDSN(), ShouldEqual, "covenantsql://db?mirror=happy") + cfg.Mirror = "" + So(cfg.FormatDSN(), ShouldEqual, "covenantsql://db") + }) } From 03c56e19495987f18beddecb048019bf3453fa68 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Fri, 22 Mar 2019 15:00:30 +0800 Subject: [PATCH 179/244] Add more rpc unit test --- rpc/rawcaller_test.go | 76 +++++++++++++++++++++++++++++++++++++++++++ rpc/rpcutil_test.go | 8 +++++ 2 files changed, 84 insertions(+) create mode 100644 rpc/rawcaller_test.go diff --git a/rpc/rawcaller_test.go b/rpc/rawcaller_test.go new file mode 100644 index 000000000..2dbb7edcf --- /dev/null +++ b/rpc/rawcaller_test.go @@ -0,0 +1,76 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package rpc + +import ( + "net" + "testing" + + "github.com/pkg/errors" + . 
"github.com/smartystreets/goconvey/convey" +) + +type testService struct{} + +func (s *testService) Test(req *int, resp *int) (err error) { + *resp = *req + 1 + return +} + +func (s *testService) TestFailed(req *int, resp *interface{}) (err error) { + return errors.New("failed") +} + +func (s *testService) TestReconnect(req *int, resp *interface{}) (err error) { + return errors.New("shut down") +} + +func TestRawCaller(t *testing.T) { + Convey("test raw caller methods", t, func() { + s := NewServer() + err := s.RegisterService("Test", &testService{}) + So(err, ShouldBeNil) + l, err := net.Listen("tcp", ":0") + So(err, ShouldBeNil) + s.SetListener(l) + go s.Serve() + defer s.Stop() + c := NewRawCaller(l.Addr().String()) + defer c.Close() + var resp int + err = c.Call("Test.Test", 1, &resp) + So(err, ShouldBeNil) + So(resp, ShouldEqual, 2) + err = c.Call("Test.TestFailed", 1, nil) + So(err, ShouldNotBeNil) + So(errors.Cause(err).Error(), ShouldEqual, "failed") + err = c.Call("Test.TestReconnect", 1, nil) + So(err, ShouldNotBeNil) + So(err.Error(), ShouldContainSubstring, "shut down") + So(c.Target(), ShouldEqual, l.Addr().String()) + err = c.Call("Test.Test", 2, &resp) + So(err, ShouldBeNil) + So(resp, ShouldEqual, 3) + + // test new client + c2 := c.New() + defer c2.Close() + err = c2.Call("Test.Test", 4, &resp) + So(err, ShouldBeNil) + So(resp, ShouldEqual, 5) + }) +} diff --git a/rpc/rpcutil_test.go b/rpc/rpcutil_test.go index 57963d432..2e5b66de5 100644 --- a/rpc/rpcutil_test.go +++ b/rpc/rpcutil_test.go @@ -225,6 +225,14 @@ func TestNewPersistentCaller(t *testing.T) { Node: *node1, } + if client.Target() != string(conf.GConf.BP.NodeID) { + t.Fatal("persistent caller target not equal") + } + + if client.New() == nil { + t.Fatal("new persistent caller failed") + } + respA := new(proto.PingResp) err = client.Call("DHT.Ping", reqA, respA) if err != nil { From 301aa44bf0d1c8e077d51c1528d10574e65933f9 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Fri, 22 Mar 2019 
15:15:24 +0800 Subject: [PATCH 180/244] Add use follower flag test in integration test --- cmd/cql-minerd/integration_test.go | 31 ++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index 1b48c744e..a83cd3ab8 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -595,6 +595,37 @@ func TestFullProcess(t *testing.T) { err = db.Close() So(err, ShouldBeNil) + // test query from follower node + dsnCfgMix := *dsnCfg + dsnCfgMix.UseLeader = true + dsnCfgMix.UseFollower = true + dbMix, err := sql.Open("covenantsql", dsnCfgMix.FormatDSN()) + So(err, ShouldBeNil) + defer dbMix.Close() + + result = 0 + err = dbMix.QueryRow("SELECT * FROM test LIMIT 1").Scan(&result) + So(err, ShouldBeNil) + So(result, ShouldEqual, 4) + + _, err = dbMix.Exec("INSERT INTO test VALUES(2)") + So(err, ShouldBeNil) + + // test query from follower only + dsnCfgFollower := *dsnCfg + dsnCfgFollower.UseLeader = false + dsnCfgFollower.UseFollower = true + dbFollower, err := sql.Open("covenantsql", dsnCfgFollower.FormatDSN()) + So(err, ShouldBeNil) + defer dbFollower.Close() + + err = dbFollower.QueryRow("SELECT * FROM test LIMIT 1").Scan(&result) + So(err, ShouldBeNil) + So(result, ShouldEqual, 4) + + _, err = dbFollower.Exec("INSERT INTO test VALUES(2)") + So(err, ShouldNotBeNil) + // TODO(lambda): Drop database }) } From eec853e4938747e92814ac60cbeb2d8950d7e855 Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Fri, 22 Mar 2019 16:04:41 +0800 Subject: [PATCH 181/244] Use SIGTERM instead of SIGINT in process exit control --- cmd/cql-minerd/integration_test.go | 3 ++- cmd/cqld/bench_test.go | 3 ++- cmd/cqld/cqld_test.go | 4 ++-- sqlchain/mirror/mirror_test.go | 7 ++++--- sqlchain/observer/observation_test.go | 7 ++++--- 5 files changed, 14 insertions(+), 10 deletions(-) diff --git a/cmd/cql-minerd/integration_test.go b/cmd/cql-minerd/integration_test.go index 
a83cd3ab8..44745276f 100644 --- a/cmd/cql-minerd/integration_test.go +++ b/cmd/cql-minerd/integration_test.go @@ -32,6 +32,7 @@ import ( "strings" "sync" "sync/atomic" + "syscall" "testing" "time" @@ -339,7 +340,7 @@ func stopNodes() { wg.Add(1) go func(thisCmd *utils.CMD) { defer wg.Done() - thisCmd.Cmd.Process.Signal(os.Interrupt) + thisCmd.Cmd.Process.Signal(syscall.SIGTERM) thisCmd.Cmd.Wait() grepRace := exec.Command("/bin/sh", "-c", "grep -a -A 50 'DATA RACE' "+thisCmd.LogPath) out, _ := grepRace.Output() diff --git a/cmd/cqld/bench_test.go b/cmd/cqld/bench_test.go index 058094076..df4d35504 100644 --- a/cmd/cqld/bench_test.go +++ b/cmd/cqld/bench_test.go @@ -25,6 +25,7 @@ import ( "os/exec" "path/filepath" "sync" + "syscall" "testing" "time" @@ -113,7 +114,7 @@ func stopNodes() { wg.Add(1) go func(thisCmd *utils.CMD) { defer wg.Done() - thisCmd.Cmd.Process.Signal(os.Interrupt) + thisCmd.Cmd.Process.Signal(syscall.SIGTERM) thisCmd.Cmd.Wait() grepRace := exec.Command("/bin/sh", "-c", "grep -a -A 50 'DATA RACE' "+thisCmd.LogPath) out, _ := grepRace.Output() diff --git a/cmd/cqld/cqld_test.go b/cmd/cqld/cqld_test.go index ca8814e0b..08b1e5894 100644 --- a/cmd/cqld/cqld_test.go +++ b/cmd/cqld/cqld_test.go @@ -20,7 +20,7 @@ package main import ( "context" - "os" + "syscall" "testing" "time" @@ -70,7 +70,7 @@ func TestCQLD(t *testing.T) { time.Sleep(15 * time.Second) // Kill one BP follower - err = nodeCmds[2].Cmd.Process.Signal(os.Interrupt) + err = nodeCmds[2].Cmd.Process.Signal(syscall.SIGTERM) So(err, ShouldBeNil) time.Sleep(15 * time.Second) diff --git a/sqlchain/mirror/mirror_test.go b/sqlchain/mirror/mirror_test.go index d89b4188b..7980ebc77 100644 --- a/sqlchain/mirror/mirror_test.go +++ b/sqlchain/mirror/mirror_test.go @@ -25,6 +25,7 @@ import ( "path/filepath" "strconv" "sync" + "syscall" "testing" "time" @@ -167,7 +168,7 @@ func stopNodes() { wg.Add(1) go func(thisCmd *utils.CMD) { defer wg.Done() - thisCmd.Cmd.Process.Signal(os.Interrupt) + 
thisCmd.Cmd.Process.Signal(syscall.SIGTERM) thisCmd.Cmd.Wait() grepRace := exec.Command("/bin/sh", "-c", "grep -a -A 50 'DATA RACE' "+thisCmd.LogPath) out, _ := grepRace.Output() @@ -275,12 +276,12 @@ func TestFullProcess(t *testing.T) { ) So(err, ShouldBeNil) defer func() { - mirrorCmd.Cmd.Process.Signal(os.Interrupt) + mirrorCmd.Cmd.Process.Signal(syscall.SIGTERM) mirrorCmd.Cmd.Wait() }() defer func() { - _ = mirrorCmd.Cmd.Process.Signal(os.Interrupt) + _ = mirrorCmd.Cmd.Process.Signal(syscall.SIGTERM) _ = mirrorCmd.Cmd.Wait() }() diff --git a/sqlchain/observer/observation_test.go b/sqlchain/observer/observation_test.go index 5cef357e9..ccd88c672 100644 --- a/sqlchain/observer/observation_test.go +++ b/sqlchain/observer/observation_test.go @@ -28,6 +28,7 @@ import ( "path/filepath" "runtime/debug" "sync" + "syscall" "testing" "time" @@ -190,7 +191,7 @@ func stopNodes() { wg.Add(1) go func(thisCmd *utils.CMD) { defer wg.Done() - thisCmd.Cmd.Process.Signal(os.Interrupt) + thisCmd.Cmd.Process.Signal(syscall.SIGTERM) thisCmd.Cmd.Wait() grepRace := exec.Command("/bin/sh", "-c", "grep -a -A 50 'DATA RACE' "+thisCmd.LogPath) out, _ := grepRace.Output() @@ -497,7 +498,7 @@ func TestFullProcess(t *testing.T) { So(err, ShouldBeNil) defer func() { - observerCmd.Cmd.Process.Signal(os.Interrupt) + observerCmd.Cmd.Process.Signal(syscall.SIGTERM) observerCmd.Cmd.Wait() }() @@ -712,7 +713,7 @@ func TestFullProcess(t *testing.T) { _, err = client.Drop(dsn2) So(err, ShouldBeNil) - observerCmd.Cmd.Process.Signal(os.Interrupt) + observerCmd.Cmd.Process.Signal(syscall.SIGTERM) observerCmd.Cmd.Wait() // start observer again From 943b3fc0cb1afde2a453f8d557628bd11d36b14a Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Fri, 22 Mar 2019 16:26:10 +0800 Subject: [PATCH 182/244] Add missing return statement --- sqlchain/mirror/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sqlchain/mirror/server.go b/sqlchain/mirror/server.go index ca828d81b..d2e176bec 100644 --- 
a/sqlchain/mirror/server.go +++ b/sqlchain/mirror/server.go @@ -50,7 +50,7 @@ func StartMirror(database string, listenAddr string) (service *Service, err erro // start mirror if err = service.start(); err != nil { - + return } return From bbae073bed4f08d8dc92e68b00987d82c746b7ec Mon Sep 17 00:00:00 2001 From: Qi Xiao Date: Fri, 22 Mar 2019 18:14:35 +0800 Subject: [PATCH 183/244] Remove useless err check --- sqlchain/mirror/server.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/sqlchain/mirror/server.go b/sqlchain/mirror/server.go index d2e176bec..f3578f912 100644 --- a/sqlchain/mirror/server.go +++ b/sqlchain/mirror/server.go @@ -49,9 +49,7 @@ func StartMirror(database string, listenAddr string) (service *Service, err erro } // start mirror - if err = service.start(); err != nil { - return - } + err = service.start() return } From 35e464328316cdaa5c97a7626e3d2d8d8dd84ceb Mon Sep 17 00:00:00 2001 From: laodouya Date: Fri, 22 Mar 2019 19:01:45 +0800 Subject: [PATCH 184/244] Make cql mirror accept both dsn and database_id. --- cmd/cql/internal/cfg.go | 2 ++ cmd/cql/internal/drop.go | 2 +- cmd/cql/internal/mirror.go | 19 +++++++++++++++---- 3 files changed, 18 insertions(+), 5 deletions(-) diff --git a/cmd/cql/internal/cfg.go b/cmd/cql/internal/cfg.go index 42ee87822..6e9f5d214 100644 --- a/cmd/cql/internal/cfg.go +++ b/cmd/cql/internal/cfg.go @@ -71,6 +71,8 @@ func configInit() { Exit() } + ConsoleLog.Info("init config success") + // TODO(leventeliu): discover more specific confirmation duration from config. We don't have // enough informations from config to do that currently, so just use a fixed and long enough // duration. 
diff --git a/cmd/cql/internal/drop.go b/cmd/cql/internal/drop.go index cc7561994..4562bf36a 100644 --- a/cmd/cql/internal/drop.go +++ b/cmd/cql/internal/drop.go @@ -54,7 +54,7 @@ func runDrop(cmd *Command, args []string) { // drop database if _, err := client.ParseDSN(dsn); err != nil { - // not a dsn + // not a dsn/dbid ConsoleLog.WithField("db", dsn).WithError(err).Error("Not a valid dsn") SetExitStatus(1) return diff --git a/cmd/cql/internal/mirror.go b/cmd/cql/internal/mirror.go index aa4c2a6d6..baa925fa9 100644 --- a/cmd/cql/internal/mirror.go +++ b/cmd/cql/internal/mirror.go @@ -17,6 +17,7 @@ package internal import ( + "github.com/CovenantSQL/CovenantSQL/client" "github.com/CovenantSQL/CovenantSQL/sqlchain/mirror" "github.com/CovenantSQL/CovenantSQL/utils" ) @@ -30,12 +31,12 @@ var ( // CmdMirror is cql mirror command. var CmdMirror = &Command{ - UsageLine: "cql mirror [-config file] [-tmp-path path] [-bg-log-level level] database address", + UsageLine: "cql mirror [-config file] [-tmp-path path] [-bg-log-level level] dsn/dbid address", Short: "start a SQLChain database mirror", Long: ` Mirror command subscribes database updates and serves a read-only database mirror. e.g. 
- cql mirror 127.0.0.1:8546 + cql mirror database_id 127.0.0.1:9389 `, } @@ -69,14 +70,24 @@ func runMirror(cmd *Command, args []string) { bgServerInit() if len(args) != 2 { - ConsoleLog.Error("Mirror command need database and listen address as parameters") + ConsoleLog.Error("Mirror command need database_id/dsn and listen address as parameters") SetExitStatus(1) return } - mirrorDatabase = args[0] + dsn := args[0] mirrorAddr = args[1] + cfg, err := client.ParseDSN(dsn) + if err != nil { + // not a dsn/dbid + ConsoleLog.WithField("db", dsn).WithError(err).Error("Not a valid dsn") + SetExitStatus(1) + return + } + + mirrorDatabase = cfg.DatabaseID + cancelFunc := startMirrorServer(mirrorDatabase, mirrorAddr) ExitIfErrors() defer cancelFunc() From 6b51d526f456e09df56eb50b3a6f92773470294b Mon Sep 17 00:00:00 2001 From: Levente Liu Date: Fri, 22 Mar 2019 21:11:59 +0800 Subject: [PATCH 185/244] Use a global leveldb instance for multiple chains --- sqlchain/chain.go | 94 +++++++++++++++++++++++---------------------- worker/db.go | 2 +- worker/db_config.go | 1 + worker/dbms.go | 1 + 4 files changed, 51 insertions(+), 47 deletions(-) diff --git a/sqlchain/chain.go b/sqlchain/chain.go index a64e7ad69..eaf6bb6ef 100644 --- a/sqlchain/chain.go +++ b/sqlchain/chain.go @@ -54,9 +54,13 @@ var ( metaBlockIndex = [4]byte{'B', 'L', 'C', 'K'} metaResponseIndex = [4]byte{'R', 'E', 'S', 'P'} metaAckIndex = [4]byte{'Q', 'A', 'C', 'K'} - leveldbConf = opt.Options{ + + leveldbConf = opt.Options{ Compression: opt.SnappyCompression, } + leveldbInit sync.Once + blkDB *leveldb.DB + txDB *leveldb.DB // Atomic counters for stats cachedBlockCount int32 @@ -94,15 +98,11 @@ func keyWithSymbolToHeight(k []byte) int32 { // Chain represents a sql-chain. 
type Chain struct { - // bdb stores state, profile and block - bdb *leveldb.DB - // tdb stores ack/request/response - tdb *leveldb.DB - bi *blockIndex - ai *ackIndex - st *x.State - cl *rpc.Caller - rt *runtime + bi *blockIndex + ai *ackIndex + st *x.State + cl *rpc.Caller + rt *runtime blocks chan *types.Block heights chan int32 @@ -121,6 +121,10 @@ type Chain struct { pk *asymmetric.PrivateKey // addr is the AccountAddress generate from public key. addr *proto.AccountAddress + // key prefixes + metaBlockIndex []byte + metaResponseIndex []byte + metaAckIndex []byte } // NewChain creates a new sql-chain struct. @@ -131,23 +135,29 @@ func NewChain(c *Config) (chain *Chain, err error) { // NewChainWithContext creates a new sql-chain struct with context. func NewChainWithContext(ctx context.Context, c *Config) (chain *Chain, err error) { le := log.WithField("db", c.DatabaseID) - // Open LevelDB for block and state - bdbFile := c.ChainFilePrefix + "-block-state.ldb" - bdb, err := leveldb.OpenFile(bdbFile, &leveldbConf) - if err != nil { - err = errors.Wrapf(err, "open leveldb %s", bdbFile) - return - } - le.Debugf("opened chain bdb %s", bdbFile) - // Open LevelDB for ack/request/response - tdbFile := c.ChainFilePrefix + "-ack-req-resp.ldb" - tdb, err := leveldb.OpenFile(tdbFile, &leveldbConf) + leveldbInit.Do(func() { + // Open LevelDB for block and state + bdbFile := c.ChainFilePrefix + "-block-state.ldb" + blkDB, err = leveldb.OpenFile(bdbFile, &leveldbConf) + if err != nil { + err = errors.Wrapf(err, "open leveldb %s", bdbFile) + return + } + le.Debugf("opened chain bdb %s", bdbFile) + + // Open LevelDB for ack/request/response + tdbFile := c.ChainFilePrefix + "-ack-req-resp.ldb" + txDB, err = leveldb.OpenFile(tdbFile, &leveldbConf) + if err != nil { + err = errors.Wrapf(err, "open leveldb %s", tdbFile) + return + } + le.Debugf("opened chain tdb %s", tdbFile) + }) if err != nil { - err = errors.Wrapf(err, "open leveldb %s", tdbFile) return } - le.Debugf("opened 
chain tdb %s", tdbFile) // Open storage var strg xi.Storage @@ -173,8 +183,6 @@ func NewChainWithContext(ctx context.Context, c *Config) (chain *Chain, err erro // Create chain state chain = &Chain{ - bdb: bdb, - tdb: tdb, bi: newBlockIndex(), ai: newAckIndex(), st: x.NewState(sql.IsolationLevel(c.IsolationLevel), c.Server, strg), @@ -189,8 +197,11 @@ func NewChainWithContext(ctx context.Context, c *Config) (chain *Chain, err erro updatePeriod: c.UpdatePeriod, databaseID: c.DatabaseID, - pk: pk, - addr: &addr, + pk: pk, + addr: &addr, + metaBlockIndex: utils.ConcatAll([]byte(c.DatabaseID), metaBlockIndex[:]), + metaResponseIndex: utils.ConcatAll([]byte(c.DatabaseID), metaResponseIndex[:]), + metaAckIndex: utils.ConcatAll([]byte(c.DatabaseID), metaAckIndex[:]), } le = le.WithField("peer", chain.rt.getPeerInfoString()) @@ -198,7 +209,7 @@ func NewChainWithContext(ctx context.Context, c *Config) (chain *Chain, err erro var ( id uint64 last, parent *blockNode - blockIter = chain.bdb.NewIterator(util.BytesPrefix(metaBlockIndex[:]), nil) + blockIter = blkDB.NewIterator(util.BytesPrefix(chain.metaBlockIndex), nil) ) defer blockIter.Release() for blockIter.Next() { @@ -267,7 +278,7 @@ func NewChainWithContext(ctx context.Context, c *Config) (chain *Chain, err erro chain.pruneBlockCache() // Read queries and rebuild memory index - respIter := chain.tdb.NewIterator(util.BytesPrefix(metaResponseIndex[:]), nil) + respIter := txDB.NewIterator(util.BytesPrefix(chain.metaResponseIndex), nil) defer respIter.Release() for respIter.Next() { k := respIter.Key() @@ -289,7 +300,7 @@ func NewChainWithContext(ctx context.Context, c *Config) (chain *Chain, err erro return } - ackIter := chain.tdb.NewIterator(util.BytesPrefix(metaAckIndex[:]), nil) + ackIter := txDB.NewIterator(util.BytesPrefix(chain.metaAckIndex), nil) defer ackIter.Release() for ackIter.Next() { k := ackIter.Key() @@ -338,7 +349,7 @@ func (c *Chain) pushBlock(b *types.Block) (err error) { Height: node.height, } - 
blockKey = utils.ConcatAll(metaBlockIndex[:], node.indexKey()) + blockKey = utils.ConcatAll(c.metaBlockIndex, node.indexKey()) encBlock *bytes.Buffer ) if encBlock, err = utils.EncodeMsgPack(b); err != nil { @@ -346,7 +357,7 @@ func (c *Chain) pushBlock(b *types.Block) (err error) { } // Put block - err = c.bdb.Put(blockKey, encBlock.Bytes(), nil) + err = blkDB.Put(blockKey, encBlock.Bytes(), nil) if err != nil { err = errors.Wrapf(err, "put %s", string(node.indexKey())) return @@ -408,14 +419,14 @@ func (c *Chain) pushAckedQuery(ack *types.SignedAckHeader) (err error) { return } - tdbKey := utils.ConcatAll(metaAckIndex[:], k, ack.Hash().AsBytes()) + tdbKey := utils.ConcatAll(c.metaAckIndex, k, ack.Hash().AsBytes()) if err = c.register(ack); err != nil { err = errors.Wrapf(err, "register ack %v at height %d", ack.Hash(), h) return } - if err = c.tdb.Put(tdbKey, enc.Bytes(), nil); err != nil { + if err = txDB.Put(tdbKey, enc.Bytes(), nil); err != nil { err = errors.Wrapf(err, "put ack %d %s", h, ack.Hash().String()) return } @@ -763,17 +774,8 @@ func (c *Chain) Stop() (err error) { le.Debug("stopping chain") c.rt.stop(c.databaseID) le.Debug("chain service and workers stopped") - // Close LevelDB file - var ierr error - if ierr = c.bdb.Close(); ierr != nil && err == nil { - err = ierr - } - le.WithError(ierr).Debug("chain database closed") - if ierr = c.tdb.Close(); ierr != nil && err == nil { - err = ierr - } - le.WithError(ierr).Debug("chain database closed") // Close state + var ierr error if ierr = c.st.Close(false); ierr != nil && err == nil { err = ierr } @@ -817,9 +819,9 @@ func (c *Chain) FetchBlockByCount(count int32) (b *types.Block, realCount int32, } func (c *Chain) fetchBlockByIndexKey(indexKey []byte) (b *types.Block, err error) { - k := utils.ConcatAll(metaBlockIndex[:], indexKey) + k := utils.ConcatAll(c.metaBlockIndex, indexKey) var v []byte - v, err = c.bdb.Get(k, nil) + v, err = blkDB.Get(k, nil) if err != nil { err = errors.Wrapf(err, "fetch block 
%s", string(k)) return diff --git a/worker/db.go b/worker/db.go index fee900fc9..6a04297bd 100644 --- a/worker/db.go +++ b/worker/db.go @@ -150,7 +150,7 @@ func NewDatabase(cfg *DBConfig, peers *proto.Peers, } // init chain - chainFile := filepath.Join(cfg.DataDir, SQLChainFileName) + chainFile := filepath.Join(cfg.RootDir, SQLChainFileName) if db.nodeID, err = kms.GetLocalNodeID(); err != nil { return } diff --git a/worker/db_config.go b/worker/db_config.go index 38f840959..ec6a2792a 100644 --- a/worker/db_config.go +++ b/worker/db_config.go @@ -26,6 +26,7 @@ import ( // DBConfig defines the database config. type DBConfig struct { DatabaseID proto.DatabaseID + RootDir string DataDir string KayakMux *DBKayakMuxService ChainMux *sqlchain.MuxService diff --git a/worker/dbms.go b/worker/dbms.go index cec621e35..dd472d2d0 100644 --- a/worker/dbms.go +++ b/worker/dbms.go @@ -419,6 +419,7 @@ func (dbms *DBMS) Create(instance *types.ServiceInstance, cleanup bool) (err err // new db dbCfg := &DBConfig{ DatabaseID: instance.DatabaseID, + RootDir: dbms.cfg.RootDir, DataDir: rootDir, KayakMux: dbms.kayakMux, ChainMux: dbms.chainMux, From bc3b2723540fff337f1698f73d018d3cbb58c6f2 Mon Sep 17 00:00:00 2001 From: laodouya Date: Thu, 21 Mar 2019 14:35:41 +0800 Subject: [PATCH 186/244] Move 'cql-utils -tool rpc' func to 'cql rpc' command. 
--- cmd/cql-utils/main.go | 4 +- cmd/{cql-utils => cql/internal}/rpc.go | 100 ++++++++++++++----------- cmd/cql/main.go | 1 + 3 files changed, 60 insertions(+), 45 deletions(-) rename cmd/{cql-utils => cql/internal}/rpc.go (78%) diff --git a/cmd/cql-utils/main.go b/cmd/cql-utils/main.go index 7a7d8210d..f8bef0a02 100644 --- a/cmd/cql-utils/main.go +++ b/cmd/cql-utils/main.go @@ -46,7 +46,7 @@ const name = "cql-utils" func init() { log.SetLevel(log.InfoLevel) - flag.StringVar(&tool, "tool", "", "Tool type, miner, keytool, rpc, nonce, confgen, addrgen") + flag.StringVar(&tool, "tool", "", "Tool type, miner, keytool, nonce, confgen, addrgen") flag.StringVar(&publicKeyHex, "public", "", "Public key hex string to mine node id/nonce") flag.StringVar(&privateKeyFile, "private", "~/.cql/private.key", "Private key file to generate/show") flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file to use") @@ -89,8 +89,6 @@ func main() { os.Exit(1) } runKeytool() - case "rpc": - runRPC() case "nonce": runNonce() case "confgen": diff --git a/cmd/cql-utils/rpc.go b/cmd/cql/internal/rpc.go similarity index 78% rename from cmd/cql-utils/rpc.go rename to cmd/cql/internal/rpc.go index e2af31d7e..67bd05195 100644 --- a/cmd/cql-utils/rpc.go +++ b/cmd/cql/internal/rpc.go @@ -1,5 +1,5 @@ /* - * Copyright 2018 The CovenantSQL Authors. + * Copyright 2018-2019 The CovenantSQL Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,11 +14,10 @@ * limitations under the License. 
*/ -package main +package internal import ( "encoding/json" - "flag" "fmt" "reflect" "strings" @@ -28,7 +27,6 @@ import ( bp "github.com/CovenantSQL/CovenantSQL/blockproducer" pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" - "github.com/CovenantSQL/CovenantSQL/client" "github.com/CovenantSQL/CovenantSQL/crypto" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" @@ -37,7 +35,6 @@ import ( "github.com/CovenantSQL/CovenantSQL/rpc" "github.com/CovenantSQL/CovenantSQL/sqlchain" "github.com/CovenantSQL/CovenantSQL/types" - "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/CovenantSQL/CovenantSQL/worker" ) @@ -48,41 +45,49 @@ var ( route.SQLChainRPCName: &sqlchain.MuxService{}, route.BlockProducerRPCName: &bp.ChainRPCService{}, } - rpcName string - rpcEndpoint string - rpcReq string - rpcTxWaitConfirm bool + rpcName string + rpcEndpoint string + rpcReq string ) +// CmdRPC is cql rpc command entity. +var CmdRPC = &Command{ + UsageLine: "cql rpc [-config file] [-wait-tx-confirm] -name rpc_name -endpoint rpc_endpoint -req rpc_request", + Short: "make a rpc request", + Long: ` +Rpc command make a RPC request to server +e.g. 
+ cql rpc -name -endpoint -req +`, +} + type canSign interface { Sign(signer *asymmetric.PrivateKey) error } func init() { - flag.StringVar(&rpcName, "rpc", "", "rpc name to do test call") - flag.StringVar(&rpcEndpoint, "rpc-endpoint", "", "rpc endpoint to do test call") - flag.StringVar(&rpcReq, "rpc-req", "", "rpc request to do test call, in json format") - flag.BoolVar(&rpcTxWaitConfirm, "rpc-tx-wait-confirm", false, "wait for transaction confirmation") + CmdRPC.Run = runRPC + + addCommonFlags(CmdRPC) + addWaitFlag(CmdRPC) + + CmdRPC.Flag.StringVar(&rpcName, "name", "", "RPC name to do test call") + CmdRPC.Flag.StringVar(&rpcEndpoint, "endpoint", "", "RPC endpoint to do test call") + CmdRPC.Flag.StringVar(&rpcReq, "req", "", "RPC request to do test call, in json format") } -func runRPC() { - if configFile == "" { - // error - log.Fatal("config file path is required for rpc tool") - return - } +func runRPC(cmd *Command, args []string) { + configInit() + if rpcEndpoint == "" || rpcName == "" || rpcReq == "" { // error - log.Fatal("rpc payload is required for rpc tool") - return - } - - if err := client.Init(configFile, []byte("")); err != nil { - log.Fatalf("init rpc client failed: %v\n", err) + ConsoleLog.Error("rpc payload is required for rpc tool") + SetExitStatus(1) return } req, resp := resolveRPCEntities() + ExitIfErrors() if rpcName == route.MCCAddTx.String() { // special type of query @@ -94,7 +99,8 @@ func runRPC() { // fill the req with request body if err := json.Unmarshal([]byte(rpcReq), req); err != nil { - log.Fatalf("decode request body failed: %v\n", err) + ConsoleLog.WithError(err).Error("decode request body failed") + SetExitStatus(1) return } @@ -113,12 +119,14 @@ func runRPC() { // unwrapped tx, find account nonce field and set if err := fillTxNonce(tx); err != nil { - log.Fatalf("fill block producer transaction nonce failed: %v\n", err) + ConsoleLog.WithError(err).Error("fill block producer transaction nonce failed") + SetExitStatus(1) return 
} if err := checkAndSign(tx); err != nil { - log.Fatalf("sign transaction failed: %v\n", err) + ConsoleLog.WithError(err).Error("sign transaction failed") + SetExitStatus(1) return } } @@ -126,26 +134,27 @@ func runRPC() { // requires signature? if err := checkAndSign(req); err != nil { - log.Fatalf("sign request failed: %v\n", err) + ConsoleLog.WithError(err).Error("sign request failed") + SetExitStatus(1) return } - log.Info("sending request") + ConsoleLog.Info("sending request") spewCfg := spew.NewDefaultConfig() spewCfg.MaxDepth = 6 spewCfg.Dump(req) if err := rpc.NewCaller().CallNode(proto.NodeID(rpcEndpoint), rpcName, req, resp); err != nil { // send request failed - log.Infof("call rpc failed: %v\n", err) + ConsoleLog.Infof("call rpc failed: %v\n", err) return } // print the response - log.Info("got response") + ConsoleLog.Info("got response") spewCfg.Dump(resp) - if rpcName == route.MCCAddTx.String() && rpcTxWaitConfirm { - log.Info("waiting for transaction confirmation...") + if rpcName == route.MCCAddTx.String() && waitTxConfirmation { + ConsoleLog.Info("waiting for transaction confirmation...") var ( err error ticker = time.NewTicker(1 * time.Second) @@ -159,7 +168,9 @@ func runRPC() { route.MCCQueryTxState.String(), req, resp, ); err != nil { - log.Fatalf("query transaction state failed: %v", err) + ConsoleLog.WithError(err).Error("query transaction state failed") + SetExitStatus(1) + return } switch resp.State { case pi.TransactionStatePending: @@ -171,10 +182,14 @@ func runRPC() { return case pi.TransactionStateExpired, pi.TransactionStateNotFound: fmt.Print("✘\n") - log.Fatalf("bad transaction state: %s", resp.State) + ConsoleLog.Errorf("bad transaction state: %s", resp.State) + SetExitStatus(1) + return default: fmt.Print("✘\n") - log.Fatal("unknown transaction state") + ConsoleLog.Error("unknown transaction state") + SetExitStatus(1) + return } <-ticker.C } @@ -187,7 +202,7 @@ func checkAndSign(req interface{}) (err error) { } if canSignObj, ok := 
req.(canSign); ok { - log.Info("signing request") + ConsoleLog.Info("signing request") var privKey *asymmetric.PrivateKey if privKey, err = kms.GetLocalPrivateKey(); err != nil { @@ -283,7 +298,7 @@ func fillTxNonce(tx pi.Transaction) (err error) { } rv.SetUint(uint64(nonceResp.Nonce)) - log.Infof("filled tx type %s nonce field %s with nonce %d", + ConsoleLog.Infof("filled tx type %s nonce field %s with nonce %d", tx.GetTransactionType().String(), fieldPath, nonceResp.Nonce) return @@ -297,7 +312,8 @@ func resolveRPCEntities() (req interface{}, resp interface{}) { if len(rpcParts) != 2 { // error rpc name - log.Fatalf("%v is not a valid rpc name\n", rpcName) + ConsoleLog.Errorf("%v is not a valid rpc name\n", rpcName) + SetExitStatus(1) return } @@ -314,7 +330,7 @@ func resolveRPCEntities() (req interface{}, resp interface{}) { if method.Name == rpcParts[1] { // name matched if mtype.PkgPath() != "" || mtype.NumIn() != 3 || mtype.NumOut() != 1 { - log.Infof("%v is not a valid rpc endpoint method\n", rpcName) + ConsoleLog.Infof("%v is not a valid rpc endpoint method\n", rpcName) return } @@ -335,6 +351,6 @@ func resolveRPCEntities() (req interface{}, resp interface{}) { } // not found - log.Infof("rpc method %v not found\n", rpcName) + ConsoleLog.Infof("rpc method %v not found\n", rpcName) return } diff --git a/cmd/cql/main.go b/cmd/cql/main.go index 8809408ab..f319705c8 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -41,6 +41,7 @@ func init() { internal.CmdMirror, internal.CmdExplorer, internal.CmdAdapter, + internal.CmdRPC, internal.CmdVersion, internal.CmdHelp, } From 54864de7e1af31004b3f96894554be4bb4a5bcdb Mon Sep 17 00:00:00 2001 From: laodouya Date: Fri, 22 Mar 2019 15:44:30 +0800 Subject: [PATCH 187/244] Add askDeletePath to cql program. 
--- cmd/cql/internal/cfg.go | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/cmd/cql/internal/cfg.go b/cmd/cql/internal/cfg.go index 6e9f5d214..8d7bee08c 100644 --- a/cmd/cql/internal/cfg.go +++ b/cmd/cql/internal/cfg.go @@ -17,11 +17,13 @@ package internal import ( + "bufio" "context" "errors" "fmt" "os" "path/filepath" + "strings" "syscall" pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" @@ -134,3 +136,28 @@ func readMasterKey(skip bool) string { } return string(bytePwd) } + +func askDeletePath(path string) { + if _, err := os.Stat(path); err == nil { + reader := bufio.NewReader(os.Stdin) + fmt.Printf("\"%s\" already exists. \nDo you want to delete it? (y or n, press Enter for default n):\n", + path) + t, err := reader.ReadString('\n') + t = strings.Trim(t, "\n") + if err != nil { + ConsoleLog.WithError(err).Error("unexpected error") + SetExitStatus(1) + Exit() + } + if strings.Compare(t, "y") == 0 || strings.Compare(t, "yes") == 0 { + err = os.RemoveAll(path) + if err != nil { + ConsoleLog.WithError(err).Error("unexpected error") + SetExitStatus(1) + Exit() + } + } else { + Exit() + } + } +} From 2f5b61d9a7ffc3263de9d4b87d46ea11a9395810 Mon Sep 17 00:00:00 2001 From: laodouya Date: Fri, 22 Mar 2019 22:24:06 +0800 Subject: [PATCH 188/244] Label cql drop as current not supported. Print config filepath if init success. 
--- client/driver.go | 3 ++- cmd/cql/internal/cfg.go | 2 +- crypto/kms/privatekeystore.go | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/client/driver.go b/client/driver.go index 9be70a3c9..5793ebdda 100644 --- a/client/driver.go +++ b/client/driver.go @@ -284,7 +284,8 @@ func Drop(dsn string) (txHash hash.Hash, err error) { _ = cfg - // currently not supported + //TODO(laodouya) currently not supported + err = errors.New("drop db current not support") return } diff --git a/cmd/cql/internal/cfg.go b/cmd/cql/internal/cfg.go index 8d7bee08c..ebf186d04 100644 --- a/cmd/cql/internal/cfg.go +++ b/cmd/cql/internal/cfg.go @@ -73,7 +73,7 @@ func configInit() { Exit() } - ConsoleLog.Info("init config success") + ConsoleLog.WithField("path", configFile).Info("init config success") // TODO(leventeliu): discover more specific confirmation duration from config. We don't have // enough informations from config to do that currently, so just use a fixed and long enough diff --git a/crypto/kms/privatekeystore.go b/crypto/kms/privatekeystore.go index 65dacb9db..207f49228 100644 --- a/crypto/kms/privatekeystore.go +++ b/crypto/kms/privatekeystore.go @@ -161,7 +161,7 @@ func InitLocalKeyPair(privateKeyPath string, masterKey []byte) (err error) { return } } else { - log.WithError(err).Error("unexpected error while loading private key") + log.WithField("path", privateKeyPath).WithError(err).Error("unexpected error while loading private key") return } } From 7865ca2fa9a21d3fd20bfb5c13a297dbbee6243c Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 26 Mar 2019 15:53:31 +0800 Subject: [PATCH 189/244] Add generate command. add generate addr func. 
--- cmd/cql-utils/idminer.go | 1 + cmd/cql/internal/generate.go | 125 +++++++++++++++++++++++++++++++++++ cmd/cql/main.go | 1 + 3 files changed, 127 insertions(+) create mode 100644 cmd/cql/internal/generate.go diff --git a/cmd/cql-utils/idminer.go b/cmd/cql-utils/idminer.go index caa526434..e024a4049 100644 --- a/cmd/cql-utils/idminer.go +++ b/cmd/cql-utils/idminer.go @@ -90,6 +90,7 @@ func runMiner() { start := mine.Uint256{D: step*uint64(i) + uint64(rand.Uint32())} log.Infof("miner #%#v start: %#v\n", i, start) miner.ComputeBlockNonce(block, start, difficulty) + //TODO(laodouya) add wait group }(i) } diff --git a/cmd/cql/internal/generate.go b/cmd/cql/internal/generate.go new file mode 100644 index 000000000..b7c7e06de --- /dev/null +++ b/cmd/cql/internal/generate.go @@ -0,0 +1,125 @@ +/* + * Copyright 2018-2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package internal + +import ( + "encoding/hex" + "fmt" + + "github.com/CovenantSQL/CovenantSQL/conf" + "github.com/CovenantSQL/CovenantSQL/crypto" + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/crypto/kms" +) + +// CmdGenerate is cql generate command entity. +var CmdGenerate = &Command{ + UsageLine: "cql generate [-config file] config/private/addr/public/nonce", + Short: "generate config related file or keys", + Long: ` +Generate command can generate private.key and config.yaml for CovenantSQL. +e.g. 
+ cql generate config +`, +} + +func init() { + CmdGenerate.Run = runGenerate + + addCommonFlags(CmdGenerate) +} + +func runGenerate(cmd *Command, args []string) { + if len(args) != 1 { + ConsoleLog.Error("Generate command need specific type as params") + SetExitStatus(1) + return + } + genType := args[0] + + switch genType { + case "config": + case "private": + case "addr": + addrGen() + case "public": + configInit() + case "nonce": + configInit() + default: + cmd.Usage() + SetExitStatus(1) + return + } +} + +func configGen() { +} + +func privateGen() { +} + +func addrGen() { + configInit() + + var publicKey *asymmetric.PublicKey + + //TODO if config has addr, print + + //TODO if config has public, use it + publicKeyHex := "" + if publicKeyHex != "" { + publicKeyBytes, err := hex.DecodeString(publicKeyHex) + if err != nil { + ConsoleLog.WithError(err).Error("error converting hex") + SetExitStatus(1) + return + } + publicKey, err = asymmetric.ParsePubKey(publicKeyBytes) + if err != nil { + ConsoleLog.WithError(err).Error("error converting public key") + SetExitStatus(1) + return + } + } else { + //use config specific private key file(already init by configInit()) + privateKey, err := kms.LoadPrivateKey(conf.GConf.PrivateKeyFile, []byte(password)) + if err != nil { + ConsoleLog.WithError(err).Fatal("load private key file failed") + SetExitStatus(1) + return + } + publicKey = privateKey.PubKey() + } + + keyHash, err := crypto.PubKeyHash(publicKey) + if err != nil { + ConsoleLog.WithError(err).Error("unexpected error") + SetExitStatus(1) + return + } + + fmt.Printf("wallet address: %s\n", keyHash.String()) + + //TODO store in config.yaml +} + +func publicGen() { +} + +func nonceGen() { +} diff --git a/cmd/cql/main.go b/cmd/cql/main.go index f319705c8..e0f1b533a 100644 --- a/cmd/cql/main.go +++ b/cmd/cql/main.go @@ -32,6 +32,7 @@ var ( func init() { internal.CqlCommands = []*internal.Command{ + internal.CmdGenerate, internal.CmdConsole, internal.CmdCreate, 
internal.CmdDrop, From 040621869258a4d32acbf90034b267d849a3a04c Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 26 Mar 2019 16:18:32 +0800 Subject: [PATCH 190/244] Rename 'cql generate addr' to 'cql generate wallet'. And use public key first to generate. --- cmd/cql/internal/generate.go | 39 ++++++++++++++++-------------------- 1 file changed, 17 insertions(+), 22 deletions(-) diff --git a/cmd/cql/internal/generate.go b/cmd/cql/internal/generate.go index b7c7e06de..e6c35a62b 100644 --- a/cmd/cql/internal/generate.go +++ b/cmd/cql/internal/generate.go @@ -17,7 +17,6 @@ package internal import ( - "encoding/hex" "fmt" "github.com/CovenantSQL/CovenantSQL/conf" @@ -28,7 +27,7 @@ import ( // CmdGenerate is cql generate command entity. var CmdGenerate = &Command{ - UsageLine: "cql generate [-config file] config/private/addr/public/nonce", + UsageLine: "cql generate [-config file] config/private/wallet/public/nonce", Short: "generate config related file or keys", Long: ` Generate command can generate private.key and config.yaml for CovenantSQL. 
@@ -54,8 +53,8 @@ func runGenerate(cmd *Command, args []string) { switch genType { case "config": case "private": - case "addr": - addrGen() + case "wallet": + walletGen() case "public": configInit() case "nonce": @@ -73,33 +72,29 @@ func configGen() { func privateGen() { } -func addrGen() { +func walletGen() { configInit() - var publicKey *asymmetric.PublicKey + //TODO if config has wallet, print and return - //TODO if config has addr, print + var publicKey *asymmetric.PublicKey - //TODO if config has public, use it - publicKeyHex := "" - if publicKeyHex != "" { - publicKeyBytes, err := hex.DecodeString(publicKeyHex) - if err != nil { - ConsoleLog.WithError(err).Error("error converting hex") - SetExitStatus(1) - return - } - publicKey, err = asymmetric.ParsePubKey(publicKeyBytes) - if err != nil { - ConsoleLog.WithError(err).Error("error converting public key") - SetExitStatus(1) - return + //if config has public, use it + for _, node := range conf.GConf.KnownNodes { + if node.ID == conf.GConf.ThisNodeID { + publicKey = node.PublicKey + break } + } + + if publicKey != nil { + ConsoleLog.Infof("use public key in config file: %s", configFile) } else { //use config specific private key file(already init by configInit()) + ConsoleLog.Infof("generate wallet address directly from private key: %s", conf.GConf.PrivateKeyFile) privateKey, err := kms.LoadPrivateKey(conf.GConf.PrivateKeyFile, []byte(password)) if err != nil { - ConsoleLog.WithError(err).Fatal("load private key file failed") + ConsoleLog.WithError(err).Error("load private key file failed") SetExitStatus(1) return } From a8766b453408b40aa74ba54947faf7f54c043d7f Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 26 Mar 2019 16:20:57 +0800 Subject: [PATCH 191/244] Delete addr gen func in cql-utils cmd --- cmd/cql-utils/addrgen.go | 64 ---------------------------------------- cmd/cql-utils/main.go | 8 +---- 2 files changed, 1 insertion(+), 71 deletions(-) delete mode 100644 cmd/cql-utils/addrgen.go diff --git 
a/cmd/cql-utils/addrgen.go b/cmd/cql-utils/addrgen.go deleted file mode 100644 index 3c9a9bd77..000000000 --- a/cmd/cql-utils/addrgen.go +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package main - -import ( - "encoding/hex" - "fmt" - "os" - - "github.com/CovenantSQL/CovenantSQL/crypto" - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/utils/log" -) - -func runAddrgen() { - var publicKey *asymmetric.PublicKey - - if publicKeyHex != "" { - publicKeyBytes, err := hex.DecodeString(publicKeyHex) - if err != nil { - log.WithError(err).Fatal("error converting hex") - } - publicKey, err = asymmetric.ParsePubKey(publicKeyBytes) - if err != nil { - log.WithError(err).Fatal("error converting public key") - } - } else if privateKeyFile != "" { - masterKey, err := readMasterKey() - if err != nil { - fmt.Printf("read master key failed: %v\n", err) - os.Exit(1) - } - privateKey, err := kms.LoadPrivateKey(privateKeyFile, []byte(masterKey)) - if err != nil { - log.WithError(err).Fatal("load private key file failed") - } - publicKey = privateKey.PubKey() - } else { - fmt.Println("privateKey path or publicKey hex is required for addrgen") - os.Exit(1) - } - - keyHash, err := crypto.PubKeyHash(publicKey) - if err != nil { - log.WithError(err).Fatal("unexpected error") - } - - fmt.Printf("wallet 
address: %s\n", keyHash.String()) -} diff --git a/cmd/cql-utils/main.go b/cmd/cql-utils/main.go index f8bef0a02..28416b97a 100644 --- a/cmd/cql-utils/main.go +++ b/cmd/cql-utils/main.go @@ -46,7 +46,7 @@ const name = "cql-utils" func init() { log.SetLevel(log.InfoLevel) - flag.StringVar(&tool, "tool", "", "Tool type, miner, keytool, nonce, confgen, addrgen") + flag.StringVar(&tool, "tool", "", "Tool type, miner, keytool, nonce, confgen") flag.StringVar(&publicKeyHex, "public", "", "Public key hex string to mine node id/nonce") flag.StringVar(&privateKeyFile, "private", "~/.cql/private.key", "Private key file to generate/show") flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file to use") @@ -93,12 +93,6 @@ func main() { runNonce() case "confgen": runConfgen() - case "addrgen": - if privateKeyFile == "" && publicKeyHex == "" { - log.Error("privateKey path or publicKey hex is required for addrgen") - os.Exit(1) - } - runAddrgen() default: flag.Usage() os.Exit(1) From 53b39476d2577122ea572f29fdfdfbdaba5222e2 Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 26 Mar 2019 16:36:14 +0800 Subject: [PATCH 192/244] Remove IsTestMode use UseTestMasterKey instead --- test/GNTE/conf/node_0/config.yaml | 2 +- test/GNTE/conf/node_1/config.yaml | 2 +- test/GNTE/conf/node_2/config.yaml | 2 +- test/GNTE/conf/node_c/config.yaml | 2 +- .../conf/node_miner_10.250.100.2/config.yaml | 4 +- .../conf/node_miner_10.250.100.3/config.yaml | 4 +- .../conf/node_miner_10.250.100.4/config.yaml | 4 +- .../conf/node_miner_10.250.100.5/config.yaml | 4 +- .../conf/node_miner_10.250.100.6/config.yaml | 4 +- .../conf/node_miner_10.250.100.7/config.yaml | 4 +- .../conf/node_miner_10.250.100.8/config.yaml | 4 +- .../conf/node_miner_10.250.100.9/config.yaml | 4 +- test/bench_testnet/node_c/config.yaml | 2 +- test/bootstrap.yaml | 2 +- test/compatibility/node_0/config.yaml | 2 +- test/compatibility/node_1/config.yaml | 2 +- test/compatibility/node_2/config.yaml | 2 +- 
test/compatibility/node_c/config.yaml | 2 +- test/compatibility/node_miner_0/config.yaml | 4 +- test/compatibility/node_miner_1/config.yaml | 4 +- test/compatibility/node_miner_2/config.yaml | 4 +- test/fuse/node_0/config.yaml | 2 +- test/fuse/node_1/config.yaml | 2 +- test/fuse/node_2/config.yaml | 2 +- test/fuse/node_c/config.yaml | 2 +- test/fuse/node_miner_0/config.yaml | 4 +- test/fuse/node_miner_1/config.yaml | 4 +- test/fuse/node_miner_2/config.yaml | 4 +- test/integration/node_0/config.yaml | 2 +- test/integration/node_1/config.yaml | 2 +- test/integration/node_2/config.yaml | 2 +- test/integration/node_c/config.yaml | 107 +----------------- test/integration/node_miner_0/config.yaml | 4 +- test/integration/node_miner_1/config.yaml | 4 +- test/integration/node_miner_2/config.yaml | 4 +- test/leak/client.yaml | 2 +- test/leak/leader.yaml | 2 +- test/mainchain/node_0/config.yaml | 2 +- test/mainchain/node_1/config.yaml | 2 +- test/mainchain/node_2/config.yaml | 2 +- test/mainchain/node_c/config.yaml | 2 +- test/mainchain/node_miner_0/config.yaml | 4 +- test/mainchain/node_miner_1/config.yaml | 4 +- test/mainchain/node_miner_2/config.yaml | 4 +- test/mainchain/node_multi_0/config.yaml | 2 +- test/mainchain/node_multi_1/config.yaml | 2 +- test/mainchain/node_multi_2/config.yaml | 2 +- test/mainchain/node_standalone/config.yaml | 2 +- test/node_0/config.yaml | 2 +- test/node_1/config.yaml | 2 +- test/node_2/config.yaml | 2 +- test/node_c/config.yaml | 2 +- test/node_standalone/config.yaml | 2 +- test/observation/node_0/config.yaml | 2 +- test/observation/node_1/config.yaml | 2 +- test/observation/node_2/config.yaml | 2 +- test/observation/node_c/config.yaml | 2 +- test/observation/node_miner_0/config.yaml | 4 +- test/observation/node_miner_1/config.yaml | 4 +- test/observation/node_miner_2/config.yaml | 4 +- test/observation/node_observer/config.yaml | 2 +- test/pool/client.yaml | 2 +- test/pool/leader.yaml | 2 +- test/service/fullnode_0/config.yaml | 2 +- 
test/service/node_0/config.yaml | 2 +- test/service/node_1/config.yaml | 2 +- test/service/node_2/config.yaml | 2 +- test/service/node_adapter/config.yaml | 2 +- test/service/node_c/config.yaml | 2 +- test/service/node_miner_0/config.yaml | 4 +- test/service/node_miner_1/config.yaml | 4 +- test/service/node_miner_2/config.yaml | 4 +- test/service/node_mysql_adapter/config.yaml | 2 +- test/service/node_observer/config.yaml | 2 +- 74 files changed, 102 insertions(+), 203 deletions(-) diff --git a/test/GNTE/conf/node_0/config.yaml b/test/GNTE/conf/node_0/config.yaml index be7ed85bd..13597a86e 100644 --- a/test/GNTE/conf/node_0/config.yaml +++ b/test/GNTE/conf/node_0/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/GNTE/conf/node_1/config.yaml b/test/GNTE/conf/node_1/config.yaml index 69ddcc69c..2cb0832e9 100644 --- a/test/GNTE/conf/node_1/config.yaml +++ b/test/GNTE/conf/node_1/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/GNTE/conf/node_2/config.yaml b/test/GNTE/conf/node_2/config.yaml index 3acffc6fa..c01f30d53 100644 --- a/test/GNTE/conf/node_2/config.yaml +++ b/test/GNTE/conf/node_2/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/GNTE/conf/node_c/config.yaml b/test/GNTE/conf/node_c/config.yaml index fa2c62bb2..f590a1e9c 100644 --- a/test/GNTE/conf/node_c/config.yaml +++ b/test/GNTE/conf/node_c/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/GNTE/conf/node_miner_10.250.100.2/config.yaml b/test/GNTE/conf/node_miner_10.250.100.2/config.yaml index c9502ed53..563e8de60 100644 --- 
a/test/GNTE/conf/node_miner_10.250.100.2/config.yaml +++ b/test/GNTE/conf/node_miner_10.250.100.2/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" @@ -63,7 +63,7 @@ BlockProducer: StableCoinBalance: 10000000000000000000 CovenantCoinBalance: 10000000000000000000 Miner: - IsTestMode: true + UseTestMasterKey: true RootDir: "./data" MaxReqTimeGap: "60s" ProvideServiceInterval: "3s" diff --git a/test/GNTE/conf/node_miner_10.250.100.3/config.yaml b/test/GNTE/conf/node_miner_10.250.100.3/config.yaml index 2e49c3115..c238af844 100644 --- a/test/GNTE/conf/node_miner_10.250.100.3/config.yaml +++ b/test/GNTE/conf/node_miner_10.250.100.3/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" @@ -63,7 +63,7 @@ BlockProducer: StableCoinBalance: 10000000000000000000 CovenantCoinBalance: 10000000000000000000 Miner: - IsTestMode: true + UseTestMasterKey: true RootDir: "./data" MaxReqTimeGap: "60s" ProvideServiceInterval: "3s" diff --git a/test/GNTE/conf/node_miner_10.250.100.4/config.yaml b/test/GNTE/conf/node_miner_10.250.100.4/config.yaml index 403b7b9b2..afb21a8eb 100644 --- a/test/GNTE/conf/node_miner_10.250.100.4/config.yaml +++ b/test/GNTE/conf/node_miner_10.250.100.4/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" @@ -63,7 +63,7 @@ BlockProducer: StableCoinBalance: 10000000000000000000 CovenantCoinBalance: 10000000000000000000 Miner: - IsTestMode: true + UseTestMasterKey: true RootDir: "./data" MaxReqTimeGap: "60s" ProvideServiceInterval: "3s" diff --git a/test/GNTE/conf/node_miner_10.250.100.5/config.yaml b/test/GNTE/conf/node_miner_10.250.100.5/config.yaml index 490caa74b..ceebac553 100755 --- a/test/GNTE/conf/node_miner_10.250.100.5/config.yaml +++ 
b/test/GNTE/conf/node_miner_10.250.100.5/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" @@ -63,7 +63,7 @@ BlockProducer: StableCoinBalance: 10000000000000000000 CovenantCoinBalance: 10000000000000000000 Miner: - IsTestMode: true + UseTestMasterKey: true RootDir: "./data" MaxReqTimeGap: "60s" ProvideServiceInterval: "3s" diff --git a/test/GNTE/conf/node_miner_10.250.100.6/config.yaml b/test/GNTE/conf/node_miner_10.250.100.6/config.yaml index 24ff73a55..29e5ed09c 100755 --- a/test/GNTE/conf/node_miner_10.250.100.6/config.yaml +++ b/test/GNTE/conf/node_miner_10.250.100.6/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" @@ -63,7 +63,7 @@ BlockProducer: StableCoinBalance: 10000000000000000000 CovenantCoinBalance: 10000000000000000000 Miner: - IsTestMode: true + UseTestMasterKey: true RootDir: "./data" MaxReqTimeGap: "60s" ProvideServiceInterval: "3s" diff --git a/test/GNTE/conf/node_miner_10.250.100.7/config.yaml b/test/GNTE/conf/node_miner_10.250.100.7/config.yaml index a7a2abc68..f13ea3489 100755 --- a/test/GNTE/conf/node_miner_10.250.100.7/config.yaml +++ b/test/GNTE/conf/node_miner_10.250.100.7/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" @@ -63,7 +63,7 @@ BlockProducer: StableCoinBalance: 10000000000000000000 CovenantCoinBalance: 10000000000000000000 Miner: - IsTestMode: true + UseTestMasterKey: true RootDir: "./data" MaxReqTimeGap: "60s" ProvideServiceInterval: "3s" diff --git a/test/GNTE/conf/node_miner_10.250.100.8/config.yaml b/test/GNTE/conf/node_miner_10.250.100.8/config.yaml index 1b9c4859e..0652133ca 100755 --- a/test/GNTE/conf/node_miner_10.250.100.8/config.yaml +++ b/test/GNTE/conf/node_miner_10.250.100.8/config.yaml @@ -1,4 +1,4 @@ 
-IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" @@ -63,7 +63,7 @@ BlockProducer: StableCoinBalance: 10000000000000000000 CovenantCoinBalance: 10000000000000000000 Miner: - IsTestMode: true + UseTestMasterKey: true RootDir: "./data" MaxReqTimeGap: "60s" ProvideServiceInterval: "3s" diff --git a/test/GNTE/conf/node_miner_10.250.100.9/config.yaml b/test/GNTE/conf/node_miner_10.250.100.9/config.yaml index 5e9c09e59..a847f6df0 100755 --- a/test/GNTE/conf/node_miner_10.250.100.9/config.yaml +++ b/test/GNTE/conf/node_miner_10.250.100.9/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" @@ -63,7 +63,7 @@ BlockProducer: StableCoinBalance: 10000000000000000000 CovenantCoinBalance: 10000000000000000000 Miner: - IsTestMode: true + UseTestMasterKey: true RootDir: "./data" MaxReqTimeGap: "60s" ProvideServiceInterval: "3s" diff --git a/test/bench_testnet/node_c/config.yaml b/test/bench_testnet/node_c/config.yaml index 9bade087e..1e766bb9e 100644 --- a/test/bench_testnet/node_c/config.yaml +++ b/test/bench_testnet/node_c/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true StartupSyncHoles: true WorkingRoot: ./ PubKeyStoreFile: public.keystore diff --git a/test/bootstrap.yaml b/test/bootstrap.yaml index 671ed9f42..7c29a4a37 100644 --- a/test/bootstrap.yaml +++ b/test/bootstrap.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/compatibility/node_0/config.yaml b/test/compatibility/node_0/config.yaml index cd1a54cda..61f590880 100644 --- a/test/compatibility/node_0/config.yaml +++ b/test/compatibility/node_0/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git 
a/test/compatibility/node_1/config.yaml b/test/compatibility/node_1/config.yaml index eca03e17d..4571d0040 100644 --- a/test/compatibility/node_1/config.yaml +++ b/test/compatibility/node_1/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/compatibility/node_2/config.yaml b/test/compatibility/node_2/config.yaml index 30a422bc3..e651e3aa0 100644 --- a/test/compatibility/node_2/config.yaml +++ b/test/compatibility/node_2/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/compatibility/node_c/config.yaml b/test/compatibility/node_c/config.yaml index 84320e6c2..d1830a49a 100644 --- a/test/compatibility/node_c/config.yaml +++ b/test/compatibility/node_c/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/compatibility/node_miner_0/config.yaml b/test/compatibility/node_miner_0/config.yaml index ce99baf98..382b3bd15 100644 --- a/test/compatibility/node_miner_0/config.yaml +++ b/test/compatibility/node_miner_0/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" @@ -48,7 +48,7 @@ BlockProducer: StableCoinBalance: 10000000000000000000 CovenantCoinBalance: 10000000000000000000 Miner: - IsTestMode: true + UseTestMasterKey: true RootDir: "./data" MaxReqTimeGap: "2s" ProvideServiceInterval: "60s" diff --git a/test/compatibility/node_miner_1/config.yaml b/test/compatibility/node_miner_1/config.yaml index 6cec5bbc7..dc9b12c3d 100644 --- a/test/compatibility/node_miner_1/config.yaml +++ b/test/compatibility/node_miner_1/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: 
"public.keystore" PrivateKeyFile: "private.key" @@ -48,7 +48,7 @@ BlockProducer: StableCoinBalance: 10000000000000000000 CovenantCoinBalance: 10000000000000000000 Miner: - IsTestMode: true + UseTestMasterKey: true RootDir: "./data" MaxReqTimeGap: "2s" ProvideServiceInterval: "60s" diff --git a/test/compatibility/node_miner_2/config.yaml b/test/compatibility/node_miner_2/config.yaml index ec0ac5060..d19615578 100644 --- a/test/compatibility/node_miner_2/config.yaml +++ b/test/compatibility/node_miner_2/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" @@ -48,7 +48,7 @@ BlockProducer: StableCoinBalance: 10000000000000000000 CovenantCoinBalance: 10000000000000000000 Miner: - IsTestMode: true + UseTestMasterKey: true RootDir: "./data" MaxReqTimeGap: "2s" ProvideServiceInterval: "60s" diff --git a/test/fuse/node_0/config.yaml b/test/fuse/node_0/config.yaml index d45165d29..1144f6ace 100644 --- a/test/fuse/node_0/config.yaml +++ b/test/fuse/node_0/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/fuse/node_1/config.yaml b/test/fuse/node_1/config.yaml index 6e97c1945..8b9e5736a 100644 --- a/test/fuse/node_1/config.yaml +++ b/test/fuse/node_1/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/fuse/node_2/config.yaml b/test/fuse/node_2/config.yaml index e714897cb..ad5e833b4 100644 --- a/test/fuse/node_2/config.yaml +++ b/test/fuse/node_2/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/fuse/node_c/config.yaml b/test/fuse/node_c/config.yaml index 7fb50336e..110173459 100644 --- a/test/fuse/node_c/config.yaml +++ 
b/test/fuse/node_c/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/fuse/node_miner_0/config.yaml b/test/fuse/node_miner_0/config.yaml index c40ddaad0..fc718cc76 100644 --- a/test/fuse/node_miner_0/config.yaml +++ b/test/fuse/node_miner_0/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" @@ -35,7 +35,7 @@ BlockProducer: ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 Timestamp: 2018-08-13T21:59:59.12Z Miner: - IsTestMode: true + UseTestMasterKey: true RootDir: "./data" MaxReqTimeGap: "60s" ProvideServiceInterval: "3s" diff --git a/test/fuse/node_miner_1/config.yaml b/test/fuse/node_miner_1/config.yaml index 118567931..71af068ca 100644 --- a/test/fuse/node_miner_1/config.yaml +++ b/test/fuse/node_miner_1/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" @@ -35,7 +35,7 @@ BlockProducer: ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 Timestamp: 2018-08-13T21:59:59.12Z Miner: - IsTestMode: true + UseTestMasterKey: true RootDir: "./data" MaxReqTimeGap: "60s" ProvideServiceInterval: "3s" diff --git a/test/fuse/node_miner_2/config.yaml b/test/fuse/node_miner_2/config.yaml index 25a303475..8356818f3 100644 --- a/test/fuse/node_miner_2/config.yaml +++ b/test/fuse/node_miner_2/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" @@ -35,7 +35,7 @@ BlockProducer: ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 Timestamp: 2018-08-13T21:59:59.12Z Miner: - IsTestMode: true + UseTestMasterKey: true RootDir: "./data" MaxReqTimeGap: "60s" ProvideServiceInterval: "3s" diff 
--git a/test/integration/node_0/config.yaml b/test/integration/node_0/config.yaml index 397b65eb8..73ff9c935 100644 --- a/test/integration/node_0/config.yaml +++ b/test/integration/node_0/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/integration/node_1/config.yaml b/test/integration/node_1/config.yaml index d35061395..d6089d9ea 100644 --- a/test/integration/node_1/config.yaml +++ b/test/integration/node_1/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/integration/node_2/config.yaml b/test/integration/node_2/config.yaml index 85dce477d..213f6dc9a 100644 --- a/test/integration/node_2/config.yaml +++ b/test/integration/node_2/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/integration/node_c/config.yaml b/test/integration/node_c/config.yaml index f4f265e8c..ca473fabe 100644 --- a/test/integration/node_c/config.yaml +++ b/test/integration/node_c/config.yaml @@ -1,89 +1,15 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" DHTFileName: "dht.db" ListenAddr: "127.0.0.1:3120" ThisNodeID: "00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d" -QPS: 1000 -BillingBlockCount: 2 -ChainBusPeriod: 1s -BPPeriod: 3s -BPTick: 1s -SQLChainPeriod: 3s -SQLChainTick: 1s -SQLChainTTL: 10 -MinProviderDeposit: 1000000 -ValidDNSKeys: - koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com - mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com - oJMRESz5E4gYzS/q6XDrvU1qMPYIjCWzJaOau8XNEZeqCYKD5ar0IRd8KqXXFJkqmVfRvMGPmM1x8fGAa2XhSA==: 
cloudflare.com -MinNodeIDDifficulty: 2 + DNSSeed: - EnforcedDNSSEC: false - DNSServers: - - 1.1.1.1 - - 202.46.34.74 - - 202.46.34.75 - - 202.46.34.76 + Domain: intergration-test.gridb.io -BlockProducer: - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 - Nonce: - a: 313283 - b: 0 - c: 0 - d: 0 - ChainFileName: "chain.db" - BPGenesisInfo: - Version: 1 - BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154 - Producer: 0000000000000000000000000000000000000000000000000000000000000001 - MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001 - ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 - Timestamp: 2018-08-13T21:59:59.12Z - BaseAccounts: - - Address: ba0ba731c7a76ccef2c1170f42038f7e228dfb474ef0190dfe35d9a37911ed37 - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 - - Address: 1a7b0959bbd0d0ec529278a61c0056c277bffe75b2646e1699b46b10a90210be - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 - - Address: 9235bc4130a2ed4e6c35ea189dab35198ebb105640bedb97dd5269cc80863b16 - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 - - Address: 9e1618775cceeb19f110e04fbc6c5bca6c8e4e9b116e193a42fe69bf602e7bcd - StableCoinBalance: 1000000000 - CovenantCoinBalance: 1000000000 KnownNodes: -- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 - Nonce: - a: 313283 - b: 0 - c: 0 - d: 0 - Addr: 127.0.0.1:3122 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Leader -- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 - Nonce: - a: 478373 - b: 0 - c: 0 - d: 2305843009893772025 - Addr: 127.0.0.1:3121 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Follower -- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 - Nonce: - a: 259939 - 
b: 0 - c: 0 - d: 2305843012544226372 - Addr: 127.0.0.1:3120 - PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" - Role: Follower - ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d Nonce: a: 22403 @@ -93,30 +19,3 @@ KnownNodes: Addr: "" PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 Role: Client -- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade - Nonce: - a: 567323 - b: 0 - c: 0 - d: 3104982049 - Addr: 127.0.0.1:2144 - PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 - Role: Miner -- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 - Nonce: - a: 240524 - b: 0 - c: 0 - d: 2305843010430351476 - Addr: 127.0.0.1:2145 - PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 - Role: Miner -- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8 - Nonce: - a: 606016 - b: 0 - c: 0 - d: 13835058056920509601 - Addr: 127.0.0.1:2146 - PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5 - Role: Miner diff --git a/test/integration/node_miner_0/config.yaml b/test/integration/node_miner_0/config.yaml index ceac395a8..9c333b58e 100644 --- a/test/integration/node_miner_0/config.yaml +++ b/test/integration/node_miner_0/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" @@ -48,7 +48,7 @@ BlockProducer: StableCoinBalance: 1000000000 CovenantCoinBalance: 1000000000 Miner: - IsTestMode: true + UseTestMasterKey: true RootDir: "./data" MaxReqTimeGap: "2s" ProvideServiceInterval: "60s" diff --git a/test/integration/node_miner_1/config.yaml b/test/integration/node_miner_1/config.yaml index 41eb0305b..647789173 100644 --- a/test/integration/node_miner_1/config.yaml +++ b/test/integration/node_miner_1/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: 
"./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" @@ -48,7 +48,7 @@ BlockProducer: StableCoinBalance: 1000000000 CovenantCoinBalance: 1000000000 Miner: - IsTestMode: true + UseTestMasterKey: true RootDir: "./data" MaxReqTimeGap: "2s" ProvideServiceInterval: "60s" diff --git a/test/integration/node_miner_2/config.yaml b/test/integration/node_miner_2/config.yaml index 51ec8f581..6cbc25244 100644 --- a/test/integration/node_miner_2/config.yaml +++ b/test/integration/node_miner_2/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" @@ -48,7 +48,7 @@ BlockProducer: StableCoinBalance: 1000000000 CovenantCoinBalance: 1000000000 Miner: - IsTestMode: true + UseTestMasterKey: true RootDir: "./data" MaxReqTimeGap: "2s" ProvideServiceInterval: "60s" diff --git a/test/leak/client.yaml b/test/leak/client.yaml index ca8416194..b31011499 100644 --- a/test/leak/client.yaml +++ b/test/leak/client.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/leak/leader.yaml b/test/leak/leader.yaml index fdd9b219e..fd7eafac6 100644 --- a/test/leak/leader.yaml +++ b/test/leak/leader.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "./leader/public.keystore" PrivateKeyFile: "./leader/private.key" diff --git a/test/mainchain/node_0/config.yaml b/test/mainchain/node_0/config.yaml index 89803b202..578235e68 100644 --- a/test/mainchain/node_0/config.yaml +++ b/test/mainchain/node_0/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/mainchain/node_1/config.yaml b/test/mainchain/node_1/config.yaml index 2c201b374..9e430f4a0 100644 --- a/test/mainchain/node_1/config.yaml +++ 
b/test/mainchain/node_1/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/mainchain/node_2/config.yaml b/test/mainchain/node_2/config.yaml index 3205f0ef9..38ddcf7e2 100644 --- a/test/mainchain/node_2/config.yaml +++ b/test/mainchain/node_2/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/mainchain/node_c/config.yaml b/test/mainchain/node_c/config.yaml index 2222031a6..2bf8e67f4 100644 --- a/test/mainchain/node_c/config.yaml +++ b/test/mainchain/node_c/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/mainchain/node_miner_0/config.yaml b/test/mainchain/node_miner_0/config.yaml index b5c5bf3a1..f023f8c4f 100644 --- a/test/mainchain/node_miner_0/config.yaml +++ b/test/mainchain/node_miner_0/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" @@ -35,7 +35,7 @@ BlockProducer: ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 Timestamp: 2018-08-13T21:59:59.12Z Miner: - IsTestMode: true + UseTestMasterKey: true RootDir: "./data" MaxReqTimeGap: "2s" ProvideServiceInterval: "3s" diff --git a/test/mainchain/node_miner_1/config.yaml b/test/mainchain/node_miner_1/config.yaml index e20ae69f1..07c76ab65 100644 --- a/test/mainchain/node_miner_1/config.yaml +++ b/test/mainchain/node_miner_1/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" @@ -35,7 +35,7 @@ BlockProducer: ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 Timestamp: 2018-08-13T21:59:59.12Z Miner: - 
IsTestMode: true + UseTestMasterKey: true RootDir: "./data" MaxReqTimeGap: "2s" ProvideServiceInterval: "3s" diff --git a/test/mainchain/node_miner_2/config.yaml b/test/mainchain/node_miner_2/config.yaml index ae0fbbb56..326249185 100644 --- a/test/mainchain/node_miner_2/config.yaml +++ b/test/mainchain/node_miner_2/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" @@ -35,7 +35,7 @@ BlockProducer: ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 Timestamp: 2018-08-13T21:59:59.12Z Miner: - IsTestMode: true + UseTestMasterKey: true RootDir: "./data" MaxReqTimeGap: "2s" ProvideServiceInterval: "3s" diff --git a/test/mainchain/node_multi_0/config.yaml b/test/mainchain/node_multi_0/config.yaml index 07201fb26..a3e91c22b 100644 --- a/test/mainchain/node_multi_0/config.yaml +++ b/test/mainchain/node_multi_0/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/mainchain/node_multi_1/config.yaml b/test/mainchain/node_multi_1/config.yaml index 2e8d93c14..34943f85e 100644 --- a/test/mainchain/node_multi_1/config.yaml +++ b/test/mainchain/node_multi_1/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/mainchain/node_multi_2/config.yaml b/test/mainchain/node_multi_2/config.yaml index 2b0a330d3..8ee462429 100644 --- a/test/mainchain/node_multi_2/config.yaml +++ b/test/mainchain/node_multi_2/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/mainchain/node_standalone/config.yaml b/test/mainchain/node_standalone/config.yaml index 07201fb26..a3e91c22b 100644 --- 
a/test/mainchain/node_standalone/config.yaml +++ b/test/mainchain/node_standalone/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/node_0/config.yaml b/test/node_0/config.yaml index 5993a318b..dd3925770 100644 --- a/test/node_0/config.yaml +++ b/test/node_0/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/node_1/config.yaml b/test/node_1/config.yaml index ba7293c50..d16604b70 100644 --- a/test/node_1/config.yaml +++ b/test/node_1/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/node_2/config.yaml b/test/node_2/config.yaml index d6d0ae221..de5636f07 100644 --- a/test/node_2/config.yaml +++ b/test/node_2/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/node_c/config.yaml b/test/node_c/config.yaml index d3ba2e5ee..872aef58d 100644 --- a/test/node_c/config.yaml +++ b/test/node_c/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/node_standalone/config.yaml b/test/node_standalone/config.yaml index f7f6c3953..f0931af9c 100644 --- a/test/node_standalone/config.yaml +++ b/test/node_standalone/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/observation/node_0/config.yaml b/test/observation/node_0/config.yaml index e5c50bcd7..025952e39 100644 --- a/test/observation/node_0/config.yaml +++ b/test/observation/node_0/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true 
+UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/observation/node_1/config.yaml b/test/observation/node_1/config.yaml index a301e204b..6363edd9a 100644 --- a/test/observation/node_1/config.yaml +++ b/test/observation/node_1/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/observation/node_2/config.yaml b/test/observation/node_2/config.yaml index 3fd450a21..7966ad7a8 100644 --- a/test/observation/node_2/config.yaml +++ b/test/observation/node_2/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/observation/node_c/config.yaml b/test/observation/node_c/config.yaml index f864108f3..f84f31afa 100644 --- a/test/observation/node_c/config.yaml +++ b/test/observation/node_c/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/observation/node_miner_0/config.yaml b/test/observation/node_miner_0/config.yaml index 40d8203f0..fa225c980 100644 --- a/test/observation/node_miner_0/config.yaml +++ b/test/observation/node_miner_0/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" @@ -35,7 +35,7 @@ BlockProducer: ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 Timestamp: 2018-08-13T21:59:59.12Z Miner: - IsTestMode: true + UseTestMasterKey: true RootDir: "./data" MaxReqTimeGap: "2s" ProvideServiceInterval: "3s" diff --git a/test/observation/node_miner_1/config.yaml b/test/observation/node_miner_1/config.yaml index d9058479b..12a7d267e 100644 --- a/test/observation/node_miner_1/config.yaml +++ 
b/test/observation/node_miner_1/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" @@ -35,7 +35,7 @@ BlockProducer: ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 Timestamp: 2018-08-13T21:59:59.12Z Miner: - IsTestMode: true + UseTestMasterKey: true RootDir: "./data" MaxReqTimeGap: "2s" ProvideServiceInterval: "3s" diff --git a/test/observation/node_miner_2/config.yaml b/test/observation/node_miner_2/config.yaml index 536917088..ba3f93bec 100644 --- a/test/observation/node_miner_2/config.yaml +++ b/test/observation/node_miner_2/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" @@ -35,7 +35,7 @@ BlockProducer: ParentHash: 0000000000000000000000000000000000000000000000000000000000000001 Timestamp: 2018-08-13T21:59:59.12Z Miner: - IsTestMode: true + UseTestMasterKey: true RootDir: "./data" MaxReqTimeGap: "2s" ProvideServiceInterval: "3s" diff --git a/test/observation/node_observer/config.yaml b/test/observation/node_observer/config.yaml index 16d985851..0cc45046c 100644 --- a/test/observation/node_observer/config.yaml +++ b/test/observation/node_observer/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/pool/client.yaml b/test/pool/client.yaml index 208481519..983a567de 100644 --- a/test/pool/client.yaml +++ b/test/pool/client.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/pool/leader.yaml b/test/pool/leader.yaml index e7fd47f13..021620c30 100644 --- a/test/pool/leader.yaml +++ b/test/pool/leader.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: 
"public.keystore" PrivateKeyFile: "private.key" diff --git a/test/service/fullnode_0/config.yaml b/test/service/fullnode_0/config.yaml index a54e62c05..5961e6a5b 100644 --- a/test/service/fullnode_0/config.yaml +++ b/test/service/fullnode_0/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true StartupSyncHoles: false WorkingRoot: ./ PubKeyStoreFile: public.keystore diff --git a/test/service/node_0/config.yaml b/test/service/node_0/config.yaml index 31391385e..2c757d75a 100644 --- a/test/service/node_0/config.yaml +++ b/test/service/node_0/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/service/node_1/config.yaml b/test/service/node_1/config.yaml index 5e2fa7460..54978d735 100644 --- a/test/service/node_1/config.yaml +++ b/test/service/node_1/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/service/node_2/config.yaml b/test/service/node_2/config.yaml index 413220a15..f5eb8c09e 100644 --- a/test/service/node_2/config.yaml +++ b/test/service/node_2/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/service/node_adapter/config.yaml b/test/service/node_adapter/config.yaml index 5887b607d..783c37713 100644 --- a/test/service/node_adapter/config.yaml +++ b/test/service/node_adapter/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/service/node_c/config.yaml b/test/service/node_c/config.yaml index 5b552dfbc..5ca55ca13 100644 --- a/test/service/node_c/config.yaml +++ b/test/service/node_c/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" 
PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/service/node_miner_0/config.yaml b/test/service/node_miner_0/config.yaml index 80ba27bf7..90da6771e 100644 --- a/test/service/node_miner_0/config.yaml +++ b/test/service/node_miner_0/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" @@ -48,7 +48,7 @@ BlockProducer: StableCoinBalance: 10000000000000000000 CovenantCoinBalance: 10000000000000000000 Miner: - IsTestMode: true + UseTestMasterKey: true RootDir: "./data" MaxReqTimeGap: "2s" ProvideServiceInterval: "3s" diff --git a/test/service/node_miner_1/config.yaml b/test/service/node_miner_1/config.yaml index 927ecbe42..589f5b210 100644 --- a/test/service/node_miner_1/config.yaml +++ b/test/service/node_miner_1/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" @@ -48,7 +48,7 @@ BlockProducer: StableCoinBalance: 10000000000000000000 CovenantCoinBalance: 10000000000000000000 Miner: - IsTestMode: true + UseTestMasterKey: true RootDir: "./data" MaxReqTimeGap: "2s" ProvideServiceInterval: "3s" diff --git a/test/service/node_miner_2/config.yaml b/test/service/node_miner_2/config.yaml index 1dd21fdf1..4b0ee8158 100644 --- a/test/service/node_miner_2/config.yaml +++ b/test/service/node_miner_2/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" @@ -48,7 +48,7 @@ BlockProducer: StableCoinBalance: 10000000000000000000 CovenantCoinBalance: 10000000000000000000 Miner: - IsTestMode: true + UseTestMasterKey: true RootDir: "./data" MaxReqTimeGap: "2s" ProvideServiceInterval: "3s" diff --git a/test/service/node_mysql_adapter/config.yaml b/test/service/node_mysql_adapter/config.yaml index a7c347fa2..a2bdec0e6 100644 --- 
a/test/service/node_mysql_adapter/config.yaml +++ b/test/service/node_mysql_adapter/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/service/node_observer/config.yaml b/test/service/node_observer/config.yaml index eca2f1daf..01e1ade1d 100644 --- a/test/service/node_observer/config.yaml +++ b/test/service/node_observer/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" From b59b84492a81766c65070df38bb8f84ab76284cf Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 26 Mar 2019 16:37:34 +0800 Subject: [PATCH 193/244] Fix client unit test --- client/driver.go | 11 +++++------ client/driver_test.go | 14 ++++++++++---- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/client/driver.go b/client/driver.go index 9be70a3c9..cbb426d9a 100644 --- a/client/driver.go +++ b/client/driver.go @@ -66,7 +66,8 @@ var ( globalSeqNo uint64 randSource = rand.New(rand.NewSource(time.Now().UnixNano())) - defaultConfigFile = "~/.cql/config.yaml" + // Default path of config file + DefaultConfigFile = "~/.cql/config.yaml" ) func init() { @@ -105,8 +106,8 @@ type ResourceMeta struct { } func defaultInit() (err error) { - configFile := utils.HomeDirExpand(defaultConfigFile) - if configFile == defaultConfigFile { + configFile := utils.HomeDirExpand(DefaultConfigFile) + if configFile == DefaultConfigFile { //System not support ~ dir, need Init manually. 
log.Debugf("Could not find CovenantSQL default config location: %v", configFile) return ErrNotInitialized @@ -282,9 +283,7 @@ func Drop(dsn string) (txHash hash.Hash, err error) { return } - _ = cfg - - // currently not supported + peerList.Delete(cfg.DatabaseID) return } diff --git a/client/driver_test.go b/client/driver_test.go index 0c2c5134c..367a31fda 100644 --- a/client/driver_test.go +++ b/client/driver_test.go @@ -45,6 +45,7 @@ func TestInit(t *testing.T) { var err error stopTestService, confDir, err = startTestService() + log.Debugf("config dir: %s", confDir) So(err, ShouldBeNil) defer stopTestService() @@ -55,7 +56,7 @@ func TestInit(t *testing.T) { // fake driver not initialized atomic.StoreUint32(&driverInitialized, 0) err = Init(filepath.Join(confDir, "config.yaml"), []byte("")) - So(err, ShouldBeNil) + So(err.Error(), ShouldResemble, "call DHT.Ping failed: setting Leader node is not permitted") // test loaded block producer nodes bps := route.GetBPs() @@ -197,9 +198,12 @@ func TestOpen(t *testing.T) { _, err = cqlDriver.Open("invalid dsn") So(err, ShouldNotBeNil) - // not initialized(will run defaultInit once) - _, err = cqlDriver.Open("covenantsql://db") - So(err, ShouldNotBeNil) + if !utils.Exist(utils.HomeDirExpand(DefaultConfigFile)) { + // not initialized(will run defaultInit once) + _, err = cqlDriver.Open("covenantsql://db") + log.Errorf("2nd time open %v", err) + So(err, ShouldNotBeNil) + } // reset driver not initialized atomic.StoreUint32(&driverInitialized, 0) @@ -210,6 +214,8 @@ func TestGetTokenBalance(t *testing.T) { Convey("test get token balance", t, func() { var stopTestService func() var err error + // reset driver not initialized + atomic.StoreUint32(&driverInitialized, 0) // driver not initialized _, err = GetTokenBalance(types.Particle) From 7b9120a6afe05fd72b489315e35c195fc758dca9 Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 26 Mar 2019 16:37:58 +0800 Subject: [PATCH 194/244] Remove Uint256 to IPv6 stuff, use 
https://github.com/CovenantSQL/beacon --- pow/cpuminer/uint256.go | 22 ---------------- pow/cpuminer/uint256_test.go | 49 ------------------------------------ 2 files changed, 71 deletions(-) diff --git a/pow/cpuminer/uint256.go b/pow/cpuminer/uint256.go index 6d7190a53..ba48592b0 100644 --- a/pow/cpuminer/uint256.go +++ b/pow/cpuminer/uint256.go @@ -20,7 +20,6 @@ import ( "bytes" "encoding/binary" "errors" - "net" hsp "github.com/CovenantSQL/HashStablePack/marshalhash" ) @@ -78,24 +77,3 @@ func Uint256FromBytes(b []byte) (*Uint256, error) { binary.Read(bytes.NewBuffer(b), binary.BigEndian, &i) return &i, nil } - -// ToIPv6 converts Uint256 to 2 IPv6 addresses. -func (i *Uint256) ToIPv6() (ab, cd net.IP, err error) { - buf := i.Bytes() - ab = make(net.IP, 0, net.IPv6len) - cd = make(net.IP, 0, net.IPv6len) - ab = append(ab, buf[:16]...) - cd = append(cd, buf[16:]...) - return -} - -//FromIPv6 converts 2 IPv6 addresses to Uint256 -func FromIPv6(ab, cd net.IP) (ret *Uint256, err error) { - if ab == nil || cd == nil || len(ab) == 0 || len(cd) == 0 { - return nil, ErrEmptyIPv6Addr - } - buf := make([]byte, 0, 32) - buf = append(buf, ab...) - buf = append(buf, cd...) 
- return Uint256FromBytes(buf) -} diff --git a/pow/cpuminer/uint256_test.go b/pow/cpuminer/uint256_test.go index 3aa157ba7..9fcfb6bd6 100644 --- a/pow/cpuminer/uint256_test.go +++ b/pow/cpuminer/uint256_test.go @@ -92,52 +92,3 @@ func TestUint256_Inc(t *testing.T) { So(i.D, ShouldEqual, 0) }) } - -func TestUint256_ToIPv6(t *testing.T) { - Convey("uint256 to IPv6", t, func() { - src := Uint256{} - ab, cd, err := src.ToIPv6() - So(ab.IsUnspecified(), ShouldBeTrue) - So(cd.IsUnspecified(), ShouldBeTrue) - - i, err := FromIPv6(ab, cd) - So(err, ShouldBeNil) - So(i.A, ShouldEqual, 0) - So(i.B, ShouldEqual, 0) - So(i.C, ShouldEqual, 0) - So(i.D, ShouldEqual, 0) - - src = Uint256{math.MaxUint64, math.MaxUint64, math.MaxUint64, math.MaxUint64} - ab, cd, err = src.ToIPv6() - So(err, ShouldBeNil) - So(ab.String(), ShouldEqual, "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff") - So(cd.String(), ShouldEqual, "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff") - - src = Uint256{math.MaxUint64, 0, math.MaxUint64, 0x10} - ab, cd, err = src.ToIPv6() - So(err, ShouldBeNil) - So(ab.String(), ShouldEqual, "ffff:ffff:ffff:ffff::") - So(cd.String(), ShouldEqual, "ffff:ffff:ffff:ffff::10") - - //00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 - src = Uint256{313283, 0, 0, 0} - ab, cd, err = src.ToIPv6() - So(err, ShouldBeNil) - So(ab.String(), ShouldEqual, "0:0:4:c7c3::") - So(cd.String(), ShouldEqual, "::") - - //00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35 - src = Uint256{478373, 0, 0, 2305843009893772025} - ab, cd, err = src.ToIPv6() - So(err, ShouldBeNil) - So(ab.String(), ShouldEqual, "0:0:7:4ca5::") - So(cd.String(), ShouldEqual, "::2000:0:2889:2af9") - - //000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582 - src = Uint256{259939, 0, 0, 2305843012544226372} - ab, cd, err = src.ToIPv6() - So(err, ShouldBeNil) - So(ab.String(), ShouldEqual, "0:0:3:f763::") - So(cd.String(), ShouldEqual, "::2000:0:c683:e444") - }) -} From 
7bfa51a0e334d86f96c3fa615aac1963bae0a327 Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 26 Mar 2019 16:39:03 +0800 Subject: [PATCH 195/244] Remove DNSSEC for bootstrap --- route/bootstrap.go | 352 ---------------------------------------- route/bootstrap_test.go | 138 ---------------- route/dns.go | 51 +++--- route/dns_test.go | 4 +- route/ipv6seed.go | 138 ++++++++++++++++ route/ipv6seed_test.go | 102 ++++++++++++ route/service.go | 7 + 7 files changed, 270 insertions(+), 522 deletions(-) delete mode 100644 route/bootstrap.go delete mode 100644 route/bootstrap_test.go create mode 100644 route/ipv6seed.go create mode 100644 route/ipv6seed_test.go diff --git a/route/bootstrap.go b/route/bootstrap.go deleted file mode 100644 index 1234852b2..000000000 --- a/route/bootstrap.go +++ /dev/null @@ -1,352 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package route - -import ( - "encoding/hex" - "errors" - "fmt" - "net" - "strings" - "time" - - "github.com/miekg/dns" - - "github.com/CovenantSQL/CovenantSQL/conf" - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/hash" - "github.com/CovenantSQL/CovenantSQL/crypto/kms" - mine "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" - "github.com/CovenantSQL/CovenantSQL/proto" - "github.com/CovenantSQL/CovenantSQL/utils/log" -) - -const ( - nonceAB = "ab." - nonceCD = "cd." 
-) - -// BPDomain is the default BP domain list. -const BPDomain = "_bp._tcp.gridb.io." - -// TestBPDomain is the default BP domain list for test. -const TestBPDomain = "_bp._tcp.test.gridb.io." - -// DNSClient contains tools for querying nameservers. -type DNSClient struct { - msg *dns.Msg - clt *dns.Client - conf *dns.ClientConfig -} - -// NewDNSClient returns a new DNSClient. -func NewDNSClient() *DNSClient { - m := new(dns.Msg) - m.SetEdns0(4096, true) - - var clientConfig *dns.ClientConfig - var err error - if conf.GConf != nil && len(conf.GConf.DNSSeed.DNSServers) > 0 { - clientConfig = &dns.ClientConfig{ - Servers: conf.GConf.DNSSeed.DNSServers, - Search: make([]string, 0), - Port: "53", - Ndots: 1, - Timeout: 8, - Attempts: 3, - } - } else { - clientConfig, err = dns.ClientConfigFromFile("/etc/resolv.conf") - if err != nil || clientConfig == nil { - log.WithError(err).Error("can not initialize the local resolver") - } - } - - return &DNSClient{ - msg: m, - clt: new(dns.Client), - conf: clientConfig, - } -} - -// Query DNS nameserver and return the response. -func (dc *DNSClient) Query(qname string, qtype uint16) (*dns.Msg, error) { - dc.msg.SetQuestion(qname, qtype) - for _, server := range dc.conf.Servers { - r, _, err := dc.clt.Exchange(dc.msg, server+":"+dc.conf.Port) - if err != nil { - return nil, err - } - if r.Rcode == dns.RcodeSuccess { - return r, err - } - return r, fmt.Errorf("DNS query failed with Rcode %v", r.Rcode) - } - return nil, errors.New("no available name server") -} - -// GetKey returns the DNSKey for a name server. 
-func (dc *DNSClient) GetKey(name string, keytag uint16) (*dns.DNSKEY, error) { - r, err := dc.Query(name, dns.TypeDNSKEY) - if err != nil { - return nil, fmt.Errorf("DNSKEY record query failed: %v", err) - } - for _, k := range r.Answer { - if k1, ok := k.(*dns.DNSKEY); ok { - if k1.KeyTag() == keytag { - return k1, nil - } - } - } - return nil, errors.New("no DNSKEY returned by nameserver") -} - -// VerifySection checks RRSIGs to make sure the name server is authentic. -func (dc *DNSClient) VerifySection(set []dns.RR) error { - if conf.GConf != nil && !conf.GConf.DNSSeed.EnforcedDNSSEC { - log.Debug("DNSSEC not enforced, just pass verification") - return nil - } - for _, rr := range set { - if rr.Header().Rrtype == dns.TypeRRSIG { - if !rr.(*dns.RRSIG).ValidityPeriod(time.Now().UTC()) { - return fmt.Errorf("signature %s is expired", shortSig(rr.(*dns.RRSIG))) - } - rrset := GetRRSet(set, rr.Header().Name, rr.(*dns.RRSIG).TypeCovered) - key, err := dc.GetKey(rr.(*dns.RRSIG).SignerName, rr.(*dns.RRSIG).KeyTag) - if err != nil { - return fmt.Errorf(";? DNSKEY %s/%d not found, error: %v", rr.(*dns.RRSIG).SignerName, rr.(*dns.RRSIG).KeyTag, err) - } - domain, validDNSKey := conf.GConf.ValidDNSKeys[key.PublicKey] - if !validDNSKey { - return fmt.Errorf("DNSKEY %s not valid", key.PublicKey) - } - log.WithFields(log.Fields{ - "pub": key.PublicKey, - "domain": domain, - }).Debug("valid DNSKEY") - if err := rr.(*dns.RRSIG).Verify(key, rrset); err != nil { - return fmt.Errorf(";- Bogus signature, %s does not validate (DNSKEY %s/%d) [%s]", - shortSig(rr.(*dns.RRSIG)), key.Header().Name, key.KeyTag(), err.Error()) - } - log.WithFields(log.Fields{ - "rrsig": shortSig(rr.(*dns.RRSIG)), - "name": key.Header().Name, - "tag": key.KeyTag(), - "pub": key.PublicKey, - }).Debug("signature secured") - return nil - } - } - return errors.New("not DNSSEC record") -} - -// GetRRSet returns the RRset belonging to the signature with name and type t. 
-func GetRRSet(l []dns.RR, name string, t uint16) []dns.RR { - var l1 []dns.RR - for _, rr := range l { - if strings.ToLower(rr.Header().Name) == strings.ToLower(name) && rr.Header().Rrtype == t { - l1 = append(l1, rr) - } - } - return l1 -} - -// Shorten RRSIG. -func shortSig(sig *dns.RRSIG) string { - return sig.Header().Name + " RRSIG(" + dns.TypeToString[sig.TypeCovered] + ")" -} - -// GetBPFromDNSSeed returns an array of the BP IP addresses listed at a domain. -func (dc *DNSClient) GetBPFromDNSSeed(BPDomain string) (BPNodes IDNodeMap, err error) { - srvRR := dc.GetSRVRecords(BPDomain) - if srvRR == nil { - err = errors.New("got empty SRV records set") - log.Error(err) - return - } - if err = dc.VerifySection(srvRR.Answer); err != nil { - log.WithError(err).Error("record verify failed") - return - } - BPNodes = make(IDNodeMap) - // For all SRV RRs returned, query for corresponding A RR - for _, rr := range srvRR.Answer { - if srv, ok := rr.(*dns.SRV); ok { - var addr string - var nodeID proto.RawNodeID - aRR := dc.GetARecord(srv.Target) - if aRR != nil { - if err = dc.VerifySection(aRR.Answer); err != nil { - log.WithError(err).Error("verify SRV section failed") - return - } - for _, rr1 := range aRR.Answer { - if ss1, ok := rr1.(*dns.A); ok { - addr = fmt.Sprintf("%s:%d", ss1.A.String(), srv.Port) - fields := strings.SplitN(srv.Target, ".", 2) - if len(fields) > 0 && len(fields[0]) <= proto.NodeIDLen+len("th") && strings.HasPrefix(fields[0], "th") { - nodeIDstr := strings.Repeat("0", proto.NodeIDLen-len(fields[0])+len("th")) + fields[0][len("th"):] - nodeH, err := hash.NewHashFromStr(nodeIDstr) - if err == nil { - nodeID = proto.RawNodeID{Hash: *nodeH} - } - } - - } - } - } - - var ab, cd net.IP - target := nonceAB + srv.Target - ABIPv6R := dc.GetAAAARecord(target) - if ABIPv6R == nil { - err = errors.New("empty AAAA record") - log.WithField("target", target).WithError(err).Error("get AAAA section failed") - return - } - if err = 
dc.VerifySection(ABIPv6R.Answer); err != nil { - log.WithError(err).WithError(err).Error("verify ab AAAA section failed") - return - } - for _, rr := range ABIPv6R.Answer { - if ss, ok := rr.(*dns.AAAA); ok { - ab = ss.AAAA - break - } - } - - target = nonceCD + srv.Target - CDIPv6R := dc.GetAAAARecord(target) - if CDIPv6R == nil { - err = errors.New("empty AAAA record") - log.WithField("target", target).WithError(err).Error("get AAAA section failed") - return - } - - if err = dc.VerifySection(CDIPv6R.Answer); err != nil { - log.WithField("target", target).WithError(err).Error("verify cd AAAA section failed") - return - } - for _, rr := range CDIPv6R.Answer { - if ss, ok := rr.(*dns.AAAA); ok { - cd = ss.AAAA - break - } - } - - var nonce *mine.Uint256 - nonce, err = mine.FromIPv6(ab, cd) - if err != nil { - log.WithError(err).Error("convert IPv6 addr to nonce failed") - return - } - - var publicKey = new(asymmetric.PublicKey) - publicKeyTXTR := dc.GetTXTRecord(srv.Target) - if publicKeyTXTR == nil { - err = errors.New("empty TXT record") - log.WithField("target", srv.Target).WithError(err).Error("get TXT section failed") - return - } - if err = dc.VerifySection(publicKeyTXTR.Answer); err != nil { - log.WithField("target", srv.Target).WithError(err).Error("verify TXT section failed") - return - } - for _, rr := range publicKeyTXTR.Answer { - if ss, ok := rr.(*dns.TXT); ok { - if len(ss.Txt) == 0 { - err = errors.New("empty TXT record") - log.WithField("target", srv.Target).WithError(err).Error("got empty TXT record") - return - } - publicKeyStr := ss.Txt[0] - log.Debugf("TXT Record: %#v", publicKeyStr) - var pubKeyBytes []byte - // load public key string - pubKeyBytes, err = hex.DecodeString(publicKeyStr) - if err != nil { - log.WithError(err).Error("decode TXT record to hex failed") - return - } - - err = publicKey.UnmarshalBinary(pubKeyBytes) - if err != nil { - log.WithError(err).Error("unmarshal TXT record to public key failed") - return - } - - break - } - } - 
- if !kms.IsIDPubNonceValid(&nodeID, nonce, publicKey) { - err = fmt.Errorf("ID PubKey Nonce not identical: %s, %v, %x", nodeID.String(), *nonce, publicKey.Serialize()) - log.Error(err) - return - } - BPNodes[nodeID] = proto.Node{ - ID: nodeID.ToNodeID(), - Role: proto.Follower, // Default BP is Follower - Addr: addr, - PublicKey: publicKey, - Nonce: *nonce, - } - } - } - return -} - -// GetSRVRecords retrieves TypeSRV RRs. -func (dc *DNSClient) GetSRVRecords(name string) *dns.Msg { - in, err := dc.Query(name, dns.TypeSRV) - if err != nil { - log.WithField("name", name).WithError(err).Error("SRV record query failed") - return nil - } - return in -} - -// GetARecord retrieves TypeA RRs. -func (dc *DNSClient) GetARecord(name string) *dns.Msg { - in, err := dc.Query(name, dns.TypeA) - if err != nil { - log.WithField("name", name).WithError(err).Error("A record query failed") - return nil - } - return in -} - -// GetAAAARecord retrieves TypeAAAA(IPv6) RRs. -func (dc *DNSClient) GetAAAARecord(name string) *dns.Msg { - in, err := dc.Query(name, dns.TypeAAAA) - if err != nil { - log.WithField("name", name).WithError(err).Error("AAAA record query failed") - return nil - } - return in -} - -// GetTXTRecord retrieves TypeTXT RRs. -func (dc *DNSClient) GetTXTRecord(name string) *dns.Msg { - in, err := dc.Query(name, dns.TypeTXT) - if err != nil { - log.WithField("name", name).WithError(err).Error("TXT record query failed") - return nil - } - return in -} diff --git a/route/bootstrap_test.go b/route/bootstrap_test.go deleted file mode 100644 index 8314e698b..000000000 --- a/route/bootstrap_test.go +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package route - -import ( - "path/filepath" - "runtime" - "strings" - "testing" - - "github.com/miekg/dns" - - "github.com/CovenantSQL/CovenantSQL/conf" - "github.com/CovenantSQL/CovenantSQL/utils/log" -) - -func TestGetSRV(t *testing.T) { - log.SetLevel(log.DebugLevel) - _, testFile, _, _ := runtime.Caller(0) - confFile := filepath.Join(filepath.Dir(testFile), "../test/node_c/config.yaml") - - conf.GConf, _ = conf.LoadConfig(confFile) - log.Debugf("GConf: %v", conf.GConf) - - dc := NewDNSClient() - in := dc.GetSRVRecords(BPDomain) - if in != nil { - log.Debugf("answer: %v", in.Answer) - for _, rr := range in.Answer { - if ss, ok := rr.(*dns.SRV); ok { - log.Printf("string: %v", ss.Target) - } - } - - log.Debugf("ns: %v", in.Ns) - log.Debugf("extra: %v", in.Extra) - } -} - -func TestDNSClient_GetRecord_failed(t *testing.T) { - log.SetLevel(log.DebugLevel) - _, testFile, _, _ := runtime.Caller(0) - confFile := filepath.Join(filepath.Dir(testFile), "../test/node_c/config.yaml") - - conf.GConf, _ = conf.LoadConfig(confFile) - log.Debugf("GConf: %v", conf.GConf) - - dc := NewDNSClient() - in := dc.GetAAAARecord("non-exist.xxxx") - if in != nil { - t.Fatal("get AAAA Record should failed") - } - in = dc.GetARecord("non-exist.xxxx") - if in != nil { - t.Fatal("get A Record should failed") - } - in = dc.GetTXTRecord("non-exist.xxxx") - if in != nil { - t.Fatal("get TXT Record should failed") - } - in = dc.GetSRVRecords("non-exist.xxxx") - if in != nil { - t.Fatal("get SRV Record should failed") - } -} - -func TestGetBP(t *testing.T) { - 
log.SetLevel(log.DebugLevel) - _, testFile, _, _ := runtime.Caller(0) - confFile := filepath.Join(filepath.Dir(testFile), "../test/node_c/config.yaml") - - conf.GConf, _ = conf.LoadConfig(confFile) - log.Debugf("GConf: %v", conf.GConf) - - dc := NewDNSClient() - ips, err := dc.GetBPFromDNSSeed(BPDomain) - if err != nil { - t.Fatalf("error: %v", err) - } else { - log.Debugf("BP addresses: %v", ips) - } - - // not DNSSEC domain - ips, err = dc.GetBPFromDNSSeed("_bp._tcp.gridbase.io.") - if conf.GConf.DNSSeed.EnforcedDNSSEC && (err == nil || !strings.Contains(err.Error(), "not DNSSEC record")) { - t.Fatalf("should be error: %v", err) - } else { - log.Debugf("error: %v", err) - } -} - -func TestGetBPEnforced(t *testing.T) { - log.SetLevel(log.DebugLevel) - _, testFile, _, _ := runtime.Caller(0) - confFile := filepath.Join(filepath.Dir(testFile), "../test/bootstrap.yaml") - - conf.GConf, _ = conf.LoadConfig(confFile) - log.Debugf("GConf: %v", conf.GConf) - - dc := NewDNSClient() - ips, err := dc.GetBPFromDNSSeed(BPDomain) - if err != nil { - t.Fatalf("error: %v", err) - } else { - log.Debugf("BP addresses: %v", ips) - } - - // not DNSSEC domain - ips, err = dc.GetBPFromDNSSeed("_bp._tcp.gridbase.io.") - if conf.GConf.DNSSeed.EnforcedDNSSEC && (err == nil || !strings.Contains(err.Error(), "not DNSSEC record")) { - t.Fatalf("should be error: %v", err) - } else { - log.Debugf("error: %v", err) - } - - // EnforcedDNSSEC but no DNSSEC domain - conf.GConf.DNSSeed.EnforcedDNSSEC = true - ips, err = dc.GetBPFromDNSSeed("_bp._tcp.gridbase.io.") - if conf.GConf.DNSSeed.EnforcedDNSSEC && (err == nil || !strings.Contains(err.Error(), "not DNSSEC record")) { - t.Fatalf("should be error: %v", err) - } else { - log.Debugf("error: %v", err) - } -} diff --git a/route/dns.go b/route/dns.go index db7a11751..afdfa078b 100644 --- a/route/dns.go +++ b/route/dns.go @@ -51,6 +51,7 @@ var ( type Resolver struct { cache NodeIDAddressMap bpNodeIDs NodeIDAddressMap + bpNodes IDNodeMap sync.RWMutex 
} @@ -117,54 +118,43 @@ func SetNodeAddrCache(id *proto.RawNodeID, addr string) (err error) { // initBPNodeIDs initializes BlockProducer route and map from config file and DNS Seed. func initBPNodeIDs() (bpNodeIDs NodeIDAddressMap) { - // clear address map before init - resolver.bpNodeIDs = make(NodeIDAddressMap) - bpNodeIDs = resolver.bpNodeIDs - if conf.GConf == nil { log.Fatal("call conf.LoadConfig to init conf first") } - var BPNodes = make(IDNodeMap) + // clear address map before init + resolver.bpNodeIDs = make(NodeIDAddressMap) + bpNodeIDs = resolver.bpNodeIDs - // ignore DNS seed in test mode - if !conf.GConf.IsTestMode { - dc := NewDNSClient() - var seedDomain = BPDomain - //seedDomain = TestBPDomain - var err error - BPNodes, err = dc.GetBPFromDNSSeed(seedDomain) + var err error + + if conf.GConf.DNSSeed.Domain != "" { + dc := IPv6SeedClient{} + resolver.bpNodes, err = dc.GetBPFromDNSSeed(conf.GConf.DNSSeed.Domain) if err != nil { - log.WithField("seed", seedDomain).WithError(err).Error("getting BP addr from DNS failed") + log.WithField("seed", conf.GConf.DNSSeed.Domain).WithError(err).Error( + "getting BP addr from DNS failed") return } } + if resolver.bpNodes == nil { + resolver.bpNodes = make(IDNodeMap) + } if conf.GConf.KnownNodes != nil { for _, n := range conf.GConf.KnownNodes { rawID := n.ID.ToRawNodeID() if rawID != nil { if n.Role == proto.Leader || n.Role == proto.Follower { - BPNodes[*rawID] = n + resolver.bpNodes[*rawID] = n } setNodeAddrCache(rawID, n.Addr) } } } - extraBP := *conf.GConf.BP.NodeID.ToRawNodeID() - if _, exists := BPNodes[extraBP]; !exists { - BPNodes[extraBP] = proto.Node{ - ID: conf.GConf.BP.NodeID, - Role: proto.Leader, - Addr: "", - PublicKey: conf.GConf.BP.PublicKey, - Nonce: conf.GConf.BP.Nonce, - } - } - - conf.GConf.SeedBPNodes = make([]proto.Node, 0, len(BPNodes)) - for _, n := range BPNodes { + conf.GConf.SeedBPNodes = make([]proto.Node, 0, len(resolver.bpNodes)) + for _, n := range resolver.bpNodes { rawID := 
n.ID.ToRawNodeID() if rawID != nil { conf.GConf.SeedBPNodes = append(conf.GConf.SeedBPNodes, n) @@ -177,16 +167,17 @@ func initBPNodeIDs() (bpNodeIDs NodeIDAddressMap) { } // GetBPs returns the known BP node id list. -func GetBPs() (BPAddrs []proto.NodeID) { - BPAddrs = make([]proto.NodeID, 0, len(resolver.bpNodeIDs)) +func GetBPs() (bpAddrs []proto.NodeID) { + bpAddrs = make([]proto.NodeID, 0, len(resolver.bpNodeIDs)) for id := range resolver.bpNodeIDs { - BPAddrs = append(BPAddrs, proto.NodeID(id.String())) + bpAddrs = append(bpAddrs, proto.NodeID(id.String())) } return } // InitKMS inits nasty stuff, only for testing. func InitKMS(PubKeyStoreFile string) { + initResolver() kms.InitPublicKeyStore(PubKeyStoreFile, nil) if conf.GConf.KnownNodes != nil { for _, n := range conf.GConf.KnownNodes { diff --git a/route/dns_test.go b/route/dns_test.go index 71e70d725..c03bdeedf 100644 --- a/route/dns_test.go +++ b/route/dns_test.go @@ -69,8 +69,8 @@ func TestResolver(t *testing.T) { BPmap := initBPNodeIDs() log.Debugf("BPmap: %v", BPmap) BPs := GetBPs() - dc := NewDNSClient() - ips, err := dc.GetBPFromDNSSeed(BPDomain) + dc := IPv6SeedClient{} + ips, err := dc.GetBPFromDNSSeed(TestDomain) log.Debugf("BPs: %v", BPs) So(len(BPs), ShouldBeGreaterThanOrEqualTo, len(ips)) diff --git a/route/ipv6seed.go b/route/ipv6seed.go new file mode 100644 index 000000000..2e1dab7d7 --- /dev/null +++ b/route/ipv6seed.go @@ -0,0 +1,138 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package route + +import ( + "fmt" + + "github.com/CovenantSQL/beacon/ipv6" + "github.com/pkg/errors" + + "github.com/CovenantSQL/CovenantSQL/crypto" + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" + "github.com/CovenantSQL/CovenantSQL/proto" +) + +const ( + NODEID = "id." + PUBKEY = "pub." + NONCE = "n." + ADDR = "addr." +) + +type IPv6SeedClient struct{} + +func (isc *IPv6SeedClient) GetBPFromDNSSeed(BPDomain string) (BPNodes IDNodeMap, err error) { + // Public key + pubKeyBuf := make([]byte, asymmetric.PublicKeyBytesLen) + pubKeyBuf[0] = asymmetric.PublicKeyFormatHeader + var pubBuf, nonceBuf, addrBuf []byte + if pubBuf, err = ipv6.FromDomain(PUBKEY + BPDomain); err != nil { + return + } + if len(pubBuf) != asymmetric.PublicKeyBytesLen-1 { + return nil, errors.Errorf("error public key bytes len: %d", len(pubBuf)) + } + copy(pubKeyBuf[1:], pubBuf) + var pubKey asymmetric.PublicKey + err = pubKey.UnmarshalBinary(pubKeyBuf) + if err != nil { + return + } + + // Nonce + if nonceBuf, err = ipv6.FromDomain(NONCE + BPDomain); err != nil { + return + } + nonce, err := cpuminer.Uint256FromBytes(nonceBuf) + if err != nil { + return + } + + // Addr + addrBuf, err = ipv6.FromDomain(ADDR + BPDomain) + if err != nil { + return + } + addrBytes, err := crypto.RemovePKCSPadding(addrBuf) + if err != nil { + return + } + + // NodeID + nodeIdBuf, err := ipv6.FromDomain(NODEID + BPDomain) + if err != nil { + return + } + var nodeId proto.RawNodeID + err = nodeId.SetBytes(nodeIdBuf) + if err != nil { + return + } + + BPNodes = make(IDNodeMap) + BPNodes[nodeId] = proto.Node{ + ID: nodeId.ToNodeID(), + Addr: string(addrBytes), + PublicKey: &pubKey, + Nonce: *nonce, + } + + return +} + +func (isc *IPv6SeedClient) GenBPIPv6(node *proto.Node, domain string) (out string, err error) { + // NodeID + nodeIdIps, err := 
ipv6.ToIPv6(node.ID.ToRawNodeID().AsBytes()) + if err != nil { + return "", err + } + for i, ip := range nodeIdIps { + out += fmt.Sprintf("%02d.%s%s 1 IN AAAA %s\n", i, NODEID, domain, ip) + } + + // Public key, with leading 1 byte type trimmed + // see: asymmetric.PublicKeyFormatHeader + pubKeyIps, err := ipv6.ToIPv6(node.PublicKey.Serialize()[1:]) + if err != nil { + return "", err + } + for i, ip := range pubKeyIps { + out += fmt.Sprintf("%02d.%s%s 1 IN AAAA %s\n", i, PUBKEY, domain, ip) + } + + // Nonce + nonceIps, err := ipv6.ToIPv6(node.Nonce.Bytes()) + if err != nil { + return "", err + } + for i, ip := range nonceIps { + out += fmt.Sprintf("%02d.%s%s 1 IN AAAA %s\n", i, NONCE, domain, ip) + } + + // Addr + addrIps, err := ipv6.ToIPv6(crypto.AddPKCSPadding([]byte(node.Addr))) + if err != nil { + return "", err + } + for i, ip := range addrIps { + out += fmt.Sprintf("%02d.%s%s 1 IN AAAA %s\n", i, ADDR, domain, ip) + } + + return +} diff --git a/route/ipv6seed_test.go b/route/ipv6seed_test.go new file mode 100644 index 000000000..16c0f2e66 --- /dev/null +++ b/route/ipv6seed_test.go @@ -0,0 +1,102 @@ +/* + * Copyright 2019 The CovenantSQL Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package route + +import ( + "encoding/hex" + "testing" + + . 
"github.com/smartystreets/goconvey/convey" + + "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" + "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" + "github.com/CovenantSQL/CovenantSQL/proto" + "github.com/CovenantSQL/CovenantSQL/utils" + "github.com/CovenantSQL/CovenantSQL/utils/log" +) + +const TestDomain = "unittest.optool.net" +const IntergrationTestDomain = "intergration-test.gridb.io" + +func TestIPv6Seed(t *testing.T) { + isc := IPv6SeedClient{} + log.SetLevel(log.DebugLevel) + Convey("", t, func() { + var pub asymmetric.PublicKey + _ = pub.UnmarshalBinary( + []byte{2, 151, 232, 88, 201, 127, 111, 128, + 208, 117, 192, 223, 212, 5, 209, 42, + 214, 62, 89, 253, 18, 51, 73, 188, + 178, 136, 185, 2, 158, 56, 217, 104, 154}) + + node := proto.Node{ + ID: proto.NodeID("0000000001f26f2145dc770edc385806c6ef131a472ea9ae0f9073d03b4b96d8"), + Addr: "111.111.111.111:11111", + PublicKey: &pub, + Nonce: cpuminer.Uint256{1, 2, 3, 4}, + } + + out, err := isc.GenBPIPv6(&node, TestDomain) + if err != nil { + t.Errorf("gen ipv6 failed: %v", err) + return + } + log.Debug(out) + + nodeBuf, err := utils.EncodeMsgPack(node) + if err != nil { + t.Errorf("marshal node info failed: %v", err) + return + } + log.Debugf("node: %s", nodeBuf) + + m, err := isc.GetBPFromDNSSeed(TestDomain) + So(err, ShouldBeNil) + So(len(m), ShouldEqual, 1) + So(m[*node.ID.ToRawNodeID()].ID, ShouldResemble, node.ID) + So(m[*node.ID.ToRawNodeID()].Addr, ShouldResemble, node.Addr) + So(m[*node.ID.ToRawNodeID()].PublicKey.Serialize(), ShouldResemble, node.PublicKey.Serialize()) + So(m[*node.ID.ToRawNodeID()].Nonce, ShouldResemble, node.Nonce) + }) + Convey(IntergrationTestDomain, t, func() { + var pub asymmetric.PublicKey + pubKeyBytes, _ := hex.DecodeString("02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24") + _ = pub.UnmarshalBinary(pubKeyBytes) + + node := proto.Node{ + ID: proto.NodeID("00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9"), + Addr: "127.0.0.1:3122", 
+ PublicKey: &pub, + Nonce: cpuminer.Uint256{313283, 0, 0, 0}, + } + + out, err := isc.GenBPIPv6(&node, IntergrationTestDomain) + if err != nil { + t.Errorf("gen ipv6 failed: %v", err) + return + } + log.Debug(out) + + m, err := isc.GetBPFromDNSSeed(IntergrationTestDomain) + So(err, ShouldBeNil) + So(len(m), ShouldEqual, 1) + So(m[*node.ID.ToRawNodeID()].ID, ShouldResemble, node.ID) + So(m[*node.ID.ToRawNodeID()].Addr, ShouldResemble, node.Addr) + So(m[*node.ID.ToRawNodeID()].PublicKey.Serialize(), ShouldResemble, node.PublicKey.Serialize()) + So(m[*node.ID.ToRawNodeID()].Nonce, ShouldResemble, node.Nonce) + }) +} diff --git a/route/service.go b/route/service.go index 7b95a0cd6..9f1cbee15 100644 --- a/route/service.go +++ b/route/service.go @@ -102,6 +102,13 @@ func (DHT *DHTService) Ping(req *proto.PingReq, resp *proto.PingResp) (err error return } + // BP node is not permitted to set by RPC + if req.Node.Role == proto.Leader || req.Node.Role == proto.Follower { + err = fmt.Errorf("setting %s node is not permitted", req.Node.Role.String()) + log.Error(err) + return + } + // Checking if ID Nonce Pubkey matched if !kms.IsIDPubNonceValid(req.Node.ID.ToRawNodeID(), &req.Node.Nonce, req.Node.PublicKey) { err = fmt.Errorf("node: %s nonce public key not match", req.Node.ID) From abab9c8e3bfd4a342358fdd1910127d9d8fd62fe Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 26 Mar 2019 16:41:12 +0800 Subject: [PATCH 196/244] Add const PublicKeyBytesLen PublicKeyFormatHeader --- crypto/asymmetric/keypair.go | 9 ++++++++- crypto/kms/config.yaml | 2 +- crypto/kms/pubkeystore.go | 12 ++++++++++-- 3 files changed, 19 insertions(+), 4 deletions(-) diff --git a/crypto/asymmetric/keypair.go b/crypto/asymmetric/keypair.go index 1334847c1..d7ad88025 100644 --- a/crypto/asymmetric/keypair.go +++ b/crypto/asymmetric/keypair.go @@ -32,7 +32,14 @@ import ( ) // PrivateKeyBytesLen defines the length in bytes of a serialized private key. 
-const PrivateKeyBytesLen = 32 +const PrivateKeyBytesLen = ec.PrivKeyBytesLen + +// PublicKeyBytesLen defines the length in bytes of a serialized public key. +const PublicKeyBytesLen = ec.PubKeyBytesLenCompressed + +// PublicKeyFormatHeader is the default header of PublicKey.Serialize() +// see: github.com/btcsuite/btcd/btcec/pubkey.go#L63 +const PublicKeyFormatHeader byte = 0x2 var parsedPublicKeyCache sync.Map diff --git a/crypto/kms/config.yaml b/crypto/kms/config.yaml index a18156934..800b05193 100644 --- a/crypto/kms/config.yaml +++ b/crypto/kms/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true BlockProducer: PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24" NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9 diff --git a/crypto/kms/pubkeystore.go b/crypto/kms/pubkeystore.go index 2ab4e1d0c..1cbee0c05 100644 --- a/crypto/kms/pubkeystore.go +++ b/crypto/kms/pubkeystore.go @@ -94,9 +94,17 @@ func InitBP() { if conf.GConf == nil { log.Fatal("must call conf.LoadConfig first") } - BP = conf.GConf.BP + if conf.GConf.BP == nil { + seedBP := &conf.GConf.SeedBPNodes[0] + conf.GConf.BP = &conf.BPInfo{ + PublicKey: seedBP.PublicKey, + NodeID: seedBP.ID, + Nonce: seedBP.Nonce, + } + } - err := hash.Decode(&BP.RawNodeID.Hash, string(BP.NodeID)) + BP = conf.GConf.BP + err := hash.Decode(&conf.GConf.BP.RawNodeID.Hash, string(conf.GConf.BP.NodeID)) if err != nil { log.WithError(err).Fatal("BP.NodeID error") } From 98d4b1817859d4152a372239929c96f5d30359bd Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 26 Mar 2019 16:42:36 +0800 Subject: [PATCH 197/244] Add 'cql generate public' func --- cmd/cql/internal/generate.go | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/cmd/cql/internal/generate.go b/cmd/cql/internal/generate.go index e6c35a62b..e29e539a5 100644 --- a/cmd/cql/internal/generate.go +++ b/cmd/cql/internal/generate.go @@ -17,6 +17,7 @@ package 
internal import ( + "encoding/hex" "fmt" "github.com/CovenantSQL/CovenantSQL/conf" @@ -27,7 +28,7 @@ import ( // CmdGenerate is cql generate command entity. var CmdGenerate = &Command{ - UsageLine: "cql generate [-config file] config/private/wallet/public/nonce", + UsageLine: "cql generate [-config file] config/wallet/public/nonce", Short: "generate config related file or keys", Long: ` Generate command can generate private.key and config.yaml for CovenantSQL. @@ -52,11 +53,13 @@ func runGenerate(cmd *Command, args []string) { switch genType { case "config": - case "private": case "wallet": + configInit() walletGen() case "public": configInit() + publicKey := publicGen() + fmt.Printf("Public key's hex: %s\n", hex.EncodeToString(publicKey.Serialize())) case "nonce": configInit() default: @@ -73,8 +76,6 @@ func privateGen() { } func walletGen() { - configInit() - //TODO if config has wallet, print and return var publicKey *asymmetric.PublicKey @@ -92,13 +93,8 @@ func walletGen() { } else { //use config specific private key file(already init by configInit()) ConsoleLog.Infof("generate wallet address directly from private key: %s", conf.GConf.PrivateKeyFile) - privateKey, err := kms.LoadPrivateKey(conf.GConf.PrivateKeyFile, []byte(password)) - if err != nil { - ConsoleLog.WithError(err).Error("load private key file failed") - SetExitStatus(1) - return - } - publicKey = privateKey.PubKey() + publicKey = publicGen() + ExitIfErrors() } keyHash, err := crypto.PubKeyHash(publicKey) @@ -113,7 +109,14 @@ func walletGen() { //TODO store in config.yaml } -func publicGen() { +func publicGen() *asymmetric.PublicKey { + privateKey, err := kms.LoadPrivateKey(conf.GConf.PrivateKeyFile, []byte(password)) + if err != nil { + ConsoleLog.WithError(err).Error("load private key file failed") + SetExitStatus(1) + return nil + } + return privateKey.PubKey() } func nonceGen() { From 267771200d7086a8417e2bdd78381e30c22695b6 Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 26 Mar 2019 
16:43:48 +0800 Subject: [PATCH 198/244] Delete keytool func in cql-utils cmd --- cmd/cql-utils/keytool.go | 41 ---------------------------------------- cmd/cql-utils/main.go | 9 +-------- 2 files changed, 1 insertion(+), 49 deletions(-) delete mode 100644 cmd/cql-utils/keytool.go diff --git a/cmd/cql-utils/keytool.go b/cmd/cql-utils/keytool.go deleted file mode 100644 index d96afcf64..000000000 --- a/cmd/cql-utils/keytool.go +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2018 The CovenantSQL Authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package main - -import ( - "encoding/hex" - "fmt" - "os" - - "github.com/CovenantSQL/CovenantSQL/crypto/kms" - "github.com/CovenantSQL/CovenantSQL/utils/log" -) - -func runKeytool() { - masterKey, err := readMasterKey() - if err != nil { - fmt.Printf("read master key failed: %v\n", err) - os.Exit(1) - } - - privateKey, err := kms.LoadPrivateKey(privateKeyFile, []byte(masterKey)) - if err != nil { - log.WithError(err).Error("load private key failed") - } - - fmt.Printf("Public key's hex: %s\n", hex.EncodeToString(privateKey.PubKey().Serialize())) -} diff --git a/cmd/cql-utils/main.go b/cmd/cql-utils/main.go index 28416b97a..fb72e8b91 100644 --- a/cmd/cql-utils/main.go +++ b/cmd/cql-utils/main.go @@ -46,7 +46,7 @@ const name = "cql-utils" func init() { log.SetLevel(log.InfoLevel) - flag.StringVar(&tool, "tool", "", "Tool type, miner, keytool, nonce, confgen") + flag.StringVar(&tool, "tool", "", "Tool type, miner, nonce, confgen") flag.StringVar(&publicKeyHex, "public", "", "Public key hex string to mine node id/nonce") flag.StringVar(&privateKeyFile, "private", "~/.cql/private.key", "Private key file to generate/show") flag.StringVar(&configFile, "config", "~/.cql/config.yaml", "Config file to use") @@ -82,13 +82,6 @@ func main() { // os.Exit(1) // } // runKeygen() - case "keytool": - if privateKeyFile == "" { - // error - log.Error("privateKey path is required for keytool") - os.Exit(1) - } - runKeytool() case "nonce": runNonce() case "confgen": From 9d6e4263826b30e7fd64acc376c9e32404c1ee97 Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 26 Mar 2019 17:20:16 +0800 Subject: [PATCH 199/244] Add getPublic func for get existing public key --- cmd/cql/internal/generate.go | 61 +++++++++++++++++++----------------- 1 file changed, 32 insertions(+), 29 deletions(-) diff --git a/cmd/cql/internal/generate.go b/cmd/cql/internal/generate.go index e29e539a5..faef3e02e 100644 --- a/cmd/cql/internal/generate.go +++ b/cmd/cql/internal/generate.go @@ -24,6 +24,7 @@ import 
( "github.com/CovenantSQL/CovenantSQL/crypto" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/kms" + mine "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" ) // CmdGenerate is cql generate command entity. @@ -62,6 +63,7 @@ func runGenerate(cmd *Command, args []string) { fmt.Printf("Public key's hex: %s\n", hex.EncodeToString(publicKey.Serialize())) case "nonce": configInit() + _ = nonceGen() default: cmd.Usage() SetExitStatus(1) @@ -69,33 +71,43 @@ func runGenerate(cmd *Command, args []string) { } } -func configGen() { -} - -func privateGen() { +func publicGen() *asymmetric.PublicKey { + //use config specific private key file(already init by configInit()) + ConsoleLog.Infof("generate public key directly from private key: %s", conf.GConf.PrivateKeyFile) + privateKey, err := kms.LoadPrivateKey(conf.GConf.PrivateKeyFile, []byte(password)) + if err != nil { + ConsoleLog.WithError(err).Error("load private key file failed") + SetExitStatus(1) + ExitIfErrors() + } + return privateKey.PubKey() } -func walletGen() { - //TODO if config has wallet, print and return - - var publicKey *asymmetric.PublicKey - +func getPublic() *asymmetric.PublicKey { //if config has public, use it for _, node := range conf.GConf.KnownNodes { if node.ID == conf.GConf.ThisNodeID { - publicKey = node.PublicKey + if node.PublicKey != nil { + ConsoleLog.Infof("use public key in config file: %s", configFile) + return node.PublicKey + } break } } - if publicKey != nil { - ConsoleLog.Infof("use public key in config file: %s", configFile) - } else { - //use config specific private key file(already init by configInit()) - ConsoleLog.Infof("generate wallet address directly from private key: %s", conf.GConf.PrivateKeyFile) - publicKey = publicGen() - ExitIfErrors() - } + return publicGen() +} + +func configGen() { +} + +func privateGen() { +} + +func walletGen() { + //TODO if config has wallet, print and return + + publicKey := getPublic() keyHash, err := 
crypto.PubKeyHash(publicKey) if err != nil { @@ -109,15 +121,6 @@ func walletGen() { //TODO store in config.yaml } -func publicGen() *asymmetric.PublicKey { - privateKey, err := kms.LoadPrivateKey(conf.GConf.PrivateKeyFile, []byte(password)) - if err != nil { - ConsoleLog.WithError(err).Error("load private key file failed") - SetExitStatus(1) - return nil - } - return privateKey.PubKey() -} - -func nonceGen() { +func nonceGen() *mine.NonceInfo { + return nil } From 4d886bbce51a279cb7efdcb8e0ad98fa46ba5880 Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 26 Mar 2019 17:42:21 +0800 Subject: [PATCH 200/244] Simplify generated cql conf --- conf/config.go | 18 +++++- conf/config_test.go | 16 ++--- conf/testnet/config.yaml | 2 +- conf/testnet/parameters.go | 127 ++----------------------------------- 4 files changed, 31 insertions(+), 132 deletions(-) diff --git a/conf/config.go b/conf/config.go index 1d4fff04f..fca33964f 100644 --- a/conf/config.go +++ b/conf/config.go @@ -100,11 +100,12 @@ type MinerInfo struct { type DNSSeed struct { EnforcedDNSSEC bool `yaml:"EnforcedDNSSEC"` DNSServers []string `yaml:"DNSServers"` + Domain string `yaml:"Domain"` } // Config holds all the config read from yaml config file. type Config struct { - IsTestMode bool `yaml:"IsTestMode,omitempty"` // when testMode use default empty masterKey and test DNS domain + UseTestMasterKey bool `yaml:"UseTestMasterKey,omitempty"` // when UseTestMasterKey use default empty masterKey // StartupSyncHoles indicates synchronizing hole blocks from other peers on BP // startup/reloading. 
StartupSyncHoles bool `yaml:"StartupSyncHoles,omitempty"` @@ -156,6 +157,21 @@ func LoadConfig(configPath string) (config *Config, err error) { return } + if config.WorkingRoot == "" { + config.WorkingRoot = "./" + } + + if config.PrivateKeyFile == "" { + config.PrivateKeyFile = "private.key" + } + + if config.PubKeyStoreFile == "" { + config.PubKeyStoreFile = "public.keystore" + } + if config.DHTFileName == "" { + config.DHTFileName = "dht.db" + } + configDir := path.Dir(configPath) if !path.IsAbs(config.PubKeyStoreFile) { config.PubKeyStoreFile = path.Join(configDir, config.PubKeyStoreFile) diff --git a/conf/config_test.go b/conf/config_test.go index 6917cf207..417fa4ca2 100644 --- a/conf/config_test.go +++ b/conf/config_test.go @@ -70,14 +70,14 @@ func TestConf(t *testing.T) { Convey("LoadConfig", t, func() { defer os.Remove(testFile) config := &Config{ - IsTestMode: false, - GenerateKeyPair: false, - WorkingRoot: "", - PubKeyStoreFile: "", - PrivateKeyFile: "", - DHTFileName: "", - ListenAddr: "", - ThisNodeID: "", + UseTestMasterKey: false, + GenerateKeyPair: false, + WorkingRoot: "", + PubKeyStoreFile: "", + PrivateKeyFile: "", + DHTFileName: "", + ListenAddr: "", + ThisNodeID: "", ValidDNSKeys: map[string]string{ // Cloudflare.com DNSKEY. 
SEE: `dig +multi cloudflare.com DNSKEY` "koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==": "cloudflare.com", diff --git a/conf/testnet/config.yaml b/conf/testnet/config.yaml index e2d0655dd..2e2d61ec3 100644 --- a/conf/testnet/config.yaml +++ b/conf/testnet/config.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/conf/testnet/parameters.go b/conf/testnet/parameters.go index b127193e2..a32a79c6f 100644 --- a/conf/testnet/parameters.go +++ b/conf/testnet/parameters.go @@ -25,131 +25,14 @@ import ( ) const ( - // TestNetConfigYAML is the config string in YAML format of the CovenantSQL TestNet. - TestNetConfigYAML = `IsTestMode: true -StartupSyncHoles: true -WorkingRoot: "./" -PubKeyStoreFile: "public.keystore" -PrivateKeyFile: "private.key" -DHTFileName: "dht.db" -ListenAddr: "0.0.0.0:15151" -ThisNodeID: "00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d" -QPS: 1000 -BillingBlockCount: 60 -BPPeriod: 10s -BPTick: 3s -SQLChainPeriod: 60s -SQLChainTick: 10s -SQLChainTTL: 10 -MinProviderDeposit: 1000000 -ValidDNSKeys: - koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com - mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com - oJMRESz5E4gYzS/q6XDrvU1qMPYIjCWzJaOau8XNEZeqCYKD5ar0IRd8KqXXFJkqmVfRvMGPmM1x8fGAa2XhSA==: cloudflare.com -MinNodeIDDifficulty: 2 + // CQLConfigYAML is the config string in YAML format of the CovenantSQL TestNet. 
+ CQLConfigYAML = ` DNSSeed: - EnforcedDNSSEC: false - DNSServers: - - 1.1.1.1 - - 202.46.34.74 - - 202.46.34.75 - - 202.46.34.76 + Domain: "bp.testnet.gridb.io" + Adapter: ListenAddr: "127.0.0.1:4661" StorageDriver: covenantsql - -BlockProducer: - PublicKey: "02c1db96f2ba7e1cb4e9822d12de0f63fb666feb828c7f509e81fab9bd7a34039c" - NodeID: 00000000000589366268c274fdc11ec8bdb17e668d2f619555a2e9c1a29c91d8 - Nonce: - a: 14396347928 - b: 0 - c: 0 - d: 6148914694092305796 - ChainFileName: "chain.db" - -KnownNodes: -- ID: 00000000000589366268c274fdc11ec8bdb17e668d2f619555a2e9c1a29c91d8 - Nonce: - a: 14396347928 - b: 0 - c: 0 - d: 6148914694092305796 - Addr: bp00.cn.gridb.io:7777 - PublicKey: "02c1db96f2ba7e1cb4e9822d12de0f63fb666feb828c7f509e81fab9bd7a34039c" - Role: Leader -- ID: 000000000013fd4b3180dd424d5a895bc57b798e5315087b7198c926d8893f98 - Nonce: - a: 789554103 - b: 0 - c: 0 - d: 8070450536379825883 - Addr: bp01.cn.gridb.io:7777 - PublicKey: "02c1db96f2ba7e1cb4e9822d12de0f63fb666feb828c7f509e81fab9bd7a34039c" - Role: Follower -- ID: 00000000001771e2b2e12b6f9f85d58ef5261a4b98a2e80bba0c5ef7bd72c499 - Nonce: - a: 1822880492 - b: 0 - c: 0 - d: 8646911286604382906 - Addr: bp02.cn.gridb.io:7777 - PublicKey: "02c1db96f2ba7e1cb4e9822d12de0f63fb666feb828c7f509e81fab9bd7a34039c" - Role: Follower -- ID: 000000000014a2f14e79aec0a27a2a669aab416c392d5577760d43ed8503020d - Nonce: - a: 2552803966 - b: 0 - c: 0 - d: 9079256850862786277 - Addr: bp03.cn.gridb.io:7777 - PublicKey: "02c1db96f2ba7e1cb4e9822d12de0f63fb666feb828c7f509e81fab9bd7a34039c" - Role: Follower -- ID: 00000000003b2bd120a7d07f248b181fc794ba8b278f07f9a780e61eb77f6abb - Nonce: - a: 2449538793 - b: 0 - c: 0 - d: 8791026473473316840 - Addr: bp04.hk.gridb.io:7777 - PublicKey: "02c1db96f2ba7e1cb4e9822d12de0f63fb666feb828c7f509e81fab9bd7a34039c" - Role: Follower -- ID: 0000000000293f7216362791b6b1c9772184d6976cb34310c42547735410186c - Nonce: - a: 746598970 - b: 0 - c: 0 - d: 10808639108098016056 - Addr: 
bp05.cn.gridb.io:7777 - PublicKey: "02c1db96f2ba7e1cb4e9822d12de0f63fb666feb828c7f509e81fab9bd7a34039c" - Role: Follower -- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade - Nonce: - a: 567323 - b: 0 - c: 0 - d: 3104982049 - Addr: miner00.cn.gridb.io:7778 - PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20 - Role: Miner -- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5 - Nonce: - a: 240524 - b: 0 - c: 0 - d: 2305843010430351476 - Addr: miner01.cn.gridb.io:7778 - PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16 - Role: Miner -- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d - Nonce: - a: 22403 - b: 0 - c: 0 - d: 0 - Addr: "" - PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4 - Role: Client ` ) @@ -157,7 +40,7 @@ KnownNodes: func GetTestNetConfig() (config *conf.Config) { var err error config = &conf.Config{} - if err = yaml.Unmarshal([]byte(TestNetConfigYAML), config); err != nil { + if err = yaml.Unmarshal([]byte(CQLConfigYAML), config); err != nil { log.WithError(err).Fatal("failed to unmarshal testnet config") } return From 1835b4b4095c11c52a9a482455d8f9004875ec68 Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 26 Mar 2019 17:43:12 +0800 Subject: [PATCH 201/244] Use UseTestMasterKey in yaml --- rpc/_example/conf/node1.yaml | 2 +- rpc/_example/conf/node2.yaml | 2 +- rpc/_example/conf/tracker.yaml | 2 +- rpc/rpcutil.go | 2 ++ 4 files changed, 5 insertions(+), 3 deletions(-) diff --git a/rpc/_example/conf/node1.yaml b/rpc/_example/conf/node1.yaml index 03d9c4dde..8790228be 100644 --- a/rpc/_example/conf/node1.yaml +++ b/rpc/_example/conf/node1.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true ListenAddr: "127.0.0.1:12230" ThisNodeID: "00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d" PubKeyStoreFile: "./node1.public.keystore" diff --git a/rpc/_example/conf/node2.yaml 
b/rpc/_example/conf/node2.yaml index a7ff23310..20299d3ac 100644 --- a/rpc/_example/conf/node2.yaml +++ b/rpc/_example/conf/node2.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true ListenAddr: "127.0.0.1:12231" ThisNodeID: "000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade" PubKeyStoreFile: "./node2.public.keystore" diff --git a/rpc/_example/conf/tracker.yaml b/rpc/_example/conf/tracker.yaml index 0b9c2023c..b2e800895 100644 --- a/rpc/_example/conf/tracker.yaml +++ b/rpc/_example/conf/tracker.yaml @@ -1,4 +1,4 @@ -IsTestMode: true +UseTestMasterKey: true ListenAddr: "127.0.0.1:2230" ThisNodeID: "00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9" PubKeyStoreFile: "./tracker.public.keystore" diff --git a/rpc/rpcutil.go b/rpc/rpcutil.go index 2c1e47d51..8605a5542 100644 --- a/rpc/rpcutil.go +++ b/rpc/rpcutil.go @@ -41,10 +41,12 @@ var ( // ErrNoChiefBlockProducerAvailable defines failure on find chief block producer. ErrNoChiefBlockProducerAvailable = errors.New("no chief block producer found") + //FIXME(auxten): remove currentBP stuff // currentBP represents current chief block producer node. currentBP proto.NodeID // currentBPLock represents the chief block producer access lock. 
currentBPLock sync.Mutex + // callRPCExpvarLock is the lock of RPC Call Publish lock callRPCExpvarLock sync.Mutex ) From c098b330bff1507727ed3efe8a1303bd22642aae Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 26 Mar 2019 17:44:26 +0800 Subject: [PATCH 202/244] Cleanup all conf.GConf.IsTestMode --- cmd/cql-minerd/node.go | 2 +- cmd/cql-observer/node.go | 2 +- cmd/cql-utils/confgen.go | 7 +++++-- cmd/cqld/bootstrap.go | 2 +- sqlchain/chain_test.go | 16 ++++++++-------- sqlchain/observer/config_test.go | 4 ++-- test/integration/node_c/config.yaml | 4 ---- xenomint/mux_test.go | 2 +- 8 files changed, 19 insertions(+), 20 deletions(-) diff --git a/cmd/cql-minerd/node.go b/cmd/cql-minerd/node.go index d4572b5e0..0c152c4b4 100644 --- a/cmd/cql-minerd/node.go +++ b/cmd/cql-minerd/node.go @@ -33,7 +33,7 @@ import ( func initNode() (server *rpc.Server, err error) { var masterKey []byte - if !conf.GConf.IsTestMode { + if !conf.GConf.UseTestMasterKey { // read master key fmt.Print("Type in Master key to continue: ") masterKey, err = terminal.ReadPassword(syscall.Stdin) diff --git a/cmd/cql-observer/node.go b/cmd/cql-observer/node.go index c49738de7..bfb24daa9 100644 --- a/cmd/cql-observer/node.go +++ b/cmd/cql-observer/node.go @@ -30,7 +30,7 @@ import ( func initNode() (err error) { var masterKey []byte - if !conf.GConf.IsTestMode { + if !conf.GConf.UseTestMasterKey { fmt.Print("Type in Master key to continue:") masterKey, err = terminal.ReadPassword(syscall.Stdin) if err != nil { diff --git a/cmd/cql-utils/confgen.go b/cmd/cql-utils/confgen.go index 7ab72d514..02c054f95 100644 --- a/cmd/cql-utils/confgen.go +++ b/cmd/cql-utils/confgen.go @@ -74,13 +74,16 @@ func runConfgen() { testnetConfig.PrivateKeyFile = privateKeyFileName testnetConfig.PubKeyStoreFile = publicKeystoreFileName testnetConfig.ThisNodeID = cliNodeID - testnetConfig.KnownNodes = append(testnetConfig.KnownNodes, proto.Node{ + if testnetConfig.KnownNodes == nil { + testnetConfig.KnownNodes = make([]proto.Node, 
1) + } + testnetConfig.KnownNodes[0] = proto.Node{ ID: cliNodeID, Role: proto.Client, Addr: "0.0.0.0:15151", PublicKey: publicKey, Nonce: nonce.Nonce, - }) + } // Write config out, err := yaml.Marshal(testnetConfig) diff --git a/cmd/cqld/bootstrap.go b/cmd/cqld/bootstrap.go index 7092445b3..7560222e2 100644 --- a/cmd/cqld/bootstrap.go +++ b/cmd/cqld/bootstrap.go @@ -47,7 +47,7 @@ func runNode(nodeID proto.NodeID, listenAddr string) (err error) { } var masterKey []byte - if !conf.GConf.IsTestMode { + if !conf.GConf.UseTestMasterKey { // read master key fmt.Print("Type in Master key to continue: ") masterKey, err = terminal.ReadPassword(syscall.Stdin) diff --git a/sqlchain/chain_test.go b/sqlchain/chain_test.go index 4fbba3cf9..b8c2e6b68 100644 --- a/sqlchain/chain_test.go +++ b/sqlchain/chain_test.go @@ -206,14 +206,14 @@ func TestMultiChain(t *testing.T) { } conf.GConf = &conf.Config{ - IsTestMode: true, - GenerateKeyPair: false, - WorkingRoot: testDataDir, - PubKeyStoreFile: "public.keystore", - PrivateKeyFile: "private.key", - DHTFileName: "dht.db", - ListenAddr: bpsvr.Listener.Addr().String(), - ThisNodeID: bpinfo.NodeID, + UseTestMasterKey: true, + GenerateKeyPair: false, + WorkingRoot: testDataDir, + PubKeyStoreFile: "public.keystore", + PrivateKeyFile: "private.key", + DHTFileName: "dht.db", + ListenAddr: bpsvr.Listener.Addr().String(), + ThisNodeID: bpinfo.NodeID, ValidDNSKeys: map[string]string{ "koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==": "cloudflare.com", "mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==": "cloudflare.com", diff --git a/sqlchain/observer/config_test.go b/sqlchain/observer/config_test.go index 30326a664..e0eeecfa5 100644 --- a/sqlchain/observer/config_test.go +++ b/sqlchain/observer/config_test.go @@ -57,7 +57,7 @@ func TestLoadConfig(t *testing.T) { }) Convey("Given a config file without observer section", func() { err = ioutil.WriteFile(fl, []byte( - 
`IsTestMode: true + `UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" @@ -184,7 +184,7 @@ KnownNodes: }) Convey("Given a full config file", func() { err = ioutil.WriteFile(fl, []byte( - `IsTestMode: true + `UseTestMasterKey: true WorkingRoot: "./" PubKeyStoreFile: "public.keystore" PrivateKeyFile: "private.key" diff --git a/test/integration/node_c/config.yaml b/test/integration/node_c/config.yaml index ca473fabe..2669cff94 100644 --- a/test/integration/node_c/config.yaml +++ b/test/integration/node_c/config.yaml @@ -1,8 +1,4 @@ UseTestMasterKey: true -WorkingRoot: "./" -PubKeyStoreFile: "public.keystore" -PrivateKeyFile: "private.key" -DHTFileName: "dht.db" ListenAddr: "127.0.0.1:3120" ThisNodeID: "00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d" diff --git a/xenomint/mux_test.go b/xenomint/mux_test.go index 215f80ce9..358ebf1a9 100644 --- a/xenomint/mux_test.go +++ b/xenomint/mux_test.go @@ -79,7 +79,7 @@ func setupMuxParallel(priv *ca.PrivateKey) ( nis[2].Role = proto.Client // Setup global config conf.GConf = &conf.Config{ - IsTestMode: true, + UseTestMasterKey: true, GenerateKeyPair: false, MinNodeIDDifficulty: testingNonceDifficulty, BP: &conf.BPInfo{ From 729d987cd8c1da4c75ea623317593ec29df6898e Mon Sep 17 00:00:00 2001 From: laodouya Date: Tue, 26 Mar 2019 17:52:09 +0800 Subject: [PATCH 203/244] Move getPublic() to cql/cfg.go, remove 'cql generate nonce' type. 
--- cmd/cql/internal/cfg.go | 41 ++++++++++----------- cmd/cql/internal/generate.go | 70 ++++++++++++++++-------------------- 2 files changed, 49 insertions(+), 62 deletions(-) diff --git a/cmd/cql/internal/cfg.go b/cmd/cql/internal/cfg.go index ebf186d04..e3319d851 100644 --- a/cmd/cql/internal/cfg.go +++ b/cmd/cql/internal/cfg.go @@ -17,13 +17,11 @@ package internal import ( - "bufio" "context" "errors" "fmt" "os" "path/filepath" - "strings" "syscall" pi "github.com/CovenantSQL/CovenantSQL/blockproducer/interfaces" @@ -31,6 +29,7 @@ import ( "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" "github.com/CovenantSQL/CovenantSQL/crypto/hash" + "github.com/CovenantSQL/CovenantSQL/crypto/kms" "github.com/CovenantSQL/CovenantSQL/utils" "github.com/CovenantSQL/CovenantSQL/utils/log" "github.com/sirupsen/logrus" @@ -137,27 +136,25 @@ func readMasterKey(skip bool) string { return string(bytePwd) } -func askDeletePath(path string) { - if _, err := os.Stat(path); err == nil { - reader := bufio.NewReader(os.Stdin) - fmt.Printf("\"%s\" already exists. \nDo you want to delete it? 
(y or n, press Enter for default n):\n", - path) - t, err := reader.ReadString('\n') - t = strings.Trim(t, "\n") - if err != nil { - ConsoleLog.WithError(err).Error("unexpected error") - SetExitStatus(1) - Exit() - } - if strings.Compare(t, "y") == 0 || strings.Compare(t, "yes") == 0 { - err = os.RemoveAll(path) - if err != nil { - ConsoleLog.WithError(err).Error("unexpected error") - SetExitStatus(1) - Exit() +func getPublic() *asymmetric.PublicKey { + //if config has public, use it + for _, node := range conf.GConf.KnownNodes { + if node.ID == conf.GConf.ThisNodeID { + if node.PublicKey != nil { + ConsoleLog.Infof("use public key in config file: %s", configFile) + return node.PublicKey } - } else { - Exit() + break } } + + //use config specific private key file(already init by configInit()) + ConsoleLog.Infof("generate public key directly from private key: %s", conf.GConf.PrivateKeyFile) + privateKey, err := kms.LoadPrivateKey(conf.GConf.PrivateKeyFile, []byte(password)) + if err != nil { + ConsoleLog.WithError(err).Error("load private key file failed") + SetExitStatus(1) + ExitIfErrors() + } + return privateKey.PubKey() } diff --git a/cmd/cql/internal/generate.go b/cmd/cql/internal/generate.go index faef3e02e..4ed79a6e4 100644 --- a/cmd/cql/internal/generate.go +++ b/cmd/cql/internal/generate.go @@ -17,19 +17,18 @@ package internal import ( + "bufio" "encoding/hex" "fmt" + "os" + "strings" - "github.com/CovenantSQL/CovenantSQL/conf" "github.com/CovenantSQL/CovenantSQL/crypto" - "github.com/CovenantSQL/CovenantSQL/crypto/asymmetric" - "github.com/CovenantSQL/CovenantSQL/crypto/kms" - mine "github.com/CovenantSQL/CovenantSQL/pow/cpuminer" ) // CmdGenerate is cql generate command entity. 
var CmdGenerate = &Command{ - UsageLine: "cql generate [-config file] config/wallet/public/nonce", + UsageLine: "cql generate [-config file] config/wallet/public", Short: "generate config related file or keys", Long: ` Generate command can generate private.key and config.yaml for CovenantSQL. @@ -44,6 +43,31 @@ func init() { addCommonFlags(CmdGenerate) } +func askDeletePath(path string) { + if _, err := os.Stat(path); err == nil { + reader := bufio.NewReader(os.Stdin) + fmt.Printf("\"%s\" already exists. \nDo you want to delete it? (y or n, press Enter for default n):\n", + path) + t, err := reader.ReadString('\n') + t = strings.Trim(t, "\n") + if err != nil { + ConsoleLog.WithError(err).Error("unexpected error") + SetExitStatus(1) + Exit() + } + if strings.Compare(t, "y") == 0 || strings.Compare(t, "yes") == 0 { + err = os.RemoveAll(path) + if err != nil { + ConsoleLog.WithError(err).Error("unexpected error") + SetExitStatus(1) + Exit() + } + } else { + Exit() + } + } +} + func runGenerate(cmd *Command, args []string) { if len(args) != 1 { ConsoleLog.Error("Generate command need specific type as params") @@ -59,11 +83,8 @@ func runGenerate(cmd *Command, args []string) { walletGen() case "public": configInit() - publicKey := publicGen() + publicKey := getPublic() fmt.Printf("Public key's hex: %s\n", hex.EncodeToString(publicKey.Serialize())) - case "nonce": - configInit() - _ = nonceGen() default: cmd.Usage() SetExitStatus(1) @@ -71,33 +92,6 @@ func runGenerate(cmd *Command, args []string) { } } -func publicGen() *asymmetric.PublicKey { - //use config specific private key file(already init by configInit()) - ConsoleLog.Infof("generate public key directly from private key: %s", conf.GConf.PrivateKeyFile) - privateKey, err := kms.LoadPrivateKey(conf.GConf.PrivateKeyFile, []byte(password)) - if err != nil { - ConsoleLog.WithError(err).Error("load private key file failed") - SetExitStatus(1) - ExitIfErrors() - } - return privateKey.PubKey() -} - -func getPublic() 
*asymmetric.PublicKey { - //if config has public, use it - for _, node := range conf.GConf.KnownNodes { - if node.ID == conf.GConf.ThisNodeID { - if node.PublicKey != nil { - ConsoleLog.Infof("use public key in config file: %s", configFile) - return node.PublicKey - } - break - } - } - - return publicGen() -} - func configGen() { } @@ -120,7 +114,3 @@ func walletGen() { //TODO store in config.yaml } - -func nonceGen() *mine.NonceInfo { - return nil -} From b42e42ff705ed1e497fa75b82be7432ae3cdb076 Mon Sep 17 00:00:00 2001 From: auxten Date: Tue, 26 Mar 2019 17:58:33 +0800 Subject: [PATCH 204/244] Remove DNSSEC stuff package --- Gopkg.lock | 58 +- Gopkg.toml | 4 + .../beacon}/LICENSE | 5 +- .../CovenantSQL/beacon/ipv6/ipv6.go | 60 + vendor/github.com/CovenantSQL/xurls/LICENSE | 27 - vendor/github.com/CovenantSQL/xurls/README.md | 30 - .../github.com/CovenantSQL/xurls/schemes.go | 298 -- vendor/github.com/CovenantSQL/xurls/tlds.go | 1562 -------- .../CovenantSQL/xurls/tlds_pseudo.go | 24 - vendor/github.com/CovenantSQL/xurls/xurls.go | 103 - vendor/github.com/beevik/ntp/CONTRIBUTORS | 7 - vendor/github.com/beevik/ntp/LICENSE | 24 - vendor/github.com/beevik/ntp/README.md | 71 - vendor/github.com/beevik/ntp/RELEASE_NOTES.md | 54 - vendor/github.com/beevik/ntp/ntp.go | 565 --- .../go-opengraph/opengraph/opengraph.go | 365 -- vendor/github.com/miekg/dns/.codecov.yml | 8 - vendor/github.com/miekg/dns/AUTHORS | 1 - vendor/github.com/miekg/dns/CONTRIBUTORS | 10 - vendor/github.com/miekg/dns/COPYRIGHT | 9 - vendor/github.com/miekg/dns/Gopkg.lock | 57 - vendor/github.com/miekg/dns/Gopkg.toml | 38 - vendor/github.com/miekg/dns/LICENSE | 32 - vendor/github.com/miekg/dns/Makefile.fuzz | 33 - vendor/github.com/miekg/dns/Makefile.release | 52 - vendor/github.com/miekg/dns/README.md | 169 - vendor/github.com/miekg/dns/acceptfunc.go | 54 - vendor/github.com/miekg/dns/client.go | 496 --- vendor/github.com/miekg/dns/clientconfig.go | 139 - vendor/github.com/miekg/dns/dane.go | 43 - 
vendor/github.com/miekg/dns/defaults.go | 288 -- vendor/github.com/miekg/dns/dns.go | 103 - vendor/github.com/miekg/dns/dnssec.go | 801 ---- vendor/github.com/miekg/dns/dnssec_keygen.go | 178 - vendor/github.com/miekg/dns/dnssec_keyscan.go | 352 -- vendor/github.com/miekg/dns/dnssec_privkey.go | 93 - vendor/github.com/miekg/dns/doc.go | 269 -- vendor/github.com/miekg/dns/duplicate.go | 25 - .../miekg/dns/duplicate_generate.go | 158 - vendor/github.com/miekg/dns/edns.go | 623 --- vendor/github.com/miekg/dns/format.go | 87 - vendor/github.com/miekg/dns/fuzz.go | 23 - vendor/github.com/miekg/dns/generate.go | 242 -- vendor/github.com/miekg/dns/labels.go | 191 - vendor/github.com/miekg/dns/listen_go111.go | 44 - .../github.com/miekg/dns/listen_go_not111.go | 23 - vendor/github.com/miekg/dns/msg.go | 1231 ------ vendor/github.com/miekg/dns/msg_generate.go | 345 -- vendor/github.com/miekg/dns/msg_helpers.go | 633 --- vendor/github.com/miekg/dns/nsecx.go | 95 - vendor/github.com/miekg/dns/privaterr.go | 151 - vendor/github.com/miekg/dns/reverse.go | 43 - vendor/github.com/miekg/dns/sanitize.go | 85 - vendor/github.com/miekg/dns/scan.go | 1331 ------- vendor/github.com/miekg/dns/scan_rr.go | 2209 ----------- vendor/github.com/miekg/dns/serve_mux.go | 147 - vendor/github.com/miekg/dns/server.go | 868 ---- vendor/github.com/miekg/dns/sig0.go | 217 - vendor/github.com/miekg/dns/singleinflight.go | 57 - vendor/github.com/miekg/dns/smimea.go | 47 - vendor/github.com/miekg/dns/tlsa.go | 47 - vendor/github.com/miekg/dns/tsig.go | 386 -- vendor/github.com/miekg/dns/types.go | 1412 ------- vendor/github.com/miekg/dns/types_generate.go | 278 -- vendor/github.com/miekg/dns/udp.go | 102 - vendor/github.com/miekg/dns/udp_windows.go | 37 - vendor/github.com/miekg/dns/update.go | 106 - vendor/github.com/miekg/dns/version.go | 15 - vendor/github.com/miekg/dns/xfr.go | 260 -- vendor/github.com/miekg/dns/zduplicate.go | 943 ----- vendor/github.com/miekg/dns/zmsg.go | 3475 ----------------- 
vendor/github.com/miekg/dns/ztypes.go | 863 ---- vendor/golang.org/x/net/bpf/asm.go | 41 - vendor/golang.org/x/net/bpf/constants.go | 222 -- vendor/golang.org/x/net/bpf/doc.go | 82 - vendor/golang.org/x/net/bpf/instructions.go | 726 ---- vendor/golang.org/x/net/bpf/setter.go | 10 - vendor/golang.org/x/net/bpf/vm.go | 150 - .../golang.org/x/net/bpf/vm_instructions.go | 181 - vendor/golang.org/x/net/html/atom/atom.go | 78 - vendor/golang.org/x/net/html/atom/gen.go | 712 ---- vendor/golang.org/x/net/html/atom/table.go | 783 ---- vendor/golang.org/x/net/html/const.go | 112 - vendor/golang.org/x/net/html/doc.go | 106 - vendor/golang.org/x/net/html/doctype.go | 156 - vendor/golang.org/x/net/html/entity.go | 2253 ----------- vendor/golang.org/x/net/html/escape.go | 258 -- vendor/golang.org/x/net/html/foreign.go | 226 -- vendor/golang.org/x/net/html/node.go | 220 -- vendor/golang.org/x/net/html/parse.go | 2311 ----------- vendor/golang.org/x/net/html/render.go | 271 -- vendor/golang.org/x/net/html/token.go | 1219 ------ .../golang.org/x/net/internal/iana/const.go | 223 -- vendor/golang.org/x/net/internal/iana/gen.go | 383 -- .../x/net/internal/socket/cmsghdr.go | 11 - .../x/net/internal/socket/cmsghdr_bsd.go | 13 - .../internal/socket/cmsghdr_linux_32bit.go | 14 - .../internal/socket/cmsghdr_linux_64bit.go | 14 - .../internal/socket/cmsghdr_solaris_64bit.go | 14 - .../x/net/internal/socket/cmsghdr_stub.go | 17 - .../x/net/internal/socket/defs_darwin.go | 44 - .../x/net/internal/socket/defs_dragonfly.go | 44 - .../x/net/internal/socket/defs_freebsd.go | 44 - .../x/net/internal/socket/defs_linux.go | 49 - .../x/net/internal/socket/defs_netbsd.go | 47 - .../x/net/internal/socket/defs_openbsd.go | 44 - .../x/net/internal/socket/defs_solaris.go | 44 - .../golang.org/x/net/internal/socket/empty.s | 7 - .../x/net/internal/socket/error_unix.go | 31 - .../x/net/internal/socket/error_windows.go | 26 - .../x/net/internal/socket/iovec_32bit.go | 19 - 
.../x/net/internal/socket/iovec_64bit.go | 19 - .../internal/socket/iovec_solaris_64bit.go | 19 - .../x/net/internal/socket/iovec_stub.go | 11 - .../x/net/internal/socket/mmsghdr_stub.go | 21 - .../x/net/internal/socket/mmsghdr_unix.go | 42 - .../x/net/internal/socket/msghdr_bsd.go | 39 - .../x/net/internal/socket/msghdr_bsdvar.go | 16 - .../x/net/internal/socket/msghdr_linux.go | 36 - .../net/internal/socket/msghdr_linux_32bit.go | 24 - .../net/internal/socket/msghdr_linux_64bit.go | 24 - .../x/net/internal/socket/msghdr_openbsd.go | 14 - .../internal/socket/msghdr_solaris_64bit.go | 36 - .../x/net/internal/socket/msghdr_stub.go | 14 - .../x/net/internal/socket/rawconn.go | 66 - .../x/net/internal/socket/rawconn_mmsg.go | 74 - .../x/net/internal/socket/rawconn_msg.go | 77 - .../x/net/internal/socket/rawconn_nommsg.go | 18 - .../x/net/internal/socket/rawconn_nomsg.go | 18 - .../x/net/internal/socket/rawconn_stub.go | 25 - .../x/net/internal/socket/reflect.go | 62 - .../x/net/internal/socket/socket.go | 285 -- .../golang.org/x/net/internal/socket/sys.go | 33 - .../x/net/internal/socket/sys_bsd.go | 17 - .../x/net/internal/socket/sys_bsdvar.go | 20 - .../x/net/internal/socket/sys_darwin.go | 7 - .../x/net/internal/socket/sys_dragonfly.go | 7 - .../net/internal/socket/sys_go1_11_darwin.go | 33 - .../net/internal/socket/sys_go1_12_darwin.go | 42 - .../x/net/internal/socket/sys_linux.go | 27 - .../x/net/internal/socket/sys_linux_386.go | 55 - .../x/net/internal/socket/sys_linux_386.s | 11 - .../x/net/internal/socket/sys_linux_amd64.go | 10 - .../x/net/internal/socket/sys_linux_arm.go | 10 - .../x/net/internal/socket/sys_linux_arm64.go | 10 - .../x/net/internal/socket/sys_linux_mips.go | 10 - .../x/net/internal/socket/sys_linux_mips64.go | 10 - .../net/internal/socket/sys_linux_mips64le.go | 10 - .../x/net/internal/socket/sys_linux_mipsle.go | 10 - .../x/net/internal/socket/sys_linux_ppc64.go | 10 - .../net/internal/socket/sys_linux_ppc64le.go | 10 - 
.../x/net/internal/socket/sys_linux_s390x.go | 55 - .../x/net/internal/socket/sys_linux_s390x.s | 11 - .../x/net/internal/socket/sys_netbsd.go | 25 - .../x/net/internal/socket/sys_posix.go | 184 - .../x/net/internal/socket/sys_solaris.go | 71 - .../x/net/internal/socket/sys_solaris_amd64.s | 11 - .../x/net/internal/socket/sys_stub.go | 64 - .../x/net/internal/socket/sys_unix.go | 33 - .../x/net/internal/socket/sys_windows.go | 70 - .../x/net/internal/socket/zsys_darwin_386.go | 59 - .../net/internal/socket/zsys_darwin_amd64.go | 61 - .../x/net/internal/socket/zsys_darwin_arm.go | 59 - .../net/internal/socket/zsys_darwin_arm64.go | 61 - .../internal/socket/zsys_dragonfly_amd64.go | 61 - .../x/net/internal/socket/zsys_freebsd_386.go | 59 - .../net/internal/socket/zsys_freebsd_amd64.go | 61 - .../x/net/internal/socket/zsys_freebsd_arm.go | 59 - .../x/net/internal/socket/zsys_linux_386.go | 63 - .../x/net/internal/socket/zsys_linux_amd64.go | 66 - .../x/net/internal/socket/zsys_linux_arm.go | 63 - .../x/net/internal/socket/zsys_linux_arm64.go | 66 - .../x/net/internal/socket/zsys_linux_mips.go | 63 - .../net/internal/socket/zsys_linux_mips64.go | 66 - .../internal/socket/zsys_linux_mips64le.go | 66 - .../net/internal/socket/zsys_linux_mipsle.go | 63 - .../x/net/internal/socket/zsys_linux_ppc64.go | 66 - .../net/internal/socket/zsys_linux_ppc64le.go | 66 - .../x/net/internal/socket/zsys_linux_s390x.go | 66 - .../x/net/internal/socket/zsys_netbsd_386.go | 65 - .../net/internal/socket/zsys_netbsd_amd64.go | 68 - .../x/net/internal/socket/zsys_netbsd_arm.go | 65 - .../x/net/internal/socket/zsys_openbsd_386.go | 59 - .../net/internal/socket/zsys_openbsd_amd64.go | 61 - .../x/net/internal/socket/zsys_openbsd_arm.go | 59 - .../net/internal/socket/zsys_solaris_amd64.go | 60 - vendor/golang.org/x/net/ipv4/batch.go | 190 - vendor/golang.org/x/net/ipv4/control.go | 144 - vendor/golang.org/x/net/ipv4/control_bsd.go | 40 - .../golang.org/x/net/ipv4/control_pktinfo.go | 39 - 
vendor/golang.org/x/net/ipv4/control_stub.go | 13 - vendor/golang.org/x/net/ipv4/control_unix.go | 73 - .../golang.org/x/net/ipv4/control_windows.go | 16 - vendor/golang.org/x/net/ipv4/defs_darwin.go | 77 - .../golang.org/x/net/ipv4/defs_dragonfly.go | 38 - vendor/golang.org/x/net/ipv4/defs_freebsd.go | 75 - vendor/golang.org/x/net/ipv4/defs_linux.go | 122 - vendor/golang.org/x/net/ipv4/defs_netbsd.go | 37 - vendor/golang.org/x/net/ipv4/defs_openbsd.go | 37 - vendor/golang.org/x/net/ipv4/defs_solaris.go | 84 - vendor/golang.org/x/net/ipv4/dgramopt.go | 264 -- vendor/golang.org/x/net/ipv4/doc.go | 245 -- vendor/golang.org/x/net/ipv4/endpoint.go | 186 - vendor/golang.org/x/net/ipv4/gen.go | 199 - vendor/golang.org/x/net/ipv4/genericopt.go | 55 - vendor/golang.org/x/net/ipv4/header.go | 170 - vendor/golang.org/x/net/ipv4/helper.go | 64 - vendor/golang.org/x/net/ipv4/iana.go | 38 - vendor/golang.org/x/net/ipv4/icmp.go | 57 - vendor/golang.org/x/net/ipv4/icmp_linux.go | 25 - vendor/golang.org/x/net/ipv4/icmp_stub.go | 25 - vendor/golang.org/x/net/ipv4/packet.go | 68 - vendor/golang.org/x/net/ipv4/packet_go1_8.go | 56 - vendor/golang.org/x/net/ipv4/packet_go1_9.go | 67 - vendor/golang.org/x/net/ipv4/payload.go | 23 - vendor/golang.org/x/net/ipv4/payload_cmsg.go | 33 - .../x/net/ipv4/payload_cmsg_go1_8.go | 59 - .../x/net/ipv4/payload_cmsg_go1_9.go | 67 - .../golang.org/x/net/ipv4/payload_nocmsg.go | 39 - vendor/golang.org/x/net/ipv4/sockopt.go | 44 - vendor/golang.org/x/net/ipv4/sockopt_posix.go | 71 - vendor/golang.org/x/net/ipv4/sockopt_stub.go | 42 - vendor/golang.org/x/net/ipv4/sys_asmreq.go | 119 - .../golang.org/x/net/ipv4/sys_asmreq_stub.go | 25 - vendor/golang.org/x/net/ipv4/sys_asmreqn.go | 42 - .../golang.org/x/net/ipv4/sys_asmreqn_stub.go | 21 - vendor/golang.org/x/net/ipv4/sys_bpf.go | 23 - vendor/golang.org/x/net/ipv4/sys_bpf_stub.go | 16 - vendor/golang.org/x/net/ipv4/sys_bsd.go | 37 - vendor/golang.org/x/net/ipv4/sys_darwin.go | 93 - 
vendor/golang.org/x/net/ipv4/sys_dragonfly.go | 35 - vendor/golang.org/x/net/ipv4/sys_freebsd.go | 76 - vendor/golang.org/x/net/ipv4/sys_linux.go | 59 - vendor/golang.org/x/net/ipv4/sys_solaris.go | 57 - vendor/golang.org/x/net/ipv4/sys_ssmreq.go | 54 - .../golang.org/x/net/ipv4/sys_ssmreq_stub.go | 21 - vendor/golang.org/x/net/ipv4/sys_stub.go | 13 - vendor/golang.org/x/net/ipv4/sys_windows.go | 67 - vendor/golang.org/x/net/ipv4/zsys_darwin.go | 99 - .../golang.org/x/net/ipv4/zsys_dragonfly.go | 31 - .../golang.org/x/net/ipv4/zsys_freebsd_386.go | 93 - .../x/net/ipv4/zsys_freebsd_amd64.go | 95 - .../golang.org/x/net/ipv4/zsys_freebsd_arm.go | 95 - .../golang.org/x/net/ipv4/zsys_linux_386.go | 148 - .../golang.org/x/net/ipv4/zsys_linux_amd64.go | 150 - .../golang.org/x/net/ipv4/zsys_linux_arm.go | 148 - .../golang.org/x/net/ipv4/zsys_linux_arm64.go | 150 - .../golang.org/x/net/ipv4/zsys_linux_mips.go | 148 - .../x/net/ipv4/zsys_linux_mips64.go | 150 - .../x/net/ipv4/zsys_linux_mips64le.go | 150 - .../x/net/ipv4/zsys_linux_mipsle.go | 148 - .../golang.org/x/net/ipv4/zsys_linux_ppc.go | 148 - .../golang.org/x/net/ipv4/zsys_linux_ppc64.go | 150 - .../x/net/ipv4/zsys_linux_ppc64le.go | 150 - .../golang.org/x/net/ipv4/zsys_linux_s390x.go | 150 - vendor/golang.org/x/net/ipv4/zsys_netbsd.go | 30 - vendor/golang.org/x/net/ipv4/zsys_openbsd.go | 30 - vendor/golang.org/x/net/ipv4/zsys_solaris.go | 100 - vendor/golang.org/x/net/ipv6/batch.go | 118 - vendor/golang.org/x/net/ipv6/control.go | 187 - .../x/net/ipv6/control_rfc2292_unix.go | 48 - .../x/net/ipv6/control_rfc3542_unix.go | 94 - vendor/golang.org/x/net/ipv6/control_stub.go | 13 - vendor/golang.org/x/net/ipv6/control_unix.go | 55 - .../golang.org/x/net/ipv6/control_windows.go | 16 - vendor/golang.org/x/net/ipv6/defs_darwin.go | 112 - .../golang.org/x/net/ipv6/defs_dragonfly.go | 84 - vendor/golang.org/x/net/ipv6/defs_freebsd.go | 105 - vendor/golang.org/x/net/ipv6/defs_linux.go | 147 - 
vendor/golang.org/x/net/ipv6/defs_netbsd.go | 80 - vendor/golang.org/x/net/ipv6/defs_openbsd.go | 89 - vendor/golang.org/x/net/ipv6/defs_solaris.go | 114 - vendor/golang.org/x/net/ipv6/dgramopt.go | 301 -- vendor/golang.org/x/net/ipv6/doc.go | 244 -- vendor/golang.org/x/net/ipv6/endpoint.go | 127 - vendor/golang.org/x/net/ipv6/gen.go | 199 - vendor/golang.org/x/net/ipv6/genericopt.go | 56 - vendor/golang.org/x/net/ipv6/header.go | 55 - vendor/golang.org/x/net/ipv6/helper.go | 58 - vendor/golang.org/x/net/ipv6/iana.go | 86 - vendor/golang.org/x/net/ipv6/icmp.go | 60 - vendor/golang.org/x/net/ipv6/icmp_bsd.go | 29 - vendor/golang.org/x/net/ipv6/icmp_linux.go | 27 - vendor/golang.org/x/net/ipv6/icmp_solaris.go | 27 - vendor/golang.org/x/net/ipv6/icmp_stub.go | 23 - vendor/golang.org/x/net/ipv6/icmp_windows.go | 22 - vendor/golang.org/x/net/ipv6/payload.go | 23 - vendor/golang.org/x/net/ipv6/payload_cmsg.go | 32 - .../x/net/ipv6/payload_cmsg_go1_8.go | 55 - .../x/net/ipv6/payload_cmsg_go1_9.go | 57 - .../golang.org/x/net/ipv6/payload_nocmsg.go | 38 - vendor/golang.org/x/net/ipv6/sockopt.go | 43 - vendor/golang.org/x/net/ipv6/sockopt_posix.go | 87 - vendor/golang.org/x/net/ipv6/sockopt_stub.go | 46 - vendor/golang.org/x/net/ipv6/sys_asmreq.go | 24 - .../golang.org/x/net/ipv6/sys_asmreq_stub.go | 17 - vendor/golang.org/x/net/ipv6/sys_bpf.go | 23 - vendor/golang.org/x/net/ipv6/sys_bpf_stub.go | 16 - vendor/golang.org/x/net/ipv6/sys_bsd.go | 57 - vendor/golang.org/x/net/ipv6/sys_darwin.go | 106 - vendor/golang.org/x/net/ipv6/sys_freebsd.go | 92 - vendor/golang.org/x/net/ipv6/sys_linux.go | 74 - vendor/golang.org/x/net/ipv6/sys_solaris.go | 74 - vendor/golang.org/x/net/ipv6/sys_ssmreq.go | 54 - .../golang.org/x/net/ipv6/sys_ssmreq_stub.go | 21 - vendor/golang.org/x/net/ipv6/sys_stub.go | 13 - vendor/golang.org/x/net/ipv6/sys_windows.go | 75 - vendor/golang.org/x/net/ipv6/zsys_darwin.go | 131 - .../golang.org/x/net/ipv6/zsys_dragonfly.go | 88 - 
.../golang.org/x/net/ipv6/zsys_freebsd_386.go | 122 - .../x/net/ipv6/zsys_freebsd_amd64.go | 124 - .../golang.org/x/net/ipv6/zsys_freebsd_arm.go | 124 - .../golang.org/x/net/ipv6/zsys_linux_386.go | 170 - .../golang.org/x/net/ipv6/zsys_linux_amd64.go | 172 - .../golang.org/x/net/ipv6/zsys_linux_arm.go | 170 - .../golang.org/x/net/ipv6/zsys_linux_arm64.go | 172 - .../golang.org/x/net/ipv6/zsys_linux_mips.go | 170 - .../x/net/ipv6/zsys_linux_mips64.go | 172 - .../x/net/ipv6/zsys_linux_mips64le.go | 172 - .../x/net/ipv6/zsys_linux_mipsle.go | 170 - .../golang.org/x/net/ipv6/zsys_linux_ppc.go | 170 - .../golang.org/x/net/ipv6/zsys_linux_ppc64.go | 172 - .../x/net/ipv6/zsys_linux_ppc64le.go | 172 - .../golang.org/x/net/ipv6/zsys_linux_s390x.go | 172 - vendor/golang.org/x/net/ipv6/zsys_netbsd.go | 84 - vendor/golang.org/x/net/ipv6/zsys_openbsd.go | 93 - vendor/golang.org/x/net/ipv6/zsys_solaris.go | 131 - 327 files changed, 77 insertions(+), 50403 deletions(-) rename vendor/github.com/{dyatlov/go-opengraph => CovenantSQL/beacon}/LICENSE (94%) create mode 100644 vendor/github.com/CovenantSQL/beacon/ipv6/ipv6.go delete mode 100644 vendor/github.com/CovenantSQL/xurls/LICENSE delete mode 100644 vendor/github.com/CovenantSQL/xurls/README.md delete mode 100644 vendor/github.com/CovenantSQL/xurls/schemes.go delete mode 100644 vendor/github.com/CovenantSQL/xurls/tlds.go delete mode 100644 vendor/github.com/CovenantSQL/xurls/tlds_pseudo.go delete mode 100644 vendor/github.com/CovenantSQL/xurls/xurls.go delete mode 100644 vendor/github.com/beevik/ntp/CONTRIBUTORS delete mode 100644 vendor/github.com/beevik/ntp/LICENSE delete mode 100644 vendor/github.com/beevik/ntp/README.md delete mode 100644 vendor/github.com/beevik/ntp/RELEASE_NOTES.md delete mode 100644 vendor/github.com/beevik/ntp/ntp.go delete mode 100644 vendor/github.com/dyatlov/go-opengraph/opengraph/opengraph.go delete mode 100644 vendor/github.com/miekg/dns/.codecov.yml delete mode 100644 
vendor/github.com/miekg/dns/AUTHORS delete mode 100644 vendor/github.com/miekg/dns/CONTRIBUTORS delete mode 100644 vendor/github.com/miekg/dns/COPYRIGHT delete mode 100644 vendor/github.com/miekg/dns/Gopkg.lock delete mode 100644 vendor/github.com/miekg/dns/Gopkg.toml delete mode 100644 vendor/github.com/miekg/dns/LICENSE delete mode 100644 vendor/github.com/miekg/dns/Makefile.fuzz delete mode 100644 vendor/github.com/miekg/dns/Makefile.release delete mode 100644 vendor/github.com/miekg/dns/README.md delete mode 100644 vendor/github.com/miekg/dns/acceptfunc.go delete mode 100644 vendor/github.com/miekg/dns/client.go delete mode 100644 vendor/github.com/miekg/dns/clientconfig.go delete mode 100644 vendor/github.com/miekg/dns/dane.go delete mode 100644 vendor/github.com/miekg/dns/defaults.go delete mode 100644 vendor/github.com/miekg/dns/dns.go delete mode 100644 vendor/github.com/miekg/dns/dnssec.go delete mode 100644 vendor/github.com/miekg/dns/dnssec_keygen.go delete mode 100644 vendor/github.com/miekg/dns/dnssec_keyscan.go delete mode 100644 vendor/github.com/miekg/dns/dnssec_privkey.go delete mode 100644 vendor/github.com/miekg/dns/doc.go delete mode 100644 vendor/github.com/miekg/dns/duplicate.go delete mode 100644 vendor/github.com/miekg/dns/duplicate_generate.go delete mode 100644 vendor/github.com/miekg/dns/edns.go delete mode 100644 vendor/github.com/miekg/dns/format.go delete mode 100644 vendor/github.com/miekg/dns/fuzz.go delete mode 100644 vendor/github.com/miekg/dns/generate.go delete mode 100644 vendor/github.com/miekg/dns/labels.go delete mode 100644 vendor/github.com/miekg/dns/listen_go111.go delete mode 100644 vendor/github.com/miekg/dns/listen_go_not111.go delete mode 100644 vendor/github.com/miekg/dns/msg.go delete mode 100644 vendor/github.com/miekg/dns/msg_generate.go delete mode 100644 vendor/github.com/miekg/dns/msg_helpers.go delete mode 100644 vendor/github.com/miekg/dns/nsecx.go delete mode 100644 vendor/github.com/miekg/dns/privaterr.go 
delete mode 100644 vendor/github.com/miekg/dns/reverse.go delete mode 100644 vendor/github.com/miekg/dns/sanitize.go delete mode 100644 vendor/github.com/miekg/dns/scan.go delete mode 100644 vendor/github.com/miekg/dns/scan_rr.go delete mode 100644 vendor/github.com/miekg/dns/serve_mux.go delete mode 100644 vendor/github.com/miekg/dns/server.go delete mode 100644 vendor/github.com/miekg/dns/sig0.go delete mode 100644 vendor/github.com/miekg/dns/singleinflight.go delete mode 100644 vendor/github.com/miekg/dns/smimea.go delete mode 100644 vendor/github.com/miekg/dns/tlsa.go delete mode 100644 vendor/github.com/miekg/dns/tsig.go delete mode 100644 vendor/github.com/miekg/dns/types.go delete mode 100644 vendor/github.com/miekg/dns/types_generate.go delete mode 100644 vendor/github.com/miekg/dns/udp.go delete mode 100644 vendor/github.com/miekg/dns/udp_windows.go delete mode 100644 vendor/github.com/miekg/dns/update.go delete mode 100644 vendor/github.com/miekg/dns/version.go delete mode 100644 vendor/github.com/miekg/dns/xfr.go delete mode 100644 vendor/github.com/miekg/dns/zduplicate.go delete mode 100644 vendor/github.com/miekg/dns/zmsg.go delete mode 100644 vendor/github.com/miekg/dns/ztypes.go delete mode 100644 vendor/golang.org/x/net/bpf/asm.go delete mode 100644 vendor/golang.org/x/net/bpf/constants.go delete mode 100644 vendor/golang.org/x/net/bpf/doc.go delete mode 100644 vendor/golang.org/x/net/bpf/instructions.go delete mode 100644 vendor/golang.org/x/net/bpf/setter.go delete mode 100644 vendor/golang.org/x/net/bpf/vm.go delete mode 100644 vendor/golang.org/x/net/bpf/vm_instructions.go delete mode 100644 vendor/golang.org/x/net/html/atom/atom.go delete mode 100644 vendor/golang.org/x/net/html/atom/gen.go delete mode 100644 vendor/golang.org/x/net/html/atom/table.go delete mode 100644 vendor/golang.org/x/net/html/const.go delete mode 100644 vendor/golang.org/x/net/html/doc.go delete mode 100644 vendor/golang.org/x/net/html/doctype.go delete mode 100644 
vendor/golang.org/x/net/html/entity.go delete mode 100644 vendor/golang.org/x/net/html/escape.go delete mode 100644 vendor/golang.org/x/net/html/foreign.go delete mode 100644 vendor/golang.org/x/net/html/node.go delete mode 100644 vendor/golang.org/x/net/html/parse.go delete mode 100644 vendor/golang.org/x/net/html/render.go delete mode 100644 vendor/golang.org/x/net/html/token.go delete mode 100644 vendor/golang.org/x/net/internal/iana/const.go delete mode 100644 vendor/golang.org/x/net/internal/iana/gen.go delete mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr.go delete mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_bsd.go delete mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_linux_32bit.go delete mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_linux_64bit.go delete mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_solaris_64bit.go delete mode 100644 vendor/golang.org/x/net/internal/socket/cmsghdr_stub.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_darwin.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_dragonfly.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_freebsd.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_linux.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_netbsd.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_openbsd.go delete mode 100644 vendor/golang.org/x/net/internal/socket/defs_solaris.go delete mode 100644 vendor/golang.org/x/net/internal/socket/empty.s delete mode 100644 vendor/golang.org/x/net/internal/socket/error_unix.go delete mode 100644 vendor/golang.org/x/net/internal/socket/error_windows.go delete mode 100644 vendor/golang.org/x/net/internal/socket/iovec_32bit.go delete mode 100644 vendor/golang.org/x/net/internal/socket/iovec_64bit.go delete mode 100644 vendor/golang.org/x/net/internal/socket/iovec_solaris_64bit.go delete mode 100644 
vendor/golang.org/x/net/internal/socket/iovec_stub.go delete mode 100644 vendor/golang.org/x/net/internal/socket/mmsghdr_stub.go delete mode 100644 vendor/golang.org/x/net/internal/socket/mmsghdr_unix.go delete mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_bsd.go delete mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_bsdvar.go delete mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_linux.go delete mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_linux_32bit.go delete mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_linux_64bit.go delete mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_openbsd.go delete mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_solaris_64bit.go delete mode 100644 vendor/golang.org/x/net/internal/socket/msghdr_stub.go delete mode 100644 vendor/golang.org/x/net/internal/socket/rawconn.go delete mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_mmsg.go delete mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_msg.go delete mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_nommsg.go delete mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_nomsg.go delete mode 100644 vendor/golang.org/x/net/internal/socket/rawconn_stub.go delete mode 100644 vendor/golang.org/x/net/internal/socket/reflect.go delete mode 100644 vendor/golang.org/x/net/internal/socket/socket.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_bsd.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_bsdvar.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_darwin.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_dragonfly.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_go1_11_darwin.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_go1_12_darwin.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux.go 
delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_386.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_386.s delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_amd64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_arm.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_arm64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mips.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mips64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mips64le.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_mipsle.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_ppc64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_ppc64le.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_s390x.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_linux_s390x.s delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_netbsd.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_posix.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_solaris.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_solaris_amd64.s delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_stub.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_unix.go delete mode 100644 vendor/golang.org/x/net/internal/socket/sys_windows.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_386.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_amd64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_arm.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_darwin_arm64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_dragonfly_amd64.go delete mode 100644 
vendor/golang.org/x/net/internal/socket/zsys_freebsd_386.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_freebsd_arm.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_386.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_amd64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_arm.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_arm64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mips.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mips64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mips64le.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_mipsle.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_ppc64le.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_linux_s390x.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_netbsd_386.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_netbsd_amd64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_netbsd_arm.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_386.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_amd64.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_arm.go delete mode 100644 vendor/golang.org/x/net/internal/socket/zsys_solaris_amd64.go delete mode 100644 vendor/golang.org/x/net/ipv4/batch.go delete mode 100644 vendor/golang.org/x/net/ipv4/control.go delete mode 100644 vendor/golang.org/x/net/ipv4/control_bsd.go delete mode 100644 vendor/golang.org/x/net/ipv4/control_pktinfo.go delete mode 100644 vendor/golang.org/x/net/ipv4/control_stub.go delete mode 100644 
vendor/golang.org/x/net/ipv4/control_unix.go delete mode 100644 vendor/golang.org/x/net/ipv4/control_windows.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_darwin.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_dragonfly.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_freebsd.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_linux.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_netbsd.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_openbsd.go delete mode 100644 vendor/golang.org/x/net/ipv4/defs_solaris.go delete mode 100644 vendor/golang.org/x/net/ipv4/dgramopt.go delete mode 100644 vendor/golang.org/x/net/ipv4/doc.go delete mode 100644 vendor/golang.org/x/net/ipv4/endpoint.go delete mode 100644 vendor/golang.org/x/net/ipv4/gen.go delete mode 100644 vendor/golang.org/x/net/ipv4/genericopt.go delete mode 100644 vendor/golang.org/x/net/ipv4/header.go delete mode 100644 vendor/golang.org/x/net/ipv4/helper.go delete mode 100644 vendor/golang.org/x/net/ipv4/iana.go delete mode 100644 vendor/golang.org/x/net/ipv4/icmp.go delete mode 100644 vendor/golang.org/x/net/ipv4/icmp_linux.go delete mode 100644 vendor/golang.org/x/net/ipv4/icmp_stub.go delete mode 100644 vendor/golang.org/x/net/ipv4/packet.go delete mode 100644 vendor/golang.org/x/net/ipv4/packet_go1_8.go delete mode 100644 vendor/golang.org/x/net/ipv4/packet_go1_9.go delete mode 100644 vendor/golang.org/x/net/ipv4/payload.go delete mode 100644 vendor/golang.org/x/net/ipv4/payload_cmsg.go delete mode 100644 vendor/golang.org/x/net/ipv4/payload_cmsg_go1_8.go delete mode 100644 vendor/golang.org/x/net/ipv4/payload_cmsg_go1_9.go delete mode 100644 vendor/golang.org/x/net/ipv4/payload_nocmsg.go delete mode 100644 vendor/golang.org/x/net/ipv4/sockopt.go delete mode 100644 vendor/golang.org/x/net/ipv4/sockopt_posix.go delete mode 100644 vendor/golang.org/x/net/ipv4/sockopt_stub.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreq.go delete mode 100644 
vendor/golang.org/x/net/ipv4/sys_asmreq_stub.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreqn.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_asmreqn_stub.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_bpf.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_bpf_stub.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_bsd.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_darwin.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_dragonfly.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_freebsd.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_linux.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_solaris.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_ssmreq.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_ssmreq_stub.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_stub.go delete mode 100644 vendor/golang.org/x/net/ipv4/sys_windows.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_darwin.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_dragonfly.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_386.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_arm.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mips.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go delete mode 100644 
vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_netbsd.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_openbsd.go delete mode 100644 vendor/golang.org/x/net/ipv4/zsys_solaris.go delete mode 100644 vendor/golang.org/x/net/ipv6/batch.go delete mode 100644 vendor/golang.org/x/net/ipv6/control.go delete mode 100644 vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go delete mode 100644 vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go delete mode 100644 vendor/golang.org/x/net/ipv6/control_stub.go delete mode 100644 vendor/golang.org/x/net/ipv6/control_unix.go delete mode 100644 vendor/golang.org/x/net/ipv6/control_windows.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_darwin.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_dragonfly.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_freebsd.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_linux.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_netbsd.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_openbsd.go delete mode 100644 vendor/golang.org/x/net/ipv6/defs_solaris.go delete mode 100644 vendor/golang.org/x/net/ipv6/dgramopt.go delete mode 100644 vendor/golang.org/x/net/ipv6/doc.go delete mode 100644 vendor/golang.org/x/net/ipv6/endpoint.go delete mode 100644 vendor/golang.org/x/net/ipv6/gen.go delete mode 100644 vendor/golang.org/x/net/ipv6/genericopt.go delete mode 100644 vendor/golang.org/x/net/ipv6/header.go delete mode 100644 vendor/golang.org/x/net/ipv6/helper.go delete mode 100644 vendor/golang.org/x/net/ipv6/iana.go delete mode 100644 vendor/golang.org/x/net/ipv6/icmp.go delete mode 100644 vendor/golang.org/x/net/ipv6/icmp_bsd.go delete mode 100644 vendor/golang.org/x/net/ipv6/icmp_linux.go delete mode 100644 vendor/golang.org/x/net/ipv6/icmp_solaris.go delete mode 100644 vendor/golang.org/x/net/ipv6/icmp_stub.go delete mode 100644 
vendor/golang.org/x/net/ipv6/icmp_windows.go delete mode 100644 vendor/golang.org/x/net/ipv6/payload.go delete mode 100644 vendor/golang.org/x/net/ipv6/payload_cmsg.go delete mode 100644 vendor/golang.org/x/net/ipv6/payload_cmsg_go1_8.go delete mode 100644 vendor/golang.org/x/net/ipv6/payload_cmsg_go1_9.go delete mode 100644 vendor/golang.org/x/net/ipv6/payload_nocmsg.go delete mode 100644 vendor/golang.org/x/net/ipv6/sockopt.go delete mode 100644 vendor/golang.org/x/net/ipv6/sockopt_posix.go delete mode 100644 vendor/golang.org/x/net/ipv6/sockopt_stub.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_asmreq.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_asmreq_stub.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_bpf.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_bpf_stub.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_bsd.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_darwin.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_freebsd.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_linux.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_solaris.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_ssmreq.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_ssmreq_stub.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_stub.go delete mode 100644 vendor/golang.org/x/net/ipv6/sys_windows.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_darwin.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_dragonfly.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_386.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_arm.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go delete mode 100644 
vendor/golang.org/x/net/ipv6/zsys_linux_mips.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_netbsd.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_openbsd.go delete mode 100644 vendor/golang.org/x/net/ipv6/zsys_solaris.go diff --git a/Gopkg.lock b/Gopkg.lock index 04e26edb9..820225a79 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -22,6 +22,14 @@ pruneopts = "UT" revision = "ea383e6c5f56c8db09130378d7eaf1631d14ecc0" +[[projects]] + branch = "master" + digest = "1:990258f605348daec943e55cef08be4049f496a6fdd02e0e90dd8d2d5a8ea743" + name = "github.com/CovenantSQL/beacon" + packages = ["ipv6"] + pruneopts = "UT" + revision = "9947ddd83bc5f20a432f6f3142a14ccec6977daf" + [[projects]] branch = "develop" digest = "1:9691c7a98f7d559cf573a98dd19c26f0859d30343e59a6a30e91c3d187a8ad37" @@ -44,14 +52,6 @@ pruneopts = "UT" revision = "8673c64d83a0954a21eaf0158bb2e1eb574304da" -[[projects]] - branch = "master" - digest = "1:ef41f61c18347d6949e94a1f648e47415922afcbe894f955b69921014ff7c4d4" - name = "github.com/CovenantSQL/xurls" - packages = ["."] - pruneopts = "UT" - revision = "b72b1571892216ba2a42fb8abeb726d082b4c4d1" - [[projects]] digest = "1:aa94227e54ee105fcd03f25c85d3e5bbe13d2f2d5eb02379b5b4db5320dfb371" name = "github.com/alecthomas/chroma" @@ -92,14 +92,6 @@ revision = "881a441774f9d707d3b7852025b7f2149a556182" version = "v0.6.2" -[[projects]] - digest = "1:a8d622a8049a4aa420e1c509873bb85d4c45c5107f420d922f919bfcb8d08694" - name = "github.com/beevik/ntp" - packages = ["."] - pruneopts = 
"UT" - revision = "62c80a04de2086884d8296004b6d74ee1846c582" - version = "v0.2.0" - [[projects]] branch = "master" digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" @@ -167,14 +159,6 @@ revision = "487489b64fb796de2e55f4e8a4ad1e145f80e957" version = "v1.1.6" -[[projects]] - branch = "master" - digest = "1:665c8850f673be8d358fe61ab00412049e685202aa7466ae5c72ad50d89c84f7" - name = "github.com/dyatlov/go-opengraph" - packages = ["opengraph"] - pruneopts = "UT" - revision = "816b6608b3c8c1e871bc9cf777f390e2532081fe" - [[projects]] digest = "1:547f5df5f708c880f0af507317c8d022b8d62053a5cb075e23c6eeed09eb2e4e" name = "github.com/fortytw2/leaktest" @@ -346,14 +330,6 @@ revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" version = "v1.0.1" -[[projects]] - digest = "1:702ff5d8a0196ccb0627f35996efd1081be00c8ae1719402adbffc6e0f8f59ac" - name = "github.com/miekg/dns" - packages = ["."] - pruneopts = "UT" - revision = "7586a3cbe8ccfc63f82de3ab2ceeb08c9939af72" - version = "v1.1.1" - [[projects]] branch = "master" digest = "1:130cefe87d7eeefc824978dcb78e35672d4c49a11f25c153fbf0cfd952756fa3" @@ -650,18 +626,9 @@ [[projects]] branch = "master" - digest = "1:c079de57d24956ee13947278c02201b9cb478438b84d86fb2df5a891ea4a10c7" + digest = "1:76ee51c3f468493aff39dbacc401e8831fbb765104cbf613b89bef01cf4bad70" name = "golang.org/x/net" - packages = [ - "bpf", - "context", - "html", - "html/atom", - "internal/iana", - "internal/socket", - "ipv4", - "ipv6", - ] + packages = ["context"] pruneopts = "UT" revision = "927f97764cc334a6575f4b7a1584a147864d5723" @@ -692,16 +659,14 @@ "bazil.org/fuse/fs", "bazil.org/fuse/fs/fstestutil", "github.com/CovenantSQL/HashStablePack/marshalhash", + "github.com/CovenantSQL/beacon/ipv6", "github.com/CovenantSQL/go-sqlite3-encrypt", "github.com/CovenantSQL/sqlparser", - "github.com/CovenantSQL/xurls", - "github.com/beevik/ntp", "github.com/btcsuite/btcd/btcec", "github.com/btcsuite/btcutil/base58", "github.com/coreos/bbolt", 
"github.com/cyberdelia/go-metrics-graphite", "github.com/davecgh/go-spew/spew", - "github.com/dyatlov/go-opengraph/opengraph", "github.com/fortytw2/leaktest", "github.com/go-gorp/gorp", "github.com/gorilla/handlers", @@ -711,7 +676,6 @@ "github.com/jmoiron/jsonq", "github.com/jordwest/mock-conn", "github.com/lufia/iostat", - "github.com/miekg/dns", "github.com/minio/blake2b-simd", "github.com/mohae/deepcopy", "github.com/pkg/errors", diff --git a/Gopkg.toml b/Gopkg.toml index add554ac4..8afb44725 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -89,3 +89,7 @@ [[constraint]] name = "github.com/rakyll/statik" version = "0.1.5" + +[[constraint]] + branch = "master" + name = "github.com/CovenantSQL/beacon" diff --git a/vendor/github.com/dyatlov/go-opengraph/LICENSE b/vendor/github.com/CovenantSQL/beacon/LICENSE similarity index 94% rename from vendor/github.com/dyatlov/go-opengraph/LICENSE rename to vendor/github.com/CovenantSQL/beacon/LICENSE index 854759ad2..a8cc26e37 100644 --- a/vendor/github.com/dyatlov/go-opengraph/LICENSE +++ b/vendor/github.com/CovenantSQL/beacon/LICENSE @@ -1,6 +1,6 @@ -The MIT License (MIT) +MIT License -Copyright (c) 2015 Vitaly Dyatlov +Copyright (c) 2019 CovenantSQL Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -19,4 +19,3 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- diff --git a/vendor/github.com/CovenantSQL/beacon/ipv6/ipv6.go b/vendor/github.com/CovenantSQL/beacon/ipv6/ipv6.go new file mode 100644 index 000000000..0db8e62fd --- /dev/null +++ b/vendor/github.com/CovenantSQL/beacon/ipv6/ipv6.go @@ -0,0 +1,60 @@ +package ipv6 + +import ( + "fmt" + "github.com/pkg/errors" + "net" + "strings" +) + +func ToIPv6(in []byte) (ips [] net.IP, err error) { + if len(in)%net.IPv6len != 0 { + return nil, errors.New("must be n * 16 length") + } + ipCount := len(in) / net.IPv6len + ips = make([]net.IP, ipCount) + for i := 0; i < ipCount; i ++ { + ips[i] = make(net.IP, net.IPv6len) + copy(ips[i], in[i*net.IPv6len:(i+1)*net.IPv6len]) + } + return +} + +func FromIPv6(ips []net.IP) (out []byte, err error) { + ipCount := len(ips) + out = make([]byte, ipCount * net.IPv6len) + for i := 0; i < ipCount; i ++ { + copy(out[i*net.IPv6len:(i+1)*net.IPv6len], ips[i]) + } + + return +} + +func FromDomain(domain string) (out []byte, err error) { + var ips []net.IP + allIPv6 := make([]net.IP, 0, 4) + for i := 0; ; i++ { + ips, err = net.LookupIP(fmt.Sprintf("%02d.%s", i, domain)) + if err != nil { + if _, ok := err.(*net.DNSError); ok && strings.Contains(err.Error(), "no such host") { + break + } else { + return + } + } else { + if len(ips) == 0 { + return nil, errors.New("empty IP list") + } + if len(ips[0]) != net.IPv6len { + return nil, errors.Errorf("unexpected IP: %s", ips[0]) + } + allIPv6 = append(allIPv6, ips[0]) + } + + } + out, err = FromIPv6(allIPv6) + if err != nil { + return nil, errors.Errorf("convert from IPv6 failed: %v", err) + } + return +} \ No newline at end of file diff --git a/vendor/github.com/CovenantSQL/xurls/LICENSE b/vendor/github.com/CovenantSQL/xurls/LICENSE deleted file mode 100644 index 7d71d51a5..000000000 --- a/vendor/github.com/CovenantSQL/xurls/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2015, Daniel Martí. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of the copyright holder nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/CovenantSQL/xurls/README.md b/vendor/github.com/CovenantSQL/xurls/README.md deleted file mode 100644 index d74d1b37f..000000000 --- a/vendor/github.com/CovenantSQL/xurls/README.md +++ /dev/null @@ -1,30 +0,0 @@ -# xurls - -[![GoDoc](https://godoc.org/mvdan.cc/xurls?status.svg)](https://godoc.org/mvdan.cc/xurls) -[![Travis](https://travis-ci.org/mvdan/xurls.svg?branch=master)](https://travis-ci.org/mvdan/xurls) - -Extract urls from text using regular expressions. 
- - go get -u mvdan.cc/xurls - -```go -import "mvdan.cc/xurls" - -func main() { - xurls.Relaxed().FindString("Do gophers live in golang.org?") - // "golang.org" - xurls.Strict().FindAllString("foo.com is http://foo.com/.", -1) - // []string{"http://foo.com/"} -} -``` - -Note that the funcs compile regexes, so avoid calling them repeatedly. - -#### cmd/xurls - - go get -u mvdan.cc/xurls/cmd/xurls - -```shell -$ echo "Do gophers live in http://golang.org?" | xurls -http://golang.org -``` diff --git a/vendor/github.com/CovenantSQL/xurls/schemes.go b/vendor/github.com/CovenantSQL/xurls/schemes.go deleted file mode 100644 index a0c0fdf8e..000000000 --- a/vendor/github.com/CovenantSQL/xurls/schemes.go +++ /dev/null @@ -1,298 +0,0 @@ -// Generated by schemesgen - -package xurls - -// Schemes is a sorted list of all IANA assigned schemes. -// -// Source: -// https://www.iana.org/assignments/uri-schemes/uri-schemes-1.csv -var Schemes = []string{ - `aaa`, - `aaas`, - `about`, - `acap`, - `acct`, - `acr`, - `adiumxtra`, - `afp`, - `afs`, - `aim`, - `appdata`, - `apt`, - `attachment`, - `aw`, - `barion`, - `beshare`, - `bitcoin`, - `bitcoincash`, - `blob`, - `bolo`, - `browserext`, - `callto`, - `cap`, - `chrome`, - `chrome-extension`, - `cid`, - `coap`, - `coap+tcp`, - `coap+ws`, - `coaps`, - `coaps+tcp`, - `coaps+ws`, - `com-eventbrite-attendee`, - `content`, - `conti`, - `crid`, - `cvs`, - `data`, - `dav`, - `diaspora`, - `dict`, - `did`, - `dis`, - `dlna-playcontainer`, - `dlna-playsingle`, - `dns`, - `dntp`, - `dtn`, - `dvb`, - `ed2k`, - `elsi`, - `example`, - `facetime`, - `fax`, - `feed`, - `feedready`, - `file`, - `filesystem`, - `finger`, - `fish`, - `ftp`, - `geo`, - `gg`, - `git`, - `gizmoproject`, - `go`, - `gopher`, - `graph`, - `gtalk`, - `h323`, - `ham`, - `hcap`, - `hcp`, - `http`, - `https`, - `hxxp`, - `hxxps`, - `hydrazone`, - `iax`, - `icap`, - `icon`, - `im`, - `imap`, - `info`, - `iotdisco`, - `ipn`, - `ipp`, - `ipps`, - `irc`, - `irc6`, - `ircs`, - 
`iris`, - `iris.beep`, - `iris.lwz`, - `iris.xpc`, - `iris.xpcs`, - `isostore`, - `itms`, - `jabber`, - `jar`, - `jms`, - `keyparc`, - `lastfm`, - `ldap`, - `ldaps`, - `lvlt`, - `magnet`, - `mailserver`, - `mailto`, - `maps`, - `market`, - `message`, - `microsoft.windows.camera`, - `microsoft.windows.camera.multipicker`, - `microsoft.windows.camera.picker`, - `mid`, - `mms`, - `modem`, - `mongodb`, - `moz`, - `ms-access`, - `ms-browser-extension`, - `ms-drive-to`, - `ms-enrollment`, - `ms-excel`, - `ms-gamebarservices`, - `ms-gamingoverlay`, - `ms-getoffice`, - `ms-help`, - `ms-infopath`, - `ms-inputapp`, - `ms-lockscreencomponent-config`, - `ms-media-stream-id`, - `ms-mixedrealitycapture`, - `ms-officeapp`, - `ms-people`, - `ms-project`, - `ms-powerpoint`, - `ms-publisher`, - `ms-restoretabcompanion`, - `ms-screenclip`, - `ms-screensketch`, - `ms-search-repair`, - `ms-secondary-screen-controller`, - `ms-secondary-screen-setup`, - `ms-settings`, - `ms-settings-airplanemode`, - `ms-settings-bluetooth`, - `ms-settings-camera`, - `ms-settings-cellular`, - `ms-settings-cloudstorage`, - `ms-settings-connectabledevices`, - `ms-settings-displays-topology`, - `ms-settings-emailandaccounts`, - `ms-settings-language`, - `ms-settings-location`, - `ms-settings-lock`, - `ms-settings-nfctransactions`, - `ms-settings-notifications`, - `ms-settings-power`, - `ms-settings-privacy`, - `ms-settings-proximity`, - `ms-settings-screenrotation`, - `ms-settings-wifi`, - `ms-settings-workplace`, - `ms-spd`, - `ms-sttoverlay`, - `ms-transit-to`, - `ms-useractivityset`, - `ms-virtualtouchpad`, - `ms-visio`, - `ms-walk-to`, - `ms-whiteboard`, - `ms-whiteboard-cmd`, - `ms-word`, - `msnim`, - `msrp`, - `msrps`, - `mtqp`, - `mumble`, - `mupdate`, - `mvn`, - `news`, - `nfs`, - `ni`, - `nih`, - `nntp`, - `notes`, - `ocf`, - `oid`, - `onenote`, - `onenote-cmd`, - `opaquelocktoken`, - `openpgp4fpr`, - `pack`, - `palm`, - `paparazzi`, - `pkcs11`, - `platform`, - `pop`, - `pres`, - `prospero`, - 
`proxy`, - `pwid`, - `psyc`, - `qb`, - `query`, - `redis`, - `rediss`, - `reload`, - `res`, - `resource`, - `rmi`, - `rsync`, - `rtmfp`, - `rtmp`, - `rtsp`, - `rtsps`, - `rtspu`, - `secondlife`, - `service`, - `session`, - `sftp`, - `sgn`, - `shttp`, - `sieve`, - `simpleledger`, - `sip`, - `sips`, - `skype`, - `smb`, - `sms`, - `smtp`, - `snews`, - `snmp`, - `soap.beep`, - `soap.beeps`, - `soldat`, - `spiffe`, - `spotify`, - `ssh`, - `steam`, - `stun`, - `stuns`, - `submit`, - `svn`, - `tag`, - `teamspeak`, - `tel`, - `teliaeid`, - `telnet`, - `tftp`, - `things`, - `thismessage`, - `tip`, - `tn3270`, - `tool`, - `turn`, - `turns`, - `tv`, - `udp`, - `unreal`, - `urn`, - `ut2004`, - `v-event`, - `vemmi`, - `ventrilo`, - `videotex`, - `vnc`, - `view-source`, - `wais`, - `webcal`, - `wpid`, - `ws`, - `wss`, - `wtai`, - `wyciwyg`, - `xcon`, - `xcon-userid`, - `xfire`, - `xmlrpc.beep`, - `xmlrpc.beeps`, - `xmpp`, - `xri`, - `ymsgr`, - `z39.50`, - `z39.50r`, - `z39.50s`, -} diff --git a/vendor/github.com/CovenantSQL/xurls/tlds.go b/vendor/github.com/CovenantSQL/xurls/tlds.go deleted file mode 100644 index 73837d0c1..000000000 --- a/vendor/github.com/CovenantSQL/xurls/tlds.go +++ /dev/null @@ -1,1562 +0,0 @@ -// Generated by tldsgen - -package xurls - -// TLDs is a sorted list of all public top-level domains. 
-// -// Sources: -// * https://data.iana.org/TLD/tlds-alpha-by-domain.txt -// * https://publicsuffix.org/list/effective_tld_names.dat -var TLDs = []string{ - `aaa`, - `aarp`, - `abarth`, - `abb`, - `abbott`, - `abbvie`, - `abc`, - `able`, - `abogado`, - `abudhabi`, - `ac`, - `academy`, - `accenture`, - `accountant`, - `accountants`, - `aco`, - `active`, - `actor`, - `ad`, - `adac`, - `ads`, - `adult`, - `ae`, - `aeg`, - `aero`, - `aetna`, - `af`, - `afamilycompany`, - `afl`, - `africa`, - `ag`, - `agakhan`, - `agency`, - `ai`, - `aig`, - `aigo`, - `airbus`, - `airforce`, - `airtel`, - `akdn`, - `al`, - `alfaromeo`, - `alibaba`, - `alipay`, - `allfinanz`, - `allstate`, - `ally`, - `alsace`, - `alstom`, - `am`, - `americanexpress`, - `americanfamily`, - `amex`, - `amfam`, - `amica`, - `amsterdam`, - `analytics`, - `android`, - `anquan`, - `anz`, - `ao`, - `aol`, - `apartments`, - `app`, - `apple`, - `aq`, - `aquarelle`, - `ar`, - `arab`, - `aramco`, - `archi`, - `army`, - `arpa`, - `art`, - `arte`, - `as`, - `asda`, - `asia`, - `associates`, - `at`, - `athleta`, - `attorney`, - `au`, - `auction`, - `audi`, - `audible`, - `audio`, - `auspost`, - `author`, - `auto`, - `autos`, - `avianca`, - `aw`, - `aws`, - `ax`, - `axa`, - `az`, - `azure`, - `ba`, - `baby`, - `baidu`, - `banamex`, - `bananarepublic`, - `band`, - `bank`, - `bar`, - `barcelona`, - `barclaycard`, - `barclays`, - `barefoot`, - `bargains`, - `baseball`, - `basketball`, - `bauhaus`, - `bayern`, - `bb`, - `bbc`, - `bbt`, - `bbva`, - `bcg`, - `bcn`, - `bd`, - `be`, - `beats`, - `beauty`, - `beer`, - `bentley`, - `berlin`, - `best`, - `bestbuy`, - `bet`, - `bf`, - `bg`, - `bh`, - `bharti`, - `bi`, - `bible`, - `bid`, - `bike`, - `bing`, - `bingo`, - `bio`, - `biz`, - `bj`, - `black`, - `blackfriday`, - `blanco`, - `blockbuster`, - `blog`, - `bloomberg`, - `blue`, - `bm`, - `bms`, - `bmw`, - `bn`, - `bnl`, - `bnpparibas`, - `bo`, - `boats`, - `boehringer`, - `bofa`, - `bom`, - `bond`, - `boo`, - `book`, - 
`booking`, - `bosch`, - `bostik`, - `boston`, - `bot`, - `boutique`, - `box`, - `br`, - `bradesco`, - `bridgestone`, - `broadway`, - `broker`, - `brother`, - `brussels`, - `bs`, - `bt`, - `budapest`, - `bugatti`, - `build`, - `builders`, - `business`, - `buy`, - `buzz`, - `bv`, - `bw`, - `by`, - `bz`, - `bzh`, - `ca`, - `cab`, - `cafe`, - `cal`, - `call`, - `calvinklein`, - `cam`, - `camera`, - `camp`, - `cancerresearch`, - `canon`, - `capetown`, - `capital`, - `capitalone`, - `car`, - `caravan`, - `cards`, - `care`, - `career`, - `careers`, - `cars`, - `cartier`, - `casa`, - `case`, - `caseih`, - `cash`, - `casino`, - `cat`, - `catering`, - `catholic`, - `cba`, - `cbn`, - `cbre`, - `cbs`, - `cc`, - `cd`, - `ceb`, - `center`, - `ceo`, - `cern`, - `cf`, - `cfa`, - `cfd`, - `cg`, - `ch`, - `chanel`, - `channel`, - `charity`, - `chase`, - `chat`, - `cheap`, - `chintai`, - `christmas`, - `chrome`, - `chrysler`, - `church`, - `ci`, - `cipriani`, - `circle`, - `cisco`, - `citadel`, - `citi`, - `citic`, - `city`, - `cityeats`, - `ck`, - `cl`, - `claims`, - `cleaning`, - `click`, - `clinic`, - `clinique`, - `clothing`, - `cloud`, - `club`, - `clubmed`, - `cm`, - `cn`, - `co`, - `coach`, - `codes`, - `coffee`, - `college`, - `cologne`, - `com`, - `comcast`, - `commbank`, - `community`, - `company`, - `compare`, - `computer`, - `comsec`, - `condos`, - `construction`, - `consulting`, - `contact`, - `contractors`, - `cooking`, - `cookingchannel`, - `cool`, - `coop`, - `corsica`, - `country`, - `coupon`, - `coupons`, - `courses`, - `cr`, - `credit`, - `creditcard`, - `creditunion`, - `cricket`, - `crown`, - `crs`, - `cruise`, - `cruises`, - `csc`, - `cu`, - `cuisinella`, - `cv`, - `cw`, - `cx`, - `cy`, - `cymru`, - `cyou`, - `cz`, - `dabur`, - `dad`, - `dance`, - `data`, - `date`, - `dating`, - `datsun`, - `day`, - `dclk`, - `dds`, - `de`, - `deal`, - `dealer`, - `deals`, - `degree`, - `delivery`, - `dell`, - `deloitte`, - `delta`, - `democrat`, - `dental`, - `dentist`, - 
`desi`, - `design`, - `dev`, - `dhl`, - `diamonds`, - `diet`, - `digital`, - `direct`, - `directory`, - `discount`, - `discover`, - `dish`, - `diy`, - `dj`, - `dk`, - `dm`, - `dnp`, - `do`, - `docs`, - `doctor`, - `dodge`, - `dog`, - `doha`, - `domains`, - `dot`, - `download`, - `drive`, - `dtv`, - `dubai`, - `duck`, - `dunlop`, - `duns`, - `dupont`, - `durban`, - `dvag`, - `dvr`, - `dz`, - `earth`, - `eat`, - `ec`, - `eco`, - `edeka`, - `edu`, - `education`, - `ee`, - `eg`, - `email`, - `emerck`, - `energy`, - `engineer`, - `engineering`, - `enterprises`, - `epost`, - `epson`, - `equipment`, - `er`, - `ericsson`, - `erni`, - `es`, - `esq`, - `estate`, - `esurance`, - `et`, - `etisalat`, - `eu`, - `eurovision`, - `eus`, - `events`, - `everbank`, - `exchange`, - `expert`, - `exposed`, - `express`, - `extraspace`, - `fage`, - `fail`, - `fairwinds`, - `faith`, - `family`, - `fan`, - `fans`, - `farm`, - `farmers`, - `fashion`, - `fast`, - `fedex`, - `feedback`, - `ferrari`, - `ferrero`, - `fi`, - `fiat`, - `fidelity`, - `fido`, - `film`, - `final`, - `finance`, - `financial`, - `fire`, - `firestone`, - `firmdale`, - `fish`, - `fishing`, - `fit`, - `fitness`, - `fj`, - `fk`, - `flickr`, - `flights`, - `flir`, - `florist`, - `flowers`, - `fly`, - `fm`, - `fo`, - `foo`, - `food`, - `foodnetwork`, - `football`, - `ford`, - `forex`, - `forsale`, - `forum`, - `foundation`, - `fox`, - `fr`, - `free`, - `fresenius`, - `frl`, - `frogans`, - `frontdoor`, - `frontier`, - `ftr`, - `fujitsu`, - `fujixerox`, - `fun`, - `fund`, - `furniture`, - `futbol`, - `fyi`, - `ga`, - `gal`, - `gallery`, - `gallo`, - `gallup`, - `game`, - `games`, - `gap`, - `garden`, - `gb`, - `gbiz`, - `gd`, - `gdn`, - `ge`, - `gea`, - `gent`, - `genting`, - `george`, - `gf`, - `gg`, - `ggee`, - `gh`, - `gi`, - `gift`, - `gifts`, - `gives`, - `giving`, - `gl`, - `glade`, - `glass`, - `gle`, - `global`, - `globo`, - `gm`, - `gmail`, - `gmbh`, - `gmo`, - `gmx`, - `gn`, - `godaddy`, - `gold`, - `goldpoint`, - 
`golf`, - `goo`, - `goodhands`, - `goodyear`, - `goog`, - `google`, - `gop`, - `got`, - `gov`, - `gp`, - `gq`, - `gr`, - `grainger`, - `graphics`, - `gratis`, - `green`, - `gripe`, - `grocery`, - `group`, - `gs`, - `gt`, - `gu`, - `guardian`, - `gucci`, - `guge`, - `guide`, - `guitars`, - `guru`, - `gw`, - `gy`, - `hair`, - `hamburg`, - `hangout`, - `haus`, - `hbo`, - `hdfc`, - `hdfcbank`, - `health`, - `healthcare`, - `help`, - `helsinki`, - `here`, - `hermes`, - `hgtv`, - `hiphop`, - `hisamitsu`, - `hitachi`, - `hiv`, - `hk`, - `hkt`, - `hm`, - `hn`, - `hockey`, - `holdings`, - `holiday`, - `homedepot`, - `homegoods`, - `homes`, - `homesense`, - `honda`, - `honeywell`, - `horse`, - `hospital`, - `host`, - `hosting`, - `hot`, - `hoteles`, - `hotels`, - `hotmail`, - `house`, - `how`, - `hr`, - `hsbc`, - `ht`, - `hu`, - `hughes`, - `hyatt`, - `hyundai`, - `ibm`, - `icbc`, - `ice`, - `icu`, - `id`, - `ie`, - `ieee`, - `ifm`, - `ikano`, - `il`, - `im`, - `imamat`, - `imdb`, - `immo`, - `immobilien`, - `in`, - `inc`, - `industries`, - `infiniti`, - `info`, - `ing`, - `ink`, - `institute`, - `insurance`, - `insure`, - `int`, - `intel`, - `international`, - `intuit`, - `investments`, - `io`, - `ipiranga`, - `iq`, - `ir`, - `irish`, - `is`, - `iselect`, - `ismaili`, - `ist`, - `istanbul`, - `it`, - `itau`, - `itv`, - `iveco`, - `jaguar`, - `java`, - `jcb`, - `jcp`, - `je`, - `jeep`, - `jetzt`, - `jewelry`, - `jio`, - `jlc`, - `jll`, - `jm`, - `jmp`, - `jnj`, - `jo`, - `jobs`, - `joburg`, - `jot`, - `joy`, - `jp`, - `jpmorgan`, - `jprs`, - `juegos`, - `juniper`, - `kaufen`, - `kddi`, - `ke`, - `kerryhotels`, - `kerrylogistics`, - `kerryproperties`, - `kfh`, - `kg`, - `kh`, - `ki`, - `kia`, - `kim`, - `kinder`, - `kindle`, - `kitchen`, - `kiwi`, - `km`, - `kn`, - `koeln`, - `komatsu`, - `kosher`, - `kp`, - `kpmg`, - `kpn`, - `kr`, - `krd`, - `kred`, - `kuokgroup`, - `kw`, - `ky`, - `kyoto`, - `kz`, - `la`, - `lacaixa`, - `ladbrokes`, - `lamborghini`, - `lamer`, - 
`lancaster`, - `lancia`, - `lancome`, - `land`, - `landrover`, - `lanxess`, - `lasalle`, - `lat`, - `latino`, - `latrobe`, - `law`, - `lawyer`, - `lb`, - `lc`, - `lds`, - `lease`, - `leclerc`, - `lefrak`, - `legal`, - `lego`, - `lexus`, - `lgbt`, - `li`, - `liaison`, - `lidl`, - `life`, - `lifeinsurance`, - `lifestyle`, - `lighting`, - `like`, - `lilly`, - `limited`, - `limo`, - `lincoln`, - `linde`, - `link`, - `lipsy`, - `live`, - `living`, - `lixil`, - `lk`, - `llc`, - `loan`, - `loans`, - `locker`, - `locus`, - `loft`, - `lol`, - `london`, - `lotte`, - `lotto`, - `love`, - `lpl`, - `lplfinancial`, - `lr`, - `ls`, - `lt`, - `ltd`, - `ltda`, - `lu`, - `lundbeck`, - `lupin`, - `luxe`, - `luxury`, - `lv`, - `ly`, - `ma`, - `macys`, - `madrid`, - `maif`, - `maison`, - `makeup`, - `man`, - `management`, - `mango`, - `map`, - `market`, - `marketing`, - `markets`, - `marriott`, - `marshalls`, - `maserati`, - `mattel`, - `mba`, - `mc`, - `mckinsey`, - `md`, - `me`, - `med`, - `media`, - `meet`, - `melbourne`, - `meme`, - `memorial`, - `men`, - `menu`, - `merckmsd`, - `metlife`, - `mg`, - `mh`, - `miami`, - `microsoft`, - `mil`, - `mini`, - `mint`, - `mit`, - `mitsubishi`, - `mk`, - `ml`, - `mlb`, - `mls`, - `mm`, - `mma`, - `mn`, - `mo`, - `mobi`, - `mobile`, - `mobily`, - `moda`, - `moe`, - `moi`, - `mom`, - `monash`, - `money`, - `monster`, - `mopar`, - `mormon`, - `mortgage`, - `moscow`, - `moto`, - `motorcycles`, - `mov`, - `movie`, - `movistar`, - `mp`, - `mq`, - `mr`, - `ms`, - `msd`, - `mt`, - `mtn`, - `mtr`, - `mu`, - `museum`, - `mutual`, - `mv`, - `mw`, - `mx`, - `my`, - `mz`, - `na`, - `nab`, - `nadex`, - `nagoya`, - `name`, - `nationwide`, - `natura`, - `navy`, - `nba`, - `nc`, - `ne`, - `nec`, - `net`, - `netbank`, - `netflix`, - `network`, - `neustar`, - `new`, - `newholland`, - `news`, - `next`, - `nextdirect`, - `nexus`, - `nf`, - `nfl`, - `ng`, - `ngo`, - `nhk`, - `ni`, - `nico`, - `nike`, - `nikon`, - `ninja`, - `nissan`, - `nissay`, - `nl`, - `no`, - 
`nokia`, - `northwesternmutual`, - `norton`, - `now`, - `nowruz`, - `nowtv`, - `np`, - `nr`, - `nra`, - `nrw`, - `ntt`, - `nu`, - `nyc`, - `nz`, - `obi`, - `observer`, - `off`, - `office`, - `okinawa`, - `olayan`, - `olayangroup`, - `oldnavy`, - `ollo`, - `om`, - `omega`, - `one`, - `ong`, - `onion`, - `onl`, - `online`, - `onyourside`, - `ooo`, - `open`, - `oracle`, - `orange`, - `org`, - `organic`, - `origins`, - `osaka`, - `otsuka`, - `ott`, - `ovh`, - `pa`, - `page`, - `panasonic`, - `panerai`, - `paris`, - `pars`, - `partners`, - `parts`, - `party`, - `passagens`, - `pay`, - `pccw`, - `pe`, - `pet`, - `pf`, - `pfizer`, - `pg`, - `ph`, - `pharmacy`, - `phd`, - `philips`, - `phone`, - `photo`, - `photography`, - `photos`, - `physio`, - `piaget`, - `pics`, - `pictet`, - `pictures`, - `pid`, - `pin`, - `ping`, - `pink`, - `pioneer`, - `pizza`, - `pk`, - `pl`, - `place`, - `play`, - `playstation`, - `plumbing`, - `plus`, - `pm`, - `pn`, - `pnc`, - `pohl`, - `poker`, - `politie`, - `porn`, - `post`, - `pr`, - `pramerica`, - `praxi`, - `press`, - `prime`, - `pro`, - `prod`, - `productions`, - `prof`, - `progressive`, - `promo`, - `properties`, - `property`, - `protection`, - `pru`, - `prudential`, - `ps`, - `pt`, - `pub`, - `pw`, - `pwc`, - `py`, - `qa`, - `qpon`, - `quebec`, - `quest`, - `qvc`, - `racing`, - `radio`, - `raid`, - `re`, - `read`, - `realestate`, - `realtor`, - `realty`, - `recipes`, - `red`, - `redstone`, - `redumbrella`, - `rehab`, - `reise`, - `reisen`, - `reit`, - `reliance`, - `ren`, - `rent`, - `rentals`, - `repair`, - `report`, - `republican`, - `rest`, - `restaurant`, - `review`, - `reviews`, - `rexroth`, - `rich`, - `richardli`, - `ricoh`, - `rightathome`, - `ril`, - `rio`, - `rip`, - `rmit`, - `ro`, - `rocher`, - `rocks`, - `rodeo`, - `rogers`, - `room`, - `rs`, - `rsvp`, - `ru`, - `rugby`, - `ruhr`, - `run`, - `rw`, - `rwe`, - `ryukyu`, - `sa`, - `saarland`, - `safe`, - `safety`, - `sakura`, - `sale`, - `salon`, - `samsclub`, - `samsung`, - 
`sandvik`, - `sandvikcoromant`, - `sanofi`, - `sap`, - `sarl`, - `sas`, - `save`, - `saxo`, - `sb`, - `sbi`, - `sbs`, - `sc`, - `sca`, - `scb`, - `schaeffler`, - `schmidt`, - `scholarships`, - `school`, - `schule`, - `schwarz`, - `science`, - `scjohnson`, - `scor`, - `scot`, - `sd`, - `se`, - `search`, - `seat`, - `secure`, - `security`, - `seek`, - `select`, - `sener`, - `services`, - `ses`, - `seven`, - `sew`, - `sex`, - `sexy`, - `sfr`, - `sg`, - `sh`, - `shangrila`, - `sharp`, - `shaw`, - `shell`, - `shia`, - `shiksha`, - `shoes`, - `shop`, - `shopping`, - `shouji`, - `show`, - `showtime`, - `shriram`, - `si`, - `silk`, - `sina`, - `singles`, - `site`, - `sj`, - `sk`, - `ski`, - `skin`, - `sky`, - `skype`, - `sl`, - `sling`, - `sm`, - `smart`, - `smile`, - `sn`, - `sncf`, - `so`, - `soccer`, - `social`, - `softbank`, - `software`, - `sohu`, - `solar`, - `solutions`, - `song`, - `sony`, - `soy`, - `space`, - `spiegel`, - `sport`, - `spot`, - `spreadbetting`, - `sr`, - `srl`, - `srt`, - `st`, - `stada`, - `staples`, - `star`, - `starhub`, - `statebank`, - `statefarm`, - `statoil`, - `stc`, - `stcgroup`, - `stockholm`, - `storage`, - `store`, - `stream`, - `studio`, - `study`, - `style`, - `su`, - `sucks`, - `supplies`, - `supply`, - `support`, - `surf`, - `surgery`, - `suzuki`, - `sv`, - `swatch`, - `swiftcover`, - `swiss`, - `sx`, - `sy`, - `sydney`, - `symantec`, - `systems`, - `sz`, - `tab`, - `taipei`, - `talk`, - `taobao`, - `target`, - `tatamotors`, - `tatar`, - `tattoo`, - `tax`, - `taxi`, - `tc`, - `tci`, - `td`, - `tdk`, - `team`, - `tech`, - `technology`, - `tel`, - `telecity`, - `telefonica`, - `temasek`, - `tennis`, - `teva`, - `tf`, - `tg`, - `th`, - `thd`, - `theater`, - `theatre`, - `tiaa`, - `tickets`, - `tienda`, - `tiffany`, - `tips`, - `tires`, - `tirol`, - `tj`, - `tjmaxx`, - `tjx`, - `tk`, - `tkmaxx`, - `tl`, - `tm`, - `tmall`, - `tn`, - `to`, - `today`, - `tokyo`, - `tools`, - `top`, - `toray`, - `toshiba`, - `total`, - `tours`, - `town`, - 
`toyota`, - `toys`, - `tr`, - `trade`, - `trading`, - `training`, - `travel`, - `travelchannel`, - `travelers`, - `travelersinsurance`, - `trust`, - `trv`, - `tt`, - `tube`, - `tui`, - `tunes`, - `tushu`, - `tv`, - `tvs`, - `tw`, - `tz`, - `ua`, - `ubank`, - `ubs`, - `uconnect`, - `ug`, - `uk`, - `unicom`, - `university`, - `uno`, - `uol`, - `ups`, - `us`, - `uy`, - `uz`, - `va`, - `vacations`, - `vana`, - `vanguard`, - `vc`, - `ve`, - `vegas`, - `ventures`, - `verisign`, - `vermögensberater`, - `vermögensberatung`, - `versicherung`, - `vet`, - `vg`, - `vi`, - `viajes`, - `video`, - `vig`, - `viking`, - `villas`, - `vin`, - `vip`, - `virgin`, - `visa`, - `vision`, - `vista`, - `vistaprint`, - `viva`, - `vivo`, - `vlaanderen`, - `vn`, - `vodka`, - `volkswagen`, - `volvo`, - `vote`, - `voting`, - `voto`, - `voyage`, - `vu`, - `vuelos`, - `wales`, - `walmart`, - `walter`, - `wang`, - `wanggou`, - `warman`, - `watch`, - `watches`, - `weather`, - `weatherchannel`, - `webcam`, - `weber`, - `website`, - `wed`, - `wedding`, - `weibo`, - `weir`, - `wf`, - `whoswho`, - `wien`, - `wiki`, - `williamhill`, - `win`, - `windows`, - `wine`, - `winners`, - `wme`, - `wolterskluwer`, - `woodside`, - `work`, - `works`, - `world`, - `wow`, - `ws`, - `wtc`, - `wtf`, - `xbox`, - `xerox`, - `xfinity`, - `xihuan`, - `xin`, - `xxx`, - `xyz`, - `yachts`, - `yahoo`, - `yamaxun`, - `yandex`, - `ye`, - `yodobashi`, - `yoga`, - `yokohama`, - `you`, - `youtube`, - `yt`, - `yun`, - `za`, - `zappos`, - `zara`, - `zero`, - `zip`, - `zippo`, - `zm`, - `zone`, - `zuerich`, - `zw`, - `ελ`, - `бг`, - `бел`, - `дети`, - `ею`, - `католик`, - `ком`, - `мкд`, - `мон`, - `москва`, - `онлайн`, - `орг`, - `рус`, - `рф`, - `сайт`, - `срб`, - `укр`, - `қаз`, - `հայ`, - `קום`, - `ابوظبي`, - `اتصالات`, - `ارامكو`, - `الاردن`, - `الجزائر`, - `السعودية`, - `السعوديه`, - `السعودیة`, - `السعودیۃ`, - `العليان`, - `المغرب`, - `اليمن`, - `امارات`, - `ايران`, - `ایران`, - `بارت`, - `بازار`, - `بيتك`, - `بھارت`, - `تونس`, 
- `سودان`, - `سوريا`, - `سورية`, - `شبكة`, - `عراق`, - `عرب`, - `عمان`, - `فلسطين`, - `قطر`, - `كاثوليك`, - `كوم`, - `مصر`, - `مليسيا`, - `موبايلي`, - `موقع`, - `همراه`, - `پاكستان`, - `پاکستان`, - `ڀارت`, - `कॉम`, - `नेट`, - `भारत`, - `भारतम्`, - `भारोत`, - `संगठन`, - `বাংলা`, - `ভারত`, - `ভাৰত`, - `ਭਾਰਤ`, - `ભારત`, - `ଭାରତ`, - `இந்தியா`, - `இலங்கை`, - `சிங்கப்பூர்`, - `భారత్`, - `ಭಾರತ`, - `ഭാരതം`, - `ලංකා`, - `คอม`, - `ไทย`, - `გე`, - `みんな`, - `クラウド`, - `グーグル`, - `コム`, - `ストア`, - `セール`, - `ファッション`, - `ポイント`, - `世界`, - `中信`, - `中国`, - `中國`, - `中文网`, - `企业`, - `佛山`, - `信息`, - `健康`, - `八卦`, - `公司`, - `公益`, - `台湾`, - `台灣`, - `商城`, - `商店`, - `商标`, - `嘉里`, - `嘉里大酒店`, - `在线`, - `大众汽车`, - `大拿`, - `天主教`, - `娱乐`, - `家電`, - `工行`, - `广东`, - `微博`, - `慈善`, - `我爱你`, - `手机`, - `手表`, - `招聘`, - `政务`, - `政府`, - `新加坡`, - `新闻`, - `时尚`, - `書籍`, - `机构`, - `淡马锡`, - `游戏`, - `澳門`, - `澳门`, - `点看`, - `珠宝`, - `移动`, - `组织机构`, - `网址`, - `网店`, - `网站`, - `网络`, - `联通`, - `臺灣`, - `诺基亚`, - `谷歌`, - `购物`, - `通販`, - `集团`, - `電訊盈科`, - `飞利浦`, - `食品`, - `餐厅`, - `香格里拉`, - `香港`, - `닷넷`, - `닷컴`, - `삼성`, - `한국`, -} diff --git a/vendor/github.com/CovenantSQL/xurls/tlds_pseudo.go b/vendor/github.com/CovenantSQL/xurls/tlds_pseudo.go deleted file mode 100644 index 94c67d15b..000000000 --- a/vendor/github.com/CovenantSQL/xurls/tlds_pseudo.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright (c) 2015, Daniel Martí -// See LICENSE for licensing information - -package xurls - -// PseudoTLDs is a sorted list of some widely used unofficial TLDs. 
-// -// Sources: -// * https://en.wikipedia.org/wiki/Pseudo-top-level_domain -// * https://en.wikipedia.org/wiki/Category:Pseudo-top-level_domains -// * https://tools.ietf.org/html/draft-grothoff-iesg-special-use-p2p-names-00 -// * https://www.iana.org/assignments/special-use-domain-names/special-use-domain-names.xhtml -var PseudoTLDs = []string{ - `bit`, // Namecoin - `example`, // Example domain - `exit`, // Tor exit node - `gnu`, // GNS by public key - `i2p`, // I2P network - `invalid`, // Invalid domain - `local`, // Local network - `localhost`, // Local network - `test`, // Test domain - `zkey`, // GNS domain name -} diff --git a/vendor/github.com/CovenantSQL/xurls/xurls.go b/vendor/github.com/CovenantSQL/xurls/xurls.go deleted file mode 100644 index 5a98ac7cf..000000000 --- a/vendor/github.com/CovenantSQL/xurls/xurls.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright (c) 2015, Daniel Martí -// See LICENSE for licensing information - -// Package xurls extracts urls from plain text using regular expressions. 
-package xurls - -import ( - "bytes" - "regexp" -) - -//go:generate go run generate/tldsgen/main.go -//go:generate go run generate/schemesgen/main.go - -const ( - letter = `\p{L}` - mark = `\p{M}` - number = `\p{N}` - iriChar = letter + mark + number - currency = `\p{Sc}` - otherSymb = `\p{So}` - endChar = iriChar + `/\-+_&~*%=#` + currency + otherSymb - otherPunc = `\p{Po}` - midChar = endChar + `|` + otherPunc - wellParen = `\([` + midChar + `]*(\([` + midChar + `]*\)[` + midChar + `]*)*\)` - wellBrack = `\[[` + midChar + `]*(\[[` + midChar + `]*\][` + midChar + `]*)*\]` - wellBrace = `\{[` + midChar + `]*(\{[` + midChar + `]*\}[` + midChar + `]*)*\}` - wellAll = wellParen + `|` + wellBrack + `|` + wellBrace - pathCont = `([` + midChar + `]*(` + wellAll + `|[` + endChar + `])+)+` - - iri = `[` + iriChar + `]([` + iriChar + `\-]*[` + iriChar + `])?` - domain = `(` + iri + `\.)+` - octet = `(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[1-9][0-9]|[0-9])` - ipv4Addr = `\b` + octet + `\.` + octet + `\.` + octet + `\.` + octet + `\b` - ipv6Addr = `([0-9a-fA-F]{1,4}:([0-9a-fA-F]{1,4}:([0-9a-fA-F]{1,4}:([0-9a-fA-F]{1,4}:([0-9a-fA-F]{1,4}:[0-9a-fA-F]{0,4}|:[0-9a-fA-F]{1,4})?|(:[0-9a-fA-F]{1,4}){0,2})|(:[0-9a-fA-F]{1,4}){0,3})|(:[0-9a-fA-F]{1,4}){0,4})|:(:[0-9a-fA-F]{1,4}){0,5})((:[0-9a-fA-F]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9a-fA-F]{1,4}:){1,6}|:):[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){7}:` - ipAddr = `(` + ipv4Addr + `|` + ipv6Addr + `)` - port = `(:[0-9]*)?` -) - -// AnyScheme can be passed to StrictMatchingScheme to match any possibly -// valid scheme. -var AnyScheme = `([a-zA-Z][a-zA-Z.\-+]*://|` + anyOf(SchemesNoAuthority...) + `:)` - -// SchemesNoAuthority is a sorted list of some well-known url schemes that are -// followed by ":" instead of "://". 
-var SchemesNoAuthority = []string{ - `bitcoin`, // Bitcoin - `file`, // Files - `magnet`, // Torrent magnets - `mailto`, // Mail - `sms`, // SMS - `tel`, // Telephone - `xmpp`, // XMPP -} - -func anyOf(strs ...string) string { - var b bytes.Buffer - b.WriteByte('(') - for i, s := range strs { - if i != 0 { - b.WriteByte('|') - } - b.WriteString(regexp.QuoteMeta(s)) - } - b.WriteByte(')') - return b.String() -} - -func strictExp() string { - schemes := `(` + anyOf(Schemes...) + `://|` + anyOf(SchemesNoAuthority...) + `:)` - return `(?i)` + schemes + `(?-i)` + pathCont -} - -func relaxedExp() string { - site := domain + `(?i)` + anyOf(append(TLDs, PseudoTLDs...)...) + `(?-i)` - hostName := `(` + site + `|` + ipAddr + `)` - webURL := hostName + port + `(/|/` + pathCont + `?|\b|$)` - return strictExp() + `|` + webURL -} - -func Relaxed() *regexp.Regexp { - re := regexp.MustCompile(relaxedExp()) - re.Longest() - return re -} - -func Strict() *regexp.Regexp { - re := regexp.MustCompile(strictExp()) - re.Longest() - return re -} - -// StrictMatchingScheme produces a regexp that matches urls like Strict but -// whose scheme matches the given regular expression. 
-func StrictMatchingScheme(exp string) (*regexp.Regexp, error) { - strictMatching := `(?i)(` + exp + `)(?-i)` + pathCont - re, err := regexp.Compile(strictMatching) - if err != nil { - return nil, err - } - re.Longest() - return re, nil -} diff --git a/vendor/github.com/beevik/ntp/CONTRIBUTORS b/vendor/github.com/beevik/ntp/CONTRIBUTORS deleted file mode 100644 index 626c12eb5..000000000 --- a/vendor/github.com/beevik/ntp/CONTRIBUTORS +++ /dev/null @@ -1,7 +0,0 @@ -Brett Vickers (beevik) -Mikhail Salosin (AlphaB) -Anton Tolchanov (knyar) -Christopher Batey (chbatey) -Meng Zhuo (mengzhuo) -Leonid Evdokimov (darkk) -Ask Bjørn Hansen (abh) \ No newline at end of file diff --git a/vendor/github.com/beevik/ntp/LICENSE b/vendor/github.com/beevik/ntp/LICENSE deleted file mode 100644 index 45d3d4959..000000000 --- a/vendor/github.com/beevik/ntp/LICENSE +++ /dev/null @@ -1,24 +0,0 @@ -Copyright 2015-2017 Brett Vickers. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER ``AS IS'' AND ANY -EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL COPYRIGHT HOLDER OR -CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/beevik/ntp/README.md b/vendor/github.com/beevik/ntp/README.md deleted file mode 100644 index 9719733b7..000000000 --- a/vendor/github.com/beevik/ntp/README.md +++ /dev/null @@ -1,71 +0,0 @@ -[![Build Status](https://travis-ci.org/beevik/ntp.svg?branch=master)](https://travis-ci.org/beevik/ntp) -[![GoDoc](https://godoc.org/github.com/beevik/ntp?status.svg)](https://godoc.org/github.com/beevik/ntp) - -ntp -=== - -The ntp package is an implementation of a Simple NTP (SNTP) client based on -[RFC5905](https://tools.ietf.org/html/rfc5905). It allows you to connect to -a remote NTP server and request information about the current time. 
- - -## Querying the current time - -If all you care about is the current time according to a remote NTP server, -simply use the `Time` function: -```go -time, err := ntp.Time("0.beevik-ntp.pool.ntp.org") -``` - - -## Querying time metadata - -To obtain the current time as well as some additional metadata about the time, -use the `Query` function: -```go -response, err := ntp.Query("0.beevik-ntp.pool.ntp.org") -time := time.Now().Add(response.ClockOffset) -``` - -Alternatively, use the `QueryWithOptions` function if you want to change the -default behavior used by the `Query` function: -```go -options := ntp.QueryOptions{ Timeout: 30*time.Second, TTL: 5 } -response, err := ntp.QueryWithOptions("0.beevik-ntp.pool.ntp.org", options) -time := time.Now().Add(response.ClockOffset) -``` - -The `Response` structure returned by `Query` includes the following -information: -* `Time`: The time the server transmitted its response, according to its own clock. -* `ClockOffset`: The estimated offset of the local system clock relative to the server's clock. For a more accurate time reading, you may add this offset to any subsequent system clock reading. -* `RTT`: An estimate of the round-trip-time delay between the client and the server. -* `Precision`: The precision of the server's clock reading. -* `Stratum`: The server's stratum, which indicates the number of hops from the server to the reference clock. A stratum 1 server is directly attached to the reference clock. If the stratum is zero, the server has responded with the "kiss of death". -* `ReferenceID`: A unique identifier for the consulted reference clock. -* `ReferenceTime`: The time at which the server last updated its local clock setting. -* `RootDelay`: The server's aggregate round-trip-time delay to the stratum 1 server. -* `RootDispersion`: The server's estimated maximum measurement error relative to the reference clock. 
-* `RootDistance`: An estimate of the root synchronization distance between the client and the stratum 1 server. -* `Leap`: The leap second indicator, indicating whether a second should be added to or removed from the current month's last minute. -* `MinError`: A lower bound on the clock error between the client and the server. -* `KissCode`: A 4-character string describing the reason for a "kiss of death" response (stratum=0). -* `Poll`: The maximum polling interval between successive messages to the server. - -The `Response` structure's `Validate` method performs additional sanity checks -to determine whether the response is suitable for time synchronization -purposes. -```go -err := response.Validate() -if err == nil { - // response data is suitable for synchronization purposes -} -``` - -## Using the NTP pool - -The NTP pool is a shared resource used by people all over the world. -To prevent it from becoming overloaded, please avoid querying the standard -`pool.ntp.org` zone names in your applications. Instead, consider requesting -your own [vendor zone](http://www.pool.ntp.org/en/vendors.html) or [joining -the pool](http://www.pool.ntp.org/join.html). diff --git a/vendor/github.com/beevik/ntp/RELEASE_NOTES.md b/vendor/github.com/beevik/ntp/RELEASE_NOTES.md deleted file mode 100644 index 603765e91..000000000 --- a/vendor/github.com/beevik/ntp/RELEASE_NOTES.md +++ /dev/null @@ -1,54 +0,0 @@ -Release v0.2.0 -============== - -There are no breaking changes or further deprecations in this release. - -**Changes** - -* Added `KissCode` to the `Response` structure. - - -Release v0.1.1 -============== - -**Breaking changes** - -* Removed the `MaxStratum` constant. - -**Deprecations** - -* Officially deprecated the `TimeV` function. - -**Internal changes** - -* Removed `minDispersion` from the `RootDistance` calculation, since the value - was arbitrary. 
-* Moved some validation into main code path so that invalid `TransmitTime` and - `mode` responses trigger an error even when `Response.Validate` is not - called. - - -Release v0.1.0 -============== - -This is the initial release of the `ntp` package. Currently it supports the following features: -* `Time()` to query the current time according to a remote NTP server. -* `Query()` to query multiple pieces of time-related information from a remote NTP server. -* `QueryWithOptions()`, which is like `Query()` but with the ability to override default query options. - -Time-related information returned by the `Query` functions includes: -* `Time`: the time the server transmitted its response, according to the server's clock. -* `ClockOffset`: the estimated offset of the client's clock relative to the server's clock. You may apply this offset to any local system clock reading once the query is complete. -* `RTT`: an estimate of the round-trip-time delay between the client and the server. -* `Precision`: the precision of the server's clock reading. -* `Stratum`: the "stratum" level of the server, where 1 indicates a server directly connected to a reference clock, and values greater than 1 indicating the number of hops from the reference clock. -* `ReferenceID`: A unique identifier for the NTP server that was contacted. -* `ReferenceTime`: The time at which the server last updated its local clock setting. -* `RootDelay`: The server's round-trip delay to the reference clock. -* `RootDispersion`: The server's total dispersion to the referenced clock. -* `RootDistance`: An estimate of the root synchronization distance. -* `Leap`: The leap second indicator. -* `MinError`: A lower bound on the clock error between the client and the server. -* `Poll`: the maximum polling interval between successive messages on the server. 
- -The `Response` structure returned by the `Query` functions also contains a `Response.Validate()` function that returns an error if any of the fields returned by the server are invalid. diff --git a/vendor/github.com/beevik/ntp/ntp.go b/vendor/github.com/beevik/ntp/ntp.go deleted file mode 100644 index ba47e436d..000000000 --- a/vendor/github.com/beevik/ntp/ntp.go +++ /dev/null @@ -1,565 +0,0 @@ -// Copyright 2015-2017 Brett Vickers. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ntp provides an implementation of a Simple NTP (SNTP) client -// capable of querying the current time from a remote NTP server. See -// RFC5905 (https://tools.ietf.org/html/rfc5905) for more details. -// -// This approach grew out of a go-nuts post by Michael Hofmann: -// https://groups.google.com/forum/?fromgroups#!topic/golang-nuts/FlcdMU5fkLQ -package ntp - -import ( - "crypto/rand" - "encoding/binary" - "errors" - "fmt" - "net" - "time" - - "golang.org/x/net/ipv4" -) - -// The LeapIndicator is used to warn if a leap second should be inserted -// or deleted in the last minute of the current month. -type LeapIndicator uint8 - -const ( - // LeapNoWarning indicates no impending leap second. - LeapNoWarning LeapIndicator = 0 - - // LeapAddSecond indicates the last minute of the day has 61 seconds. - LeapAddSecond = 1 - - // LeapDelSecond indicates the last minute of the day has 59 seconds. - LeapDelSecond = 2 - - // LeapNotInSync indicates an unsynchronized leap second. - LeapNotInSync = 3 -) - -// Internal constants -const ( - defaultNtpVersion = 4 - nanoPerSec = 1000000000 - maxStratum = 16 - defaultTimeout = 5 * time.Second - maxPollInterval = (1 << 17) * time.Second - maxDispersion = 16 * time.Second -) - -// Internal variables -var ( - ntpEpoch = time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC) -) - -type mode uint8 - -// NTP modes. This package uses only client mode. 
-const ( - reserved mode = 0 + iota - symmetricActive - symmetricPassive - client - server - broadcast - controlMessage - reservedPrivate -) - -// An ntpTime is a 64-bit fixed-point (Q32.32) representation of the number of -// seconds elapsed. -type ntpTime uint64 - -// Duration interprets the fixed-point ntpTime as a number of elapsed seconds -// and returns the corresponding time.Duration value. -func (t ntpTime) Duration() time.Duration { - sec := (t >> 32) * nanoPerSec - frac := (t & 0xffffffff) * nanoPerSec >> 32 - return time.Duration(sec + frac) -} - -// Time interprets the fixed-point ntpTime as an absolute time and returns -// the corresponding time.Time value. -func (t ntpTime) Time() time.Time { - return ntpEpoch.Add(t.Duration()) -} - -// toNtpTime converts the time.Time value t into its 64-bit fixed-point -// ntpTime representation. -func toNtpTime(t time.Time) ntpTime { - nsec := uint64(t.Sub(ntpEpoch)) - sec := nsec / nanoPerSec - // Round up the fractional component so that repeated conversions - // between time.Time and ntpTime do not yield continually decreasing - // results. - frac := (((nsec - sec*nanoPerSec) << 32) + nanoPerSec - 1) / nanoPerSec - return ntpTime(sec<<32 | frac) -} - -// An ntpTimeShort is a 32-bit fixed-point (Q16.16) representation of the -// number of seconds elapsed. -type ntpTimeShort uint32 - -// Duration interprets the fixed-point ntpTimeShort as a number of elapsed -// seconds and returns the corresponding time.Duration value. -func (t ntpTimeShort) Duration() time.Duration { - t64 := uint64(t) - sec := (t64 >> 16) * nanoPerSec - frac := (t64 & 0xffff) * nanoPerSec >> 16 - return time.Duration(sec + frac) -} - -// msg is an internal representation of an NTP packet. 
-type msg struct { - LiVnMode uint8 // Leap Indicator (2) + Version (3) + Mode (3) - Stratum uint8 - Poll int8 - Precision int8 - RootDelay ntpTimeShort - RootDispersion ntpTimeShort - ReferenceID uint32 - ReferenceTime ntpTime - OriginTime ntpTime - ReceiveTime ntpTime - TransmitTime ntpTime -} - -// setVersion sets the NTP protocol version on the message. -func (m *msg) setVersion(v int) { - m.LiVnMode = (m.LiVnMode & 0xc7) | uint8(v)<<3 -} - -// setMode sets the NTP protocol mode on the message. -func (m *msg) setMode(md mode) { - m.LiVnMode = (m.LiVnMode & 0xf8) | uint8(md) -} - -// setLeap modifies the leap indicator on the message. -func (m *msg) setLeap(li LeapIndicator) { - m.LiVnMode = (m.LiVnMode & 0x3f) | uint8(li)<<6 -} - -// getVersion returns the version value in the message. -func (m *msg) getVersion() int { - return int((m.LiVnMode >> 3) & 0x07) -} - -// getMode returns the mode value in the message. -func (m *msg) getMode() mode { - return mode(m.LiVnMode & 0x07) -} - -// getLeap returns the leap indicator on the message. -func (m *msg) getLeap() LeapIndicator { - return LeapIndicator((m.LiVnMode >> 6) & 0x03) -} - -// QueryOptions contains the list of configurable options that may be used -// with the QueryWithOptions function. -type QueryOptions struct { - Timeout time.Duration // defaults to 5 seconds - Version int // NTP protocol version, defaults to 4 - LocalAddress string // IP address to use for the client address - Port int // Server port, defaults to 123 - TTL int // IP TTL to use, defaults to system default -} - -// A Response contains time data, some of which is returned by the NTP server -// and some of which is calculated by the client. -type Response struct { - // Time is the transmit time reported by the server just before it - // responded to the client's NTP query. - Time time.Time - - // ClockOffset is the estimated offset of the client clock relative to - // the server. 
Add this to the client's system clock time to obtain a - // more accurate time. - ClockOffset time.Duration - - // RTT is the measured round-trip-time delay estimate between the client - // and the server. - RTT time.Duration - - // Precision is the reported precision of the server's clock. - Precision time.Duration - - // Stratum is the "stratum level" of the server. The smaller the number, - // the closer the server is to the reference clock. Stratum 1 servers are - // attached directly to the reference clock. A stratum value of 0 - // indicates the "kiss of death," which typically occurs when the client - // issues too many requests to the server in a short period of time. - Stratum uint8 - - // ReferenceID is a 32-bit identifier identifying the server or - // reference clock. - ReferenceID uint32 - - // ReferenceTime is the time when the server's system clock was last - // set or corrected. - ReferenceTime time.Time - - // RootDelay is the server's estimated aggregate round-trip-time delay to - // the stratum 1 server. - RootDelay time.Duration - - // RootDispersion is the server's estimated maximum measurement error - // relative to the stratum 1 server. - RootDispersion time.Duration - - // RootDistance is an estimate of the total synchronization distance - // between the client and the stratum 1 server. - RootDistance time.Duration - - // Leap indicates whether a leap second should be added or removed from - // the current month's last minute. - Leap LeapIndicator - - // MinError is a lower bound on the error between the client and server - // clocks. When the client and server are not synchronized to the same - // clock, the reported timestamps may appear to violate the principle of - // causality. In other words, the NTP server's response may indicate - // that a message was received before it was sent. In such cases, the - // minimum error may be useful. 
- MinError time.Duration - - // KissCode is a 4-character string describing the reason for a - // "kiss of death" response (stratum = 0). For a list of standard kiss - // codes, see https://tools.ietf.org/html/rfc5905#section-7.4. - KissCode string - - // Poll is the maximum interval between successive NTP polling messages. - // It is not relevant for simple NTP clients like this one. - Poll time.Duration -} - -// Validate checks if the response is valid for the purposes of time -// synchronization. -func (r *Response) Validate() error { - // Handle invalid stratum values. - if r.Stratum == 0 { - return fmt.Errorf("kiss of death received: %s", r.KissCode) - } - if r.Stratum >= maxStratum { - return errors.New("invalid stratum in response") - } - - // Handle invalid leap second indicator. - if r.Leap == LeapNotInSync { - return errors.New("invalid leap second") - } - - // Estimate the "freshness" of the time. If it exceeds the maximum - // polling interval (~36 hours), then it cannot be considered "fresh". - freshness := r.Time.Sub(r.ReferenceTime) - if freshness > maxPollInterval { - return errors.New("server clock not fresh") - } - - // Calculate the peer synchronization distance, lambda: - // lambda := RootDelay/2 + RootDispersion - // If this value exceeds MAXDISP (16s), then the time is not suitable - // for synchronization purposes. - // https://tools.ietf.org/html/rfc5905#appendix-A.5.1.1. - lambda := r.RootDelay/2 + r.RootDispersion - if lambda > maxDispersion { - return errors.New("invalid dispersion") - } - - // If the server's transmit time is before its reference time, the - // response is invalid. - if r.Time.Before(r.ReferenceTime) { - return errors.New("invalid time reported") - } - - // nil means the response is valid. - return nil -} - -// Query returns a response from the remote NTP server host. It contains -// the time at which the server transmitted the response as well as other -// useful information about the time and the remote server. 
-func Query(host string) (*Response, error) { - return QueryWithOptions(host, QueryOptions{}) -} - -// QueryWithOptions performs the same function as Query but allows for the -// customization of several query options. -func QueryWithOptions(host string, opt QueryOptions) (*Response, error) { - m, now, err := getTime(host, opt) - if err != nil { - return nil, err - } - return parseTime(m, now), nil -} - -// TimeV returns the current time using information from a remote NTP server. -// On error, it returns the local system time. The version may be 2, 3, or 4. -// -// Deprecated: TimeV is deprecated. Use QueryWithOptions instead. -func TimeV(host string, version int) (time.Time, error) { - m, recvTime, err := getTime(host, QueryOptions{Version: version}) - if err != nil { - return time.Now(), err - } - - r := parseTime(m, recvTime) - err = r.Validate() - if err != nil { - return time.Now(), err - } - - // Use the clock offset to calculate the time. - return time.Now().Add(r.ClockOffset), nil -} - -// Time returns the current time using information from a remote NTP server. -// It uses version 4 of the NTP protocol. On error, it returns the local -// system time. -func Time(host string) (time.Time, error) { - return TimeV(host, defaultNtpVersion) -} - -// getTime performs the NTP server query and returns the response message -// along with the local system time it was received. -func getTime(host string, opt QueryOptions) (*msg, ntpTime, error) { - if opt.Version == 0 { - opt.Version = defaultNtpVersion - } - if opt.Version < 2 || opt.Version > 4 { - return nil, 0, errors.New("invalid protocol version requested") - } - - // Resolve the remote NTP server address. - raddr, err := net.ResolveUDPAddr("udp", net.JoinHostPort(host, "123")) - if err != nil { - return nil, 0, err - } - - // Resolve the local address if specified as an option. 
- var laddr *net.UDPAddr - if opt.LocalAddress != "" { - laddr, err = net.ResolveUDPAddr("udp", net.JoinHostPort(opt.LocalAddress, "0")) - if err != nil { - return nil, 0, err - } - } - - // Override the port if requested. - if opt.Port != 0 { - raddr.Port = opt.Port - } - - // Prepare a "connection" to the remote server. - con, err := net.DialUDP("udp", laddr, raddr) - if err != nil { - return nil, 0, err - } - defer con.Close() - - // Set a TTL for the packet if requested. - if opt.TTL != 0 { - ipcon := ipv4.NewConn(con) - err = ipcon.SetTTL(opt.TTL) - if err != nil { - return nil, 0, err - } - } - - // Set a timeout on the connection. - if opt.Timeout == 0 { - opt.Timeout = defaultTimeout - } - con.SetDeadline(time.Now().Add(opt.Timeout)) - - // Allocate a message to hold the response. - recvMsg := new(msg) - - // Allocate a message to hold the query. - xmitMsg := new(msg) - xmitMsg.setMode(client) - xmitMsg.setVersion(opt.Version) - xmitMsg.setLeap(LeapNotInSync) - - // To ensure privacy and prevent spoofing, try to use a random 64-bit - // value for the TransmitTime. If crypto/rand couldn't generate a - // random value, fall back to using the system clock. Keep track of - // when the messsage was actually transmitted. - bits := make([]byte, 8) - _, err = rand.Read(bits) - var xmitTime time.Time - if err == nil { - xmitMsg.TransmitTime = ntpTime(binary.BigEndian.Uint64(bits)) - xmitTime = time.Now() - } else { - xmitTime = time.Now() - xmitMsg.TransmitTime = toNtpTime(xmitTime) - } - - // Transmit the query. - err = binary.Write(con, binary.BigEndian, xmitMsg) - if err != nil { - return nil, 0, err - } - - // Receive the response. - err = binary.Read(con, binary.BigEndian, recvMsg) - if err != nil { - return nil, 0, err - } - - // Keep track of the time the response was received. - delta := time.Since(xmitTime) - if delta < 0 { - // The local system may have had its clock adjusted since it - // sent the query. 
In go 1.9 and later, time.Since ensures - // that a monotonic clock is used, so delta can never be less - // than zero. In versions before 1.9, a monotonic clock is - // not used, so we have to check. - return nil, 0, errors.New("client clock ticked backwards") - } - recvTime := toNtpTime(xmitTime.Add(delta)) - - // Check for invalid fields. - if recvMsg.getMode() != server { - return nil, 0, errors.New("invalid mode in response") - } - if recvMsg.TransmitTime == ntpTime(0) { - return nil, 0, errors.New("invalid transmit time in response") - } - if recvMsg.OriginTime != xmitMsg.TransmitTime { - return nil, 0, errors.New("server response mismatch") - } - if recvMsg.ReceiveTime > recvMsg.TransmitTime { - return nil, 0, errors.New("server clock ticked backwards") - } - - // Correct the received message's origin time using the actual - // transmit time. - recvMsg.OriginTime = toNtpTime(xmitTime) - - return recvMsg, recvTime, nil -} - -// parseTime parses the NTP packet along with the packet receive time to -// generate a Response record. -func parseTime(m *msg, recvTime ntpTime) *Response { - r := &Response{ - Time: m.TransmitTime.Time(), - ClockOffset: offset(m.OriginTime, m.ReceiveTime, m.TransmitTime, recvTime), - RTT: rtt(m.OriginTime, m.ReceiveTime, m.TransmitTime, recvTime), - Precision: toInterval(m.Precision), - Stratum: m.Stratum, - ReferenceID: m.ReferenceID, - ReferenceTime: m.ReferenceTime.Time(), - RootDelay: m.RootDelay.Duration(), - RootDispersion: m.RootDispersion.Duration(), - Leap: m.getLeap(), - MinError: minError(m.OriginTime, m.ReceiveTime, m.TransmitTime, recvTime), - Poll: toInterval(m.Poll), - } - - // Calculate values depending on other calculated values - r.RootDistance = rootDistance(r.RTT, r.RootDelay, r.RootDispersion) - - // If a kiss of death was received, interpret the reference ID as - // a kiss code. 
- if r.Stratum == 0 { - r.KissCode = kissCode(r.ReferenceID) - } - - return r -} - -// The following helper functions calculate additional metadata about the -// timestamps received from an NTP server. The timestamps returned by -// the server are given the following variable names: -// -// org = Origin Timestamp (client send time) -// rec = Receive Timestamp (server receive time) -// xmt = Transmit Timestamp (server reply time) -// dst = Destination Timestamp (client receive time) - -func rtt(org, rec, xmt, dst ntpTime) time.Duration { - // round trip delay time - // rtt = (dst-org) - (xmt-rec) - a := dst.Time().Sub(org.Time()) - b := xmt.Time().Sub(rec.Time()) - rtt := a - b - if rtt < 0 { - rtt = 0 - } - return rtt -} - -func offset(org, rec, xmt, dst ntpTime) time.Duration { - // local clock offset - // offset = ((rec-org) + (xmt-dst)) / 2 - a := rec.Time().Sub(org.Time()) - b := xmt.Time().Sub(dst.Time()) - return (a + b) / time.Duration(2) -} - -func minError(org, rec, xmt, dst ntpTime) time.Duration { - // Each NTP response contains two pairs of send/receive timestamps. - // When either pair indicates a "causality violation", we calculate the - // error as the difference in time between them. The minimum error is - // the greater of the two causality violations. - var error0, error1 ntpTime - if org >= rec { - error0 = org - rec - } - if xmt >= dst { - error1 = xmt - dst - } - if error0 > error1 { - return error0.Duration() - } - return error1.Duration() -} - -func rootDistance(rtt, rootDelay, rootDisp time.Duration) time.Duration { - // The root distance is: - // the maximum error due to all causes of the local clock - // relative to the primary server. It is defined as half the - // total delay plus total dispersion plus peer jitter. 
- // (https://tools.ietf.org/html/rfc5905#appendix-A.5.5.2) - // - // In the reference implementation, it is calculated as follows: - // rootDist = max(MINDISP, rootDelay + rtt)/2 + rootDisp - // + peerDisp + PHI * (uptime - peerUptime) - // + peerJitter - // For an SNTP client which sends only a single packet, most of these - // terms are irrelevant and become 0. - totalDelay := rtt + rootDelay - return totalDelay/2 + rootDisp -} - -func toInterval(t int8) time.Duration { - switch { - case t > 0: - return time.Duration(uint64(time.Second) << uint(t)) - case t < 0: - return time.Duration(uint64(time.Second) >> uint(-t)) - default: - return time.Second - } -} - -func kissCode(id uint32) string { - isPrintable := func(ch byte) bool { return ch >= 32 && ch <= 126 } - - b := []byte{ - byte(id >> 24), - byte(id >> 16), - byte(id >> 8), - byte(id), - } - for _, ch := range b { - if !isPrintable(ch) { - return "" - } - } - return string(b) -} diff --git a/vendor/github.com/dyatlov/go-opengraph/opengraph/opengraph.go b/vendor/github.com/dyatlov/go-opengraph/opengraph/opengraph.go deleted file mode 100644 index 96feb78a5..000000000 --- a/vendor/github.com/dyatlov/go-opengraph/opengraph/opengraph.go +++ /dev/null @@ -1,365 +0,0 @@ -package opengraph - -import ( - "encoding/json" - "io" - "strconv" - "time" - - "golang.org/x/net/html" - "golang.org/x/net/html/atom" -) - -// Image defines Open Graph Image type -type Image struct { - URL string `json:"url"` - SecureURL string `json:"secure_url"` - Type string `json:"type"` - Width uint64 `json:"width"` - Height uint64 `json:"height"` - draft bool `json:"-"` -} - -// Video defines Open Graph Video type -type Video struct { - URL string `json:"url"` - SecureURL string `json:"secure_url"` - Type string `json:"type"` - Width uint64 `json:"width"` - Height uint64 `json:"height"` - draft bool `json:"-"` -} - -// Audio defines Open Graph Audio Type -type Audio struct { - URL string `json:"url"` - SecureURL string `json:"secure_url"` - 
Type string `json:"type"` - draft bool `json:"-"` -} - -// Article contain Open Graph Article structure -type Article struct { - PublishedTime *time.Time `json:"published_time"` - ModifiedTime *time.Time `json:"modified_time"` - ExpirationTime *time.Time `json:"expiration_time"` - Section string `json:"section"` - Tags []string `json:"tags"` - Authors []*Profile `json:"authors"` -} - -// Profile contains Open Graph Profile structure -type Profile struct { - FirstName string `json:"first_name"` - LastName string `json:"last_name"` - Username string `json:"username"` - Gender string `json:"gender"` -} - -// Book contains Open Graph Book structure -type Book struct { - ISBN string `json:"isbn"` - ReleaseDate *time.Time `json:"release_date"` - Tags []string `json:"tags"` - Authors []*Profile `json:"authors"` -} - -// OpenGraph contains facebook og data -type OpenGraph struct { - isArticle bool - isBook bool - isProfile bool - Type string `json:"type"` - URL string `json:"url"` - Title string `json:"title"` - Description string `json:"description"` - Determiner string `json:"determiner"` - SiteName string `json:"site_name"` - Locale string `json:"locale"` - LocalesAlternate []string `json:"locales_alternate"` - Images []*Image `json:"images"` - Audios []*Audio `json:"audios"` - Videos []*Video `json:"videos"` - Article *Article `json:"article,omitempty"` - Book *Book `json:"book,omitempty"` - Profile *Profile `json:"profile,omitempty"` -} - -// NewOpenGraph returns new instance of Open Graph structure -func NewOpenGraph() *OpenGraph { - return &OpenGraph{} -} - -// ToJSON a simple wrapper around json.Marshal -func (og *OpenGraph) ToJSON() ([]byte, error) { - return json.Marshal(og) -} - -// String return json representation of structure, or error string -func (og *OpenGraph) String() string { - data, err := og.ToJSON() - - if err != nil { - return err.Error() - } - - return string(data[:]) -} - -// ProcessHTML parses given html from Reader interface and fills up 
OpenGraph structure -func (og *OpenGraph) ProcessHTML(buffer io.Reader) error { - z := html.NewTokenizer(buffer) - for { - tt := z.Next() - switch tt { - case html.ErrorToken: - if z.Err() == io.EOF { - return nil - } - return z.Err() - case html.StartTagToken, html.SelfClosingTagToken, html.EndTagToken: - name, hasAttr := z.TagName() - if atom.Lookup(name) == atom.Body { - return nil // OpenGraph is only in head, so we don't need body - } - if atom.Lookup(name) != atom.Meta || !hasAttr { - continue - } - m := make(map[string]string) - var key, val []byte - for hasAttr { - key, val, hasAttr = z.TagAttr() - m[atom.String(key)] = string(val) - } - og.ProcessMeta(m) - } - } -} - -func (og *OpenGraph) ensureHasVideo() { - if len(og.Videos) > 0 { - return - } - og.Videos = append(og.Videos, &Video{draft: true}) -} - -func (og *OpenGraph) ensureHasImage() { - if len(og.Images) > 0 { - return - } - og.Images = append(og.Images, &Image{draft: true}) -} - -func (og *OpenGraph) ensureHasAudio() { - if len(og.Audios) > 0 { - return - } - og.Audios = append(og.Audios, &Audio{draft: true}) -} - -// ProcessMeta processes meta attributes and adds them to Open Graph structure if they are suitable for that -func (og *OpenGraph) ProcessMeta(metaAttrs map[string]string) { - switch metaAttrs["property"] { - case "og:description": - og.Description = metaAttrs["content"] - case "og:type": - og.Type = metaAttrs["content"] - switch og.Type { - case "article": - og.isArticle = true - case "book": - og.isBook = true - case "profile": - og.isProfile = true - } - case "og:title": - og.Title = metaAttrs["content"] - case "og:url": - og.URL = metaAttrs["content"] - case "og:determiner": - og.Determiner = metaAttrs["content"] - case "og:site_name": - og.SiteName = metaAttrs["content"] - case "og:locale": - og.Locale = metaAttrs["content"] - case "og:locale:alternate": - og.LocalesAlternate = append(og.LocalesAlternate, metaAttrs["content"]) - case "og:audio": - if len(og.Audios)>0 && 
og.Audios[len(og.Audios)-1].draft { - og.Audios[len(og.Audios)-1].URL = metaAttrs["content"] - og.Audios[len(og.Audios)-1].draft = false - } else { - og.Audios = append(og.Audios, &Audio{URL: metaAttrs["content"]}) - } - case "og:audio:secure_url": - og.ensureHasAudio() - og.Audios[len(og.Audios)-1].SecureURL = metaAttrs["content"] - case "og:audio:type": - og.ensureHasAudio() - og.Audios[len(og.Audios)-1].Type = metaAttrs["content"] - case "og:image": - if len(og.Images)>0 && og.Images[len(og.Images)-1].draft { - og.Images[len(og.Images)-1].URL = metaAttrs["content"] - og.Images[len(og.Images)-1].draft = false - } else { - og.Images = append(og.Images, &Image{URL: metaAttrs["content"]}) - } - case "og:image:url": - og.ensureHasImage() - og.Images[len(og.Images)-1].URL = metaAttrs["content"] - case "og:image:secure_url": - og.ensureHasImage() - og.Images[len(og.Images)-1].SecureURL = metaAttrs["content"] - case "og:image:type": - og.ensureHasImage() - og.Images[len(og.Images)-1].Type = metaAttrs["content"] - case "og:image:width": - w, err := strconv.ParseUint(metaAttrs["content"], 10, 64) - if err == nil { - og.ensureHasImage() - og.Images[len(og.Images)-1].Width = w - } - case "og:image:height": - h, err := strconv.ParseUint(metaAttrs["content"], 10, 64) - if err == nil { - og.ensureHasImage() - og.Images[len(og.Images)-1].Height = h - } - case "og:video": - if len(og.Videos)>0 && og.Videos[len(og.Videos)-1].draft { - og.Videos[len(og.Videos)-1].URL = metaAttrs["content"] - og.Videos[len(og.Videos)-1].draft = false - } else { - og.Videos = append(og.Videos, &Video{URL: metaAttrs["content"]}) - } - case "og:video:url": - og.ensureHasVideo() - og.Videos[len(og.Videos)-1].URL = metaAttrs["content"] - case "og:video:secure_url": - og.ensureHasVideo() - og.Videos[len(og.Videos)-1].SecureURL = metaAttrs["content"] - case "og:video:type": - og.ensureHasVideo() - og.Videos[len(og.Videos)-1].Type = metaAttrs["content"] - case "og:video:width": - w, err := 
strconv.ParseUint(metaAttrs["content"], 10, 64) - if err == nil { - og.ensureHasVideo() - og.Videos[len(og.Videos)-1].Width = w - } - case "og:video:height": - h, err := strconv.ParseUint(metaAttrs["content"], 10, 64) - if err == nil { - og.ensureHasVideo() - og.Videos[len(og.Videos)-1].Height = h - } - default: - if og.isArticle { - og.processArticleMeta(metaAttrs) - } else if og.isBook { - og.processBookMeta(metaAttrs) - } else if og.isProfile { - og.processProfileMeta(metaAttrs) - } - } -} - -func (og *OpenGraph) processArticleMeta(metaAttrs map[string]string) { - if og.Article == nil { - og.Article = &Article{} - } - switch metaAttrs["property"] { - case "article:published_time": - t, err := time.Parse(time.RFC3339, metaAttrs["content"]) - if err == nil { - og.Article.PublishedTime = &t - } - case "article:modified_time": - t, err := time.Parse(time.RFC3339, metaAttrs["content"]) - if err == nil { - og.Article.ModifiedTime = &t - } - case "article:expiration_time": - t, err := time.Parse(time.RFC3339, metaAttrs["content"]) - if err == nil { - og.Article.ExpirationTime = &t - } - case "article:section": - og.Article.Section = metaAttrs["content"] - case "article:tag": - og.Article.Tags = append(og.Article.Tags, metaAttrs["content"]) - case "article:author:first_name": - if len(og.Article.Authors) == 0 { - og.Article.Authors = append(og.Article.Authors, &Profile{}) - } - og.Article.Authors[len(og.Article.Authors)-1].FirstName = metaAttrs["content"] - case "article:author:last_name": - if len(og.Article.Authors) == 0 { - og.Article.Authors = append(og.Article.Authors, &Profile{}) - } - og.Article.Authors[len(og.Article.Authors)-1].LastName = metaAttrs["content"] - case "article:author:username": - if len(og.Article.Authors) == 0 { - og.Article.Authors = append(og.Article.Authors, &Profile{}) - } - og.Article.Authors[len(og.Article.Authors)-1].Username = metaAttrs["content"] - case "article:author:gender": - if len(og.Article.Authors) == 0 { - og.Article.Authors = 
append(og.Article.Authors, &Profile{}) - } - og.Article.Authors[len(og.Article.Authors)-1].Gender = metaAttrs["content"] - } -} - -func (og *OpenGraph) processBookMeta(metaAttrs map[string]string) { - if og.Book == nil { - og.Book = &Book{} - } - switch metaAttrs["property"] { - case "book:release_date": - t, err := time.Parse(time.RFC3339, metaAttrs["content"]) - if err == nil { - og.Book.ReleaseDate = &t - } - case "book:isbn": - og.Book.ISBN = metaAttrs["content"] - case "book:tag": - og.Book.Tags = append(og.Book.Tags, metaAttrs["content"]) - case "book:author:first_name": - if len(og.Book.Authors) == 0 { - og.Book.Authors = append(og.Book.Authors, &Profile{}) - } - og.Book.Authors[len(og.Book.Authors)-1].FirstName = metaAttrs["content"] - case "book:author:last_name": - if len(og.Book.Authors) == 0 { - og.Book.Authors = append(og.Book.Authors, &Profile{}) - } - og.Book.Authors[len(og.Book.Authors)-1].LastName = metaAttrs["content"] - case "book:author:username": - if len(og.Book.Authors) == 0 { - og.Book.Authors = append(og.Book.Authors, &Profile{}) - } - og.Book.Authors[len(og.Book.Authors)-1].Username = metaAttrs["content"] - case "book:author:gender": - if len(og.Book.Authors) == 0 { - og.Book.Authors = append(og.Book.Authors, &Profile{}) - } - og.Book.Authors[len(og.Book.Authors)-1].Gender = metaAttrs["content"] - } -} - -func (og *OpenGraph) processProfileMeta(metaAttrs map[string]string) { - if og.Profile == nil { - og.Profile = &Profile{} - } - switch metaAttrs["property"] { - case "profile:first_name": - og.Profile.FirstName = metaAttrs["content"] - case "profile:last_name": - og.Profile.LastName = metaAttrs["content"] - case "profile:username": - og.Profile.Username = metaAttrs["content"] - case "profile:gender": - og.Profile.Gender = metaAttrs["content"] - } -} diff --git a/vendor/github.com/miekg/dns/.codecov.yml b/vendor/github.com/miekg/dns/.codecov.yml deleted file mode 100644 index f91e5c1fe..000000000 --- 
a/vendor/github.com/miekg/dns/.codecov.yml +++ /dev/null @@ -1,8 +0,0 @@ -coverage: - status: - project: - default: - target: 40% - threshold: null - patch: false - changes: false diff --git a/vendor/github.com/miekg/dns/AUTHORS b/vendor/github.com/miekg/dns/AUTHORS deleted file mode 100644 index 196568352..000000000 --- a/vendor/github.com/miekg/dns/AUTHORS +++ /dev/null @@ -1 +0,0 @@ -Miek Gieben diff --git a/vendor/github.com/miekg/dns/CONTRIBUTORS b/vendor/github.com/miekg/dns/CONTRIBUTORS deleted file mode 100644 index 5903779d8..000000000 --- a/vendor/github.com/miekg/dns/CONTRIBUTORS +++ /dev/null @@ -1,10 +0,0 @@ -Alex A. Skinner -Andrew Tunnell-Jones -Ask Bjørn Hansen -Dave Cheney -Dusty Wilson -Marek Majkowski -Peter van Dijk -Omri Bahumi -Alex Sergeyev -James Hartig diff --git a/vendor/github.com/miekg/dns/COPYRIGHT b/vendor/github.com/miekg/dns/COPYRIGHT deleted file mode 100644 index 35702b10e..000000000 --- a/vendor/github.com/miekg/dns/COPYRIGHT +++ /dev/null @@ -1,9 +0,0 @@ -Copyright 2009 The Go Authors. All rights reserved. Use of this source code -is governed by a BSD-style license that can be found in the LICENSE file. -Extensions of the original work are copyright (c) 2011 Miek Gieben - -Copyright 2011 Miek Gieben. All rights reserved. Use of this source code is -governed by a BSD-style license that can be found in the LICENSE file. - -Copyright 2014 CloudFlare. All rights reserved. Use of this source code is -governed by a BSD-style license that can be found in the LICENSE file. diff --git a/vendor/github.com/miekg/dns/Gopkg.lock b/vendor/github.com/miekg/dns/Gopkg.lock deleted file mode 100644 index 686632207..000000000 --- a/vendor/github.com/miekg/dns/Gopkg.lock +++ /dev/null @@ -1,57 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
- - -[[projects]] - branch = "master" - digest = "1:6914c49eed986dfb8dffb33516fa129c49929d4d873f41e073c83c11c372b870" - name = "golang.org/x/crypto" - packages = [ - "ed25519", - "ed25519/internal/edwards25519", - ] - pruneopts = "" - revision = "e3636079e1a4c1f337f212cc5cd2aca108f6c900" - -[[projects]] - branch = "master" - digest = "1:08e41d63f8dac84d83797368b56cf0b339e42d0224e5e56668963c28aec95685" - name = "golang.org/x/net" - packages = [ - "bpf", - "context", - "internal/iana", - "internal/socket", - "ipv4", - "ipv6", - ] - pruneopts = "" - revision = "4dfa2610cdf3b287375bbba5b8f2a14d3b01d8de" - -[[projects]] - branch = "master" - digest = "1:b2ea75de0ccb2db2ac79356407f8a4cd8f798fe15d41b381c00abf3ae8e55ed1" - name = "golang.org/x/sync" - packages = ["errgroup"] - pruneopts = "" - revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca" - -[[projects]] - branch = "master" - digest = "1:149a432fabebb8221a80f77731b1cd63597197ded4f14af606ebe3a0959004ec" - name = "golang.org/x/sys" - packages = ["unix"] - pruneopts = "" - revision = "e4b3c5e9061176387e7cea65e4dc5853801f3fb7" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "golang.org/x/crypto/ed25519", - "golang.org/x/net/ipv4", - "golang.org/x/net/ipv6", - "golang.org/x/sync/errgroup", - "golang.org/x/sys/unix", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/vendor/github.com/miekg/dns/Gopkg.toml b/vendor/github.com/miekg/dns/Gopkg.toml deleted file mode 100644 index 85e6ff31b..000000000 --- a/vendor/github.com/miekg/dns/Gopkg.toml +++ /dev/null @@ -1,38 +0,0 @@ - -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - - -[[constraint]] - branch = "master" - name = "golang.org/x/crypto" - -[[constraint]] - branch = "master" - name = "golang.org/x/net" - -[[constraint]] - branch = "master" - name = "golang.org/x/sys" - -[[constraint]] - branch = "master" - name = "golang.org/x/sync" diff --git a/vendor/github.com/miekg/dns/LICENSE b/vendor/github.com/miekg/dns/LICENSE deleted file mode 100644 index 5763fa7fe..000000000 --- a/vendor/github.com/miekg/dns/LICENSE +++ /dev/null @@ -1,32 +0,0 @@ -Extensions of the original work are copyright (c) 2011 Miek Gieben - -As this is fork of the official Go code the same license applies: - -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - diff --git a/vendor/github.com/miekg/dns/Makefile.fuzz b/vendor/github.com/miekg/dns/Makefile.fuzz deleted file mode 100644 index dc158c4ac..000000000 --- a/vendor/github.com/miekg/dns/Makefile.fuzz +++ /dev/null @@ -1,33 +0,0 @@ -# Makefile for fuzzing -# -# Use go-fuzz and needs the tools installed. -# See https://blog.cloudflare.com/dns-parser-meet-go-fuzzer/ -# -# Installing go-fuzz: -# $ make -f Makefile.fuzz get -# Installs: -# * github.com/dvyukov/go-fuzz/go-fuzz -# * get github.com/dvyukov/go-fuzz/go-fuzz-build - -all: build - -.PHONY: build -build: - go-fuzz-build -tags fuzz github.com/miekg/dns - -.PHONY: build-newrr -build-newrr: - go-fuzz-build -func FuzzNewRR -tags fuzz github.com/miekg/dns - -.PHONY: fuzz -fuzz: - go-fuzz -bin=dns-fuzz.zip -workdir=fuzz - -.PHONY: get -get: - go get github.com/dvyukov/go-fuzz/go-fuzz - go get github.com/dvyukov/go-fuzz/go-fuzz-build - -.PHONY: clean -clean: - rm *-fuzz.zip diff --git a/vendor/github.com/miekg/dns/Makefile.release b/vendor/github.com/miekg/dns/Makefile.release deleted file mode 100644 index 8fb748e8a..000000000 --- a/vendor/github.com/miekg/dns/Makefile.release +++ /dev/null @@ -1,52 +0,0 @@ -# Makefile for releasing. 
-# -# The release is controlled from version.go. The version found there is -# used to tag the git repo, we're not building any artifects so there is nothing -# to upload to github. -# -# * Up the version in version.go -# * Run: make -f Makefile.release release -# * will *commit* your change with 'Release $VERSION' -# * push to github -# - -define GO -//+build ignore - -package main - -import ( - "fmt" - - "github.com/miekg/dns" -) - -func main() { - fmt.Println(dns.Version.String()) -} -endef - -$(file > version_release.go,$(GO)) -VERSION:=$(shell go run version_release.go) -TAG="v$(VERSION)" - -all: - @echo Use the \'release\' target to start a release $(VERSION) - rm -f version_release.go - -.PHONY: release -release: commit push - @echo Released $(VERSION) - rm -f version_release.go - -.PHONY: commit -commit: - @echo Committing release $(VERSION) - git commit -am"Release $(VERSION)" - git tag $(TAG) - -.PHONY: push -push: - @echo Pushing release $(VERSION) to master - git push --tags - git push diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md deleted file mode 100644 index fd3a2f544..000000000 --- a/vendor/github.com/miekg/dns/README.md +++ /dev/null @@ -1,169 +0,0 @@ -[![Build Status](https://travis-ci.org/miekg/dns.svg?branch=master)](https://travis-ci.org/miekg/dns) -[![Code Coverage](https://img.shields.io/codecov/c/github/miekg/dns/master.svg)](https://codecov.io/github/miekg/dns?branch=master) -[![Go Report Card](https://goreportcard.com/badge/github.com/miekg/dns)](https://goreportcard.com/report/miekg/dns) -[![](https://godoc.org/github.com/miekg/dns?status.svg)](https://godoc.org/github.com/miekg/dns) - -# Alternative (more granular) approach to a DNS library - -> Less is more. - -Complete and usable DNS library. All Resource Records are supported, including the DNSSEC types. -It follows a lean and mean philosophy. If there is stuff you should know as a DNS programmer there -isn't a convenience function for it. 
Server side and client side programming is supported, i.e. you -can build servers and resolvers with it. - -We try to keep the "master" branch as sane as possible and at the bleeding edge of standards, -avoiding breaking changes wherever reasonable. We support the last two versions of Go. - -# Goals - -* KISS; -* Fast; -* Small API. If it's easy to code in Go, don't make a function for it. - -# Users - -A not-so-up-to-date-list-that-may-be-actually-current: - -* https://github.com/coredns/coredns -* https://cloudflare.com -* https://github.com/abh/geodns -* http://www.statdns.com/ -* http://www.dnsinspect.com/ -* https://github.com/chuangbo/jianbing-dictionary-dns -* http://www.dns-lg.com/ -* https://github.com/fcambus/rrda -* https://github.com/kenshinx/godns -* https://github.com/skynetservices/skydns -* https://github.com/hashicorp/consul -* https://github.com/DevelopersPL/godnsagent -* https://github.com/duedil-ltd/discodns -* https://github.com/StalkR/dns-reverse-proxy -* https://github.com/tianon/rawdns -* https://mesosphere.github.io/mesos-dns/ -* https://pulse.turbobytes.com/ -* https://github.com/fcambus/statzone -* https://github.com/benschw/dns-clb-go -* https://github.com/corny/dnscheck for -* https://namesmith.io -* https://github.com/miekg/unbound -* https://github.com/miekg/exdns -* https://dnslookup.org -* https://github.com/looterz/grimd -* https://github.com/phamhongviet/serf-dns -* https://github.com/mehrdadrad/mylg -* https://github.com/bamarni/dockness -* https://github.com/fffaraz/microdns -* http://kelda.io -* https://github.com/ipdcode/hades -* https://github.com/StackExchange/dnscontrol/ -* https://www.dnsperf.com/ -* https://dnssectest.net/ -* https://dns.apebits.com -* https://github.com/oif/apex -* https://github.com/jedisct1/dnscrypt-proxy -* https://github.com/jedisct1/rpdns -* https://github.com/xor-gate/sshfp -* https://github.com/rs/dnstrace -* https://blitiri.com.ar/p/dnss ([github mirror](https://github.com/albertito/dnss)) -* 
https://github.com/semihalev/sdns - -Send pull request if you want to be listed here. - -# Features - -* UDP/TCP queries, IPv4 and IPv6 -* RFC 1035 zone file parsing ($INCLUDE, $ORIGIN, $TTL and $GENERATE (for all record types) are supported -* Fast -* Server side programming (mimicking the net/http package) -* Client side programming -* DNSSEC: signing, validating and key generation for DSA, RSA, ECDSA and Ed25519 -* EDNS0, NSID, Cookies -* AXFR/IXFR -* TSIG, SIG(0) -* DNS over TLS (DoT): encrypted connection between client and server over TCP -* DNS name compression - -Have fun! - -Miek Gieben - 2010-2012 - -DNS Authors 2012- - -# Building - -Building is done with the `go` tool. If you have setup your GOPATH correctly, the following should -work: - - go get github.com/miekg/dns - go build github.com/miekg/dns - -## Examples - -A short "how to use the API" is at the beginning of doc.go (this also will show -when you call `godoc github.com/miekg/dns`). - -Example programs can be found in the `github.com/miekg/exdns` repository. 
- -## Supported RFCs - -*all of them* - -* 103{4,5} - DNS standard -* 1348 - NSAP record (removed the record) -* 1982 - Serial Arithmetic -* 1876 - LOC record -* 1995 - IXFR -* 1996 - DNS notify -* 2136 - DNS Update (dynamic updates) -* 2181 - RRset definition - there is no RRset type though, just []RR -* 2537 - RSAMD5 DNS keys -* 2065 - DNSSEC (updated in later RFCs) -* 2671 - EDNS record -* 2782 - SRV record -* 2845 - TSIG record -* 2915 - NAPTR record -* 2929 - DNS IANA Considerations -* 3110 - RSASHA1 DNS keys -* 3225 - DO bit (DNSSEC OK) -* 340{1,2,3} - NAPTR record -* 3445 - Limiting the scope of (DNS)KEY -* 3597 - Unknown RRs -* 403{3,4,5} - DNSSEC + validation functions -* 4255 - SSHFP record -* 4343 - Case insensitivity -* 4408 - SPF record -* 4509 - SHA256 Hash in DS -* 4592 - Wildcards in the DNS -* 4635 - HMAC SHA TSIG -* 4701 - DHCID -* 4892 - id.server -* 5001 - NSID -* 5155 - NSEC3 record -* 5205 - HIP record -* 5702 - SHA2 in the DNS -* 5936 - AXFR -* 5966 - TCP implementation recommendations -* 6605 - ECDSA -* 6725 - IANA Registry Update -* 6742 - ILNP DNS -* 6840 - Clarifications and Implementation Notes for DNS Security -* 6844 - CAA record -* 6891 - EDNS0 update -* 6895 - DNS IANA considerations -* 6975 - Algorithm Understanding in DNSSEC -* 7043 - EUI48/EUI64 records -* 7314 - DNS (EDNS) EXPIRE Option -* 7477 - CSYNC RR -* 7828 - edns-tcp-keepalive EDNS0 Option -* 7553 - URI record -* 7858 - DNS over TLS: Initiation and Performance Considerations -* 7871 - EDNS0 Client Subnet -* 7873 - Domain Name System (DNS) Cookies (draft-ietf-dnsop-cookies) -* 8080 - EdDSA for DNSSEC - -## Loosely Based Upon - -* ldns - -* NSD - -* Net::DNS - -* GRONG - diff --git a/vendor/github.com/miekg/dns/acceptfunc.go b/vendor/github.com/miekg/dns/acceptfunc.go deleted file mode 100644 index fcc6104f2..000000000 --- a/vendor/github.com/miekg/dns/acceptfunc.go +++ /dev/null @@ -1,54 +0,0 @@ -package dns - -// MsgAcceptFunc is used early in the server code to accept or 
reject a message with RcodeFormatError. -// It returns a MsgAcceptAction to indicate what should happen with the message. -type MsgAcceptFunc func(dh Header) MsgAcceptAction - -// DefaultMsgAcceptFunc checks the request and will reject if: -// -// * isn't a request (don't respond in that case). -// * opcode isn't OpcodeQuery or OpcodeNotify -// * Zero bit isn't zero -// * has more than 1 question in the question section -// * has more than 0 RRs in the Answer section -// * has more than 0 RRs in the Authority section -// * has more than 2 RRs in the Additional section -var DefaultMsgAcceptFunc MsgAcceptFunc = defaultMsgAcceptFunc - -// MsgAcceptAction represents the action to be taken. -type MsgAcceptAction int - -const ( - MsgAccept MsgAcceptAction = iota // Accept the message - MsgReject // Reject the message with a RcodeFormatError - MsgIgnore // Ignore the error and send nothing back. -) - -var defaultMsgAcceptFunc = func(dh Header) MsgAcceptAction { - if isResponse := dh.Bits&_QR != 0; isResponse { - return MsgIgnore - } - - // Don't allow dynamic updates, because then the sections can contain a whole bunch of RRs. - opcode := int(dh.Bits>>11) & 0xF - if opcode != OpcodeQuery && opcode != OpcodeNotify { - return MsgReject - } - - if isZero := dh.Bits&_Z != 0; isZero { - return MsgReject - } - if dh.Qdcount != 1 { - return MsgReject - } - if dh.Ancount != 0 { - return MsgReject - } - if dh.Nscount != 0 { - return MsgReject - } - if dh.Arcount > 2 { - return MsgReject - } - return MsgAccept -} diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go deleted file mode 100644 index 770a946cd..000000000 --- a/vendor/github.com/miekg/dns/client.go +++ /dev/null @@ -1,496 +0,0 @@ -package dns - -// A client implementation. 
- -import ( - "bytes" - "context" - "crypto/tls" - "encoding/binary" - "io" - "net" - "strings" - "time" -) - -const ( - dnsTimeout time.Duration = 2 * time.Second - tcpIdleTimeout time.Duration = 8 * time.Second -) - -// A Conn represents a connection to a DNS server. -type Conn struct { - net.Conn // a net.Conn holding the connection - UDPSize uint16 // minimum receive buffer for UDP messages - TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) - tsigRequestMAC string -} - -// A Client defines parameters for a DNS client. -type Client struct { - Net string // if "tcp" or "tcp-tls" (DNS over TLS) a TCP query will be initiated, otherwise an UDP one (default is "" for UDP) - UDPSize uint16 // minimum receive buffer for UDP messages - TLSConfig *tls.Config // TLS connection configuration - Dialer *net.Dialer // a net.Dialer used to set local address, timeouts and more - // Timeout is a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout, - // WriteTimeout when non-zero. 
Can be overridden with net.Dialer.Timeout (see Client.ExchangeWithDialer and - // Client.Dialer) or context.Context.Deadline (see the deprecated ExchangeContext) - Timeout time.Duration - DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero - ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero - WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero - TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) - SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass - group singleflight -} - -// Exchange performs a synchronous UDP query. It sends the message m to the address -// contained in a and waits for a reply. Exchange does not retry a failed query, nor -// will it fall back to TCP in case of truncation. -// See client.Exchange for more information on setting larger buffer sizes. -func Exchange(m *Msg, a string) (r *Msg, err error) { - client := Client{Net: "udp"} - r, _, err = client.Exchange(m, a) - return r, err -} - -func (c *Client) dialTimeout() time.Duration { - if c.Timeout != 0 { - return c.Timeout - } - if c.DialTimeout != 0 { - return c.DialTimeout - } - return dnsTimeout -} - -func (c *Client) readTimeout() time.Duration { - if c.ReadTimeout != 0 { - return c.ReadTimeout - } - return dnsTimeout -} - -func (c *Client) writeTimeout() time.Duration { - if c.WriteTimeout != 0 { - return c.WriteTimeout - } - return dnsTimeout -} - -// Dial connects to the address on the named network. 
-func (c *Client) Dial(address string) (conn *Conn, err error) { - // create a new dialer with the appropriate timeout - var d net.Dialer - if c.Dialer == nil { - d = net.Dialer{Timeout: c.getTimeoutForRequest(c.dialTimeout())} - } else { - d = *c.Dialer - } - - network := c.Net - if network == "" { - network = "udp" - } - - useTLS := strings.HasPrefix(network, "tcp") && strings.HasSuffix(network, "-tls") - - conn = new(Conn) - if useTLS { - network = strings.TrimSuffix(network, "-tls") - - conn.Conn, err = tls.DialWithDialer(&d, network, address, c.TLSConfig) - } else { - conn.Conn, err = d.Dial(network, address) - } - if err != nil { - return nil, err - } - - return conn, nil -} - -// Exchange performs a synchronous query. It sends the message m to the address -// contained in a and waits for a reply. Basic use pattern with a *dns.Client: -// -// c := new(dns.Client) -// in, rtt, err := c.Exchange(message, "127.0.0.1:53") -// -// Exchange does not retry a failed query, nor will it fall back to TCP in -// case of truncation. -// It is up to the caller to create a message that allows for larger responses to be -// returned. Specifically this means adding an EDNS0 OPT RR that will advertise a larger -// buffer, see SetEdns0. 
Messages without an OPT RR will fallback to the historic limit -// of 512 bytes -// To specify a local address or a timeout, the caller has to set the `Client.Dialer` -// attribute appropriately -func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, err error) { - if !c.SingleInflight { - return c.exchange(m, address) - } - - t := "nop" - if t1, ok := TypeToString[m.Question[0].Qtype]; ok { - t = t1 - } - cl := "nop" - if cl1, ok := ClassToString[m.Question[0].Qclass]; ok { - cl = cl1 - } - r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) { - return c.exchange(m, address) - }) - if r != nil && shared { - r = r.Copy() - } - return r, rtt, err -} - -func (c *Client) exchange(m *Msg, a string) (r *Msg, rtt time.Duration, err error) { - var co *Conn - - co, err = c.Dial(a) - - if err != nil { - return nil, 0, err - } - defer co.Close() - - opt := m.IsEdns0() - // If EDNS0 is used use that for size. - if opt != nil && opt.UDPSize() >= MinMsgSize { - co.UDPSize = opt.UDPSize() - } - // Otherwise use the client's configured UDP size. - if opt == nil && c.UDPSize >= MinMsgSize { - co.UDPSize = c.UDPSize - } - - co.TsigSecret = c.TsigSecret - t := time.Now() - // write with the appropriate write timeout - co.SetWriteDeadline(t.Add(c.getTimeoutForRequest(c.writeTimeout()))) - if err = co.WriteMsg(m); err != nil { - return nil, 0, err - } - - co.SetReadDeadline(time.Now().Add(c.getTimeoutForRequest(c.readTimeout()))) - r, err = co.ReadMsg() - if err == nil && r.Id != m.Id { - err = ErrId - } - rtt = time.Since(t) - return r, rtt, err -} - -// ReadMsg reads a message from the connection co. -// If the received message contains a TSIG record the transaction signature -// is verified. This method always tries to return the message, however if an -// error is returned there are no guarantees that the returned message is a -// valid representation of the packet read. 
-func (co *Conn) ReadMsg() (*Msg, error) { - p, err := co.ReadMsgHeader(nil) - if err != nil { - return nil, err - } - - m := new(Msg) - if err := m.Unpack(p); err != nil { - // If an error was returned, we still want to allow the user to use - // the message, but naively they can just check err if they don't want - // to use an erroneous message - return m, err - } - if t := m.IsTsig(); t != nil { - if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { - return m, ErrSecret - } - // Need to work on the original message p, as that was used to calculate the tsig. - err = TsigVerify(p, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false) - } - return m, err -} - -// ReadMsgHeader reads a DNS message, parses and populates hdr (when hdr is not nil). -// Returns message as a byte slice to be parsed with Msg.Unpack later on. -// Note that error handling on the message body is not possible as only the header is parsed. -func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) { - var ( - p []byte - n int - err error - ) - - switch t := co.Conn.(type) { - case *net.TCPConn, *tls.Conn: - r := t.(io.Reader) - - // First two bytes specify the length of the entire message. - l, err := tcpMsgLen(r) - if err != nil { - return nil, err - } - p = make([]byte, l) - n, err = tcpRead(r, p) - default: - if co.UDPSize > MinMsgSize { - p = make([]byte, co.UDPSize) - } else { - p = make([]byte, MinMsgSize) - } - n, err = co.Read(p) - } - - if err != nil { - return nil, err - } else if n < headerSize { - return nil, ErrShortRead - } - - p = p[:n] - if hdr != nil { - dh, _, err := unpackMsgHdr(p, 0) - if err != nil { - return nil, err - } - *hdr = dh - } - return p, err -} - -// tcpMsgLen is a helper func to read first two bytes of stream as uint16 packet length. -func tcpMsgLen(t io.Reader) (int, error) { - p := []byte{0, 0} - n, err := t.Read(p) - if err != nil { - return 0, err - } - - // As seen with my local router/switch, returns 1 byte on the above read, - // resulting a a ShortRead. 
Just write it out (instead of loop) and read the - // other byte. - if n == 1 { - n1, err := t.Read(p[1:]) - if err != nil { - return 0, err - } - n += n1 - } - - if n != 2 { - return 0, ErrShortRead - } - l := binary.BigEndian.Uint16(p) - if l == 0 { - return 0, ErrShortRead - } - return int(l), nil -} - -// tcpRead calls TCPConn.Read enough times to fill allocated buffer. -func tcpRead(t io.Reader, p []byte) (int, error) { - n, err := t.Read(p) - if err != nil { - return n, err - } - for n < len(p) { - j, err := t.Read(p[n:]) - if err != nil { - return n, err - } - n += j - } - return n, err -} - -// Read implements the net.Conn read method. -func (co *Conn) Read(p []byte) (n int, err error) { - if co.Conn == nil { - return 0, ErrConnEmpty - } - if len(p) < 2 { - return 0, io.ErrShortBuffer - } - switch t := co.Conn.(type) { - case *net.TCPConn, *tls.Conn: - r := t.(io.Reader) - - l, err := tcpMsgLen(r) - if err != nil { - return 0, err - } - if l > len(p) { - return int(l), io.ErrShortBuffer - } - return tcpRead(r, p[:l]) - } - // UDP connection - n, err = co.Conn.Read(p) - if err != nil { - return n, err - } - return n, err -} - -// WriteMsg sends a message through the connection co. -// If the message m contains a TSIG record the transaction -// signature is calculated. -func (co *Conn) WriteMsg(m *Msg) (err error) { - var out []byte - if t := m.IsTsig(); t != nil { - mac := "" - if _, ok := co.TsigSecret[t.Hdr.Name]; !ok { - return ErrSecret - } - out, mac, err = TsigGenerate(m, co.TsigSecret[t.Hdr.Name], co.tsigRequestMAC, false) - // Set for the next read, although only used in zone transfers - co.tsigRequestMAC = mac - } else { - out, err = m.Pack() - } - if err != nil { - return err - } - if _, err = co.Write(out); err != nil { - return err - } - return nil -} - -// Write implements the net.Conn Write method. 
-func (co *Conn) Write(p []byte) (n int, err error) { - switch t := co.Conn.(type) { - case *net.TCPConn, *tls.Conn: - w := t.(io.Writer) - - lp := len(p) - if lp < 2 { - return 0, io.ErrShortBuffer - } - if lp > MaxMsgSize { - return 0, &Error{err: "message too large"} - } - l := make([]byte, 2, lp+2) - binary.BigEndian.PutUint16(l, uint16(lp)) - p = append(l, p...) - n, err := io.Copy(w, bytes.NewReader(p)) - return int(n), err - } - n, err = co.Conn.Write(p) - return n, err -} - -// Return the appropriate timeout for a specific request -func (c *Client) getTimeoutForRequest(timeout time.Duration) time.Duration { - var requestTimeout time.Duration - if c.Timeout != 0 { - requestTimeout = c.Timeout - } else { - requestTimeout = timeout - } - // net.Dialer.Timeout has priority if smaller than the timeouts computed so - // far - if c.Dialer != nil && c.Dialer.Timeout != 0 { - if c.Dialer.Timeout < requestTimeout { - requestTimeout = c.Dialer.Timeout - } - } - return requestTimeout -} - -// Dial connects to the address on the named network. -func Dial(network, address string) (conn *Conn, err error) { - conn = new(Conn) - conn.Conn, err = net.Dial(network, address) - if err != nil { - return nil, err - } - return conn, nil -} - -// ExchangeContext performs a synchronous UDP query, like Exchange. It -// additionally obeys deadlines from the passed Context. -func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error) { - client := Client{Net: "udp"} - r, _, err = client.ExchangeContext(ctx, m, a) - // ignorint rtt to leave the original ExchangeContext API unchanged, but - // this function will go away - return r, err -} - -// ExchangeConn performs a synchronous query. It sends the message m via the connection -// c and waits for a reply. The connection c is not closed by ExchangeConn. 
-// This function is going away, but can easily be mimicked: -// -// co := &dns.Conn{Conn: c} // c is your net.Conn -// co.WriteMsg(m) -// in, _ := co.ReadMsg() -// co.Close() -// -func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) { - println("dns: ExchangeConn: this function is deprecated") - co := new(Conn) - co.Conn = c - if err = co.WriteMsg(m); err != nil { - return nil, err - } - r, err = co.ReadMsg() - if err == nil && r.Id != m.Id { - err = ErrId - } - return r, err -} - -// DialTimeout acts like Dial but takes a timeout. -func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) { - client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}} - conn, err = client.Dial(address) - if err != nil { - return nil, err - } - return conn, nil -} - -// DialWithTLS connects to the address on the named network with TLS. -func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, err error) { - if !strings.HasSuffix(network, "-tls") { - network += "-tls" - } - client := Client{Net: network, TLSConfig: tlsConfig} - conn, err = client.Dial(address) - - if err != nil { - return nil, err - } - return conn, nil -} - -// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout. -func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout time.Duration) (conn *Conn, err error) { - if !strings.HasSuffix(network, "-tls") { - network += "-tls" - } - client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}, TLSConfig: tlsConfig} - conn, err = client.Dial(address) - if err != nil { - return nil, err - } - return conn, nil -} - -// ExchangeContext acts like Exchange, but honors the deadline on the provided -// context, if present. If there is both a context deadline and a configured -// timeout on the client, the earliest of the two takes effect. 
-func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, rtt time.Duration, err error) { - var timeout time.Duration - if deadline, ok := ctx.Deadline(); !ok { - timeout = 0 - } else { - timeout = time.Until(deadline) - } - // not passing the context to the underlying calls, as the API does not support - // context. For timeouts you should set up Client.Dialer and call Client.Exchange. - // TODO(tmthrgd,miekg): this is a race condition. - c.Dialer = &net.Dialer{Timeout: timeout} - return c.Exchange(m, a) -} diff --git a/vendor/github.com/miekg/dns/clientconfig.go b/vendor/github.com/miekg/dns/clientconfig.go deleted file mode 100644 index f13cfa30c..000000000 --- a/vendor/github.com/miekg/dns/clientconfig.go +++ /dev/null @@ -1,139 +0,0 @@ -package dns - -import ( - "bufio" - "io" - "os" - "strconv" - "strings" -) - -// ClientConfig wraps the contents of the /etc/resolv.conf file. -type ClientConfig struct { - Servers []string // servers to use - Search []string // suffixes to append to local name - Port string // what port to use - Ndots int // number of dots in name to trigger absolute lookup - Timeout int // seconds before giving up on packet - Attempts int // lost packets before giving up on server, not used in the package dns -} - -// ClientConfigFromFile parses a resolv.conf(5) like file and returns -// a *ClientConfig. 
-func ClientConfigFromFile(resolvconf string) (*ClientConfig, error) { - file, err := os.Open(resolvconf) - if err != nil { - return nil, err - } - defer file.Close() - return ClientConfigFromReader(file) -} - -// ClientConfigFromReader works like ClientConfigFromFile but takes an io.Reader as argument -func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) { - c := new(ClientConfig) - scanner := bufio.NewScanner(resolvconf) - c.Servers = make([]string, 0) - c.Search = make([]string, 0) - c.Port = "53" - c.Ndots = 1 - c.Timeout = 5 - c.Attempts = 2 - - for scanner.Scan() { - if err := scanner.Err(); err != nil { - return nil, err - } - line := scanner.Text() - f := strings.Fields(line) - if len(f) < 1 { - continue - } - switch f[0] { - case "nameserver": // add one name server - if len(f) > 1 { - // One more check: make sure server name is - // just an IP address. Otherwise we need DNS - // to look it up. - name := f[1] - c.Servers = append(c.Servers, name) - } - - case "domain": // set search path to just this domain - if len(f) > 1 { - c.Search = make([]string, 1) - c.Search[0] = f[1] - } else { - c.Search = make([]string, 0) - } - - case "search": // set search path to given servers - c.Search = make([]string, len(f)-1) - for i := 0; i < len(c.Search); i++ { - c.Search[i] = f[i+1] - } - - case "options": // magic options - for i := 1; i < len(f); i++ { - s := f[i] - switch { - case len(s) >= 6 && s[:6] == "ndots:": - n, _ := strconv.Atoi(s[6:]) - if n < 0 { - n = 0 - } else if n > 15 { - n = 15 - } - c.Ndots = n - case len(s) >= 8 && s[:8] == "timeout:": - n, _ := strconv.Atoi(s[8:]) - if n < 1 { - n = 1 - } - c.Timeout = n - case len(s) >= 9 && s[:9] == "attempts:": - n, _ := strconv.Atoi(s[9:]) - if n < 1 { - n = 1 - } - c.Attempts = n - case s == "rotate": - /* not imp */ - } - } - } - } - return c, nil -} - -// NameList returns all of the names that should be queried based on the -// config. 
It is based off of go's net/dns name building, but it does not -// check the length of the resulting names. -func (c *ClientConfig) NameList(name string) []string { - // if this domain is already fully qualified, no append needed. - if IsFqdn(name) { - return []string{name} - } - - // Check to see if the name has more labels than Ndots. Do this before making - // the domain fully qualified. - hasNdots := CountLabel(name) > c.Ndots - // Make the domain fully qualified. - name = Fqdn(name) - - // Make a list of names based off search. - names := []string{} - - // If name has enough dots, try that first. - if hasNdots { - names = append(names, name) - } - for _, s := range c.Search { - names = append(names, Fqdn(name+s)) - } - // If we didn't have enough dots, try after suffixes. - if !hasNdots { - names = append(names, name) - } - return names -} diff --git a/vendor/github.com/miekg/dns/dane.go b/vendor/github.com/miekg/dns/dane.go deleted file mode 100644 index 8c4a14ef1..000000000 --- a/vendor/github.com/miekg/dns/dane.go +++ /dev/null @@ -1,43 +0,0 @@ -package dns - -import ( - "crypto/sha256" - "crypto/sha512" - "crypto/x509" - "encoding/hex" - "errors" -) - -// CertificateToDANE converts a certificate to a hex string as used in the TLSA or SMIMEA records. 
-func CertificateToDANE(selector, matchingType uint8, cert *x509.Certificate) (string, error) { - switch matchingType { - case 0: - switch selector { - case 0: - return hex.EncodeToString(cert.Raw), nil - case 1: - return hex.EncodeToString(cert.RawSubjectPublicKeyInfo), nil - } - case 1: - h := sha256.New() - switch selector { - case 0: - h.Write(cert.Raw) - return hex.EncodeToString(h.Sum(nil)), nil - case 1: - h.Write(cert.RawSubjectPublicKeyInfo) - return hex.EncodeToString(h.Sum(nil)), nil - } - case 2: - h := sha512.New() - switch selector { - case 0: - h.Write(cert.Raw) - return hex.EncodeToString(h.Sum(nil)), nil - case 1: - h.Write(cert.RawSubjectPublicKeyInfo) - return hex.EncodeToString(h.Sum(nil)), nil - } - } - return "", errors.New("dns: bad MatchingType or Selector") -} diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go deleted file mode 100644 index 1778b1cad..000000000 --- a/vendor/github.com/miekg/dns/defaults.go +++ /dev/null @@ -1,288 +0,0 @@ -package dns - -import ( - "errors" - "net" - "strconv" -) - -const hexDigit = "0123456789abcdef" - -// Everything is assumed in ClassINET. - -// SetReply creates a reply message from a request message. -func (dns *Msg) SetReply(request *Msg) *Msg { - dns.Id = request.Id - dns.Response = true - dns.Opcode = request.Opcode - if dns.Opcode == OpcodeQuery { - dns.RecursionDesired = request.RecursionDesired // Copy rd bit - dns.CheckingDisabled = request.CheckingDisabled // Copy cd bit - } - dns.Rcode = RcodeSuccess - if len(request.Question) > 0 { - dns.Question = make([]Question, 1) - dns.Question[0] = request.Question[0] - } - return dns -} - -// SetQuestion creates a question message, it sets the Question -// section, generates an Id and sets the RecursionDesired (RD) -// bit to true. 
-func (dns *Msg) SetQuestion(z string, t uint16) *Msg { - dns.Id = Id() - dns.RecursionDesired = true - dns.Question = make([]Question, 1) - dns.Question[0] = Question{z, t, ClassINET} - return dns -} - -// SetNotify creates a notify message, it sets the Question -// section, generates an Id and sets the Authoritative (AA) -// bit to true. -func (dns *Msg) SetNotify(z string) *Msg { - dns.Opcode = OpcodeNotify - dns.Authoritative = true - dns.Id = Id() - dns.Question = make([]Question, 1) - dns.Question[0] = Question{z, TypeSOA, ClassINET} - return dns -} - -// SetRcode creates an error message suitable for the request. -func (dns *Msg) SetRcode(request *Msg, rcode int) *Msg { - dns.SetReply(request) - dns.Rcode = rcode - return dns -} - -// SetRcodeFormatError creates a message with FormError set. -func (dns *Msg) SetRcodeFormatError(request *Msg) *Msg { - dns.Rcode = RcodeFormatError - dns.Opcode = OpcodeQuery - dns.Response = true - dns.Authoritative = false - dns.Id = request.Id - return dns -} - -// SetUpdate makes the message a dynamic update message. It -// sets the ZONE section to: z, TypeSOA, ClassINET. -func (dns *Msg) SetUpdate(z string) *Msg { - dns.Id = Id() - dns.Response = false - dns.Opcode = OpcodeUpdate - dns.Compress = false // BIND9 cannot handle compression - dns.Question = make([]Question, 1) - dns.Question[0] = Question{z, TypeSOA, ClassINET} - return dns -} - -// SetIxfr creates message for requesting an IXFR. -func (dns *Msg) SetIxfr(z string, serial uint32, ns, mbox string) *Msg { - dns.Id = Id() - dns.Question = make([]Question, 1) - dns.Ns = make([]RR, 1) - s := new(SOA) - s.Hdr = RR_Header{z, TypeSOA, ClassINET, defaultTtl, 0} - s.Serial = serial - s.Ns = ns - s.Mbox = mbox - dns.Question[0] = Question{z, TypeIXFR, ClassINET} - dns.Ns[0] = s - return dns -} - -// SetAxfr creates message for requesting an AXFR. 
-func (dns *Msg) SetAxfr(z string) *Msg { - dns.Id = Id() - dns.Question = make([]Question, 1) - dns.Question[0] = Question{z, TypeAXFR, ClassINET} - return dns -} - -// SetTsig appends a TSIG RR to the message. -// This is only a skeleton TSIG RR that is added as the last RR in the -// additional section. The Tsig is calculated when the message is being send. -func (dns *Msg) SetTsig(z, algo string, fudge uint16, timesigned int64) *Msg { - t := new(TSIG) - t.Hdr = RR_Header{z, TypeTSIG, ClassANY, 0, 0} - t.Algorithm = algo - t.Fudge = fudge - t.TimeSigned = uint64(timesigned) - t.OrigId = dns.Id - dns.Extra = append(dns.Extra, t) - return dns -} - -// SetEdns0 appends a EDNS0 OPT RR to the message. -// TSIG should always the last RR in a message. -func (dns *Msg) SetEdns0(udpsize uint16, do bool) *Msg { - e := new(OPT) - e.Hdr.Name = "." - e.Hdr.Rrtype = TypeOPT - e.SetUDPSize(udpsize) - if do { - e.SetDo() - } - dns.Extra = append(dns.Extra, e) - return dns -} - -// IsTsig checks if the message has a TSIG record as the last record -// in the additional section. It returns the TSIG record found or nil. -func (dns *Msg) IsTsig() *TSIG { - if len(dns.Extra) > 0 { - if dns.Extra[len(dns.Extra)-1].Header().Rrtype == TypeTSIG { - return dns.Extra[len(dns.Extra)-1].(*TSIG) - } - } - return nil -} - -// IsEdns0 checks if the message has a EDNS0 (OPT) record, any EDNS0 -// record in the additional section will do. It returns the OPT record -// found or nil. -func (dns *Msg) IsEdns0() *OPT { - // EDNS0 is at the end of the additional section, start there. - // We might want to change this to *only* look at the last two - // records. So we see TSIG and/or OPT - this a slightly bigger - // change though. - for i := len(dns.Extra) - 1; i >= 0; i-- { - if dns.Extra[i].Header().Rrtype == TypeOPT { - return dns.Extra[i].(*OPT) - } - } - return nil -} - -// IsDomainName checks if s is a valid domain name, it returns the number of -// labels and true, when a domain name is valid. 
Note that non fully qualified -// domain name is considered valid, in this case the last label is counted in -// the number of labels. When false is returned the number of labels is not -// defined. Also note that this function is extremely liberal; almost any -// string is a valid domain name as the DNS is 8 bit protocol. It checks if each -// label fits in 63 characters, but there is no length check for the entire -// string s. I.e. a domain name longer than 255 characters is considered valid. -func IsDomainName(s string) (labels int, ok bool) { - _, labels, err := packDomainName(s, nil, 0, compressionMap{}, false) - return labels, err == nil -} - -// IsSubDomain checks if child is indeed a child of the parent. If child and parent -// are the same domain true is returned as well. -func IsSubDomain(parent, child string) bool { - // Entire child is contained in parent - return CompareDomainName(parent, child) == CountLabel(parent) -} - -// IsMsg sanity checks buf and returns an error if it isn't a valid DNS packet. -// The checking is performed on the binary payload. -func IsMsg(buf []byte) error { - // Header - if len(buf) < 12 { - return errors.New("dns: bad message header") - } - // Header: Opcode - // TODO(miek): more checks here, e.g. check all header bits. - return nil -} - -// IsFqdn checks if a domain name is fully qualified. -func IsFqdn(s string) bool { - l := len(s) - if l == 0 { - return false - } - return s[l-1] == '.' -} - -// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181. -// This means the RRs need to have the same type, name, and class. Returns true -// if the RR set is valid, otherwise false. 
-func IsRRset(rrset []RR) bool { - if len(rrset) == 0 { - return false - } - if len(rrset) == 1 { - return true - } - rrHeader := rrset[0].Header() - rrType := rrHeader.Rrtype - rrClass := rrHeader.Class - rrName := rrHeader.Name - - for _, rr := range rrset[1:] { - curRRHeader := rr.Header() - if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName { - // Mismatch between the records, so this is not a valid rrset for - //signing/verifying - return false - } - } - - return true -} - -// Fqdn return the fully qualified domain name from s. -// If s is already fully qualified, it behaves as the identity function. -func Fqdn(s string) string { - if IsFqdn(s) { - return s - } - return s + "." -} - -// Copied from the official Go code. - -// ReverseAddr returns the in-addr.arpa. or ip6.arpa. hostname of the IP -// address suitable for reverse DNS (PTR) record lookups or an error if it fails -// to parse the IP address. -func ReverseAddr(addr string) (arpa string, err error) { - ip := net.ParseIP(addr) - if ip == nil { - return "", &Error{err: "unrecognized address: " + addr} - } - if ip.To4() != nil { - return strconv.Itoa(int(ip[15])) + "." + strconv.Itoa(int(ip[14])) + "." + strconv.Itoa(int(ip[13])) + "." + - strconv.Itoa(int(ip[12])) + ".in-addr.arpa.", nil - } - // Must be IPv6 - buf := make([]byte, 0, len(ip)*4+len("ip6.arpa.")) - // Add it, in reverse, to the buffer - for i := len(ip) - 1; i >= 0; i-- { - v := ip[i] - buf = append(buf, hexDigit[v&0xF]) - buf = append(buf, '.') - buf = append(buf, hexDigit[v>>4]) - buf = append(buf, '.') - } - // Append "ip6.arpa." and return (buf already has the final .) - buf = append(buf, "ip6.arpa."...) - return string(buf), nil -} - -// String returns the string representation for the type t. -func (t Type) String() string { - if t1, ok := TypeToString[uint16(t)]; ok { - return t1 - } - return "TYPE" + strconv.Itoa(int(t)) -} - -// String returns the string representation for the class c. 
-func (c Class) String() string { - if s, ok := ClassToString[uint16(c)]; ok { - // Only emit mnemonics when they are unambiguous, specically ANY is in both. - if _, ok := StringToType[s]; !ok { - return s - } - } - return "CLASS" + strconv.Itoa(int(c)) -} - -// String returns the string representation for the name n. -func (n Name) String() string { - return sprintName(string(n)) -} diff --git a/vendor/github.com/miekg/dns/dns.go b/vendor/github.com/miekg/dns/dns.go deleted file mode 100644 index aefffa793..000000000 --- a/vendor/github.com/miekg/dns/dns.go +++ /dev/null @@ -1,103 +0,0 @@ -package dns - -import "strconv" - -const ( - year68 = 1 << 31 // For RFC1982 (Serial Arithmetic) calculations in 32 bits. - defaultTtl = 3600 // Default internal TTL. - - // DefaultMsgSize is the standard default for messages larger than 512 bytes. - DefaultMsgSize = 4096 - // MinMsgSize is the minimal size of a DNS packet. - MinMsgSize = 512 - // MaxMsgSize is the largest possible DNS packet. - MaxMsgSize = 65535 -) - -// Error represents a DNS error. -type Error struct{ err string } - -func (e *Error) Error() string { - if e == nil { - return "dns: " - } - return "dns: " + e.err -} - -// An RR represents a resource record. -type RR interface { - // Header returns the header of an resource record. The header contains - // everything up to the rdata. - Header() *RR_Header - // String returns the text representation of the resource record. - String() string - - // copy returns a copy of the RR - copy() RR - - // len returns the length (in octets) of the compressed or uncompressed RR in wire format. - // - // If compression is nil, the uncompressed size will be returned, otherwise the compressed - // size will be returned and domain names will be added to the map for future compression. - len(off int, compression map[string]struct{}) int - - // pack packs an RR into wire format. 
- pack(msg []byte, off int, compression compressionMap, compress bool) (headerEnd int, off1 int, err error) -} - -// RR_Header is the header all DNS resource records share. -type RR_Header struct { - Name string `dns:"cdomain-name"` - Rrtype uint16 - Class uint16 - Ttl uint32 - Rdlength uint16 // Length of data after header. -} - -// Header returns itself. This is here to make RR_Header implements the RR interface. -func (h *RR_Header) Header() *RR_Header { return h } - -// Just to implement the RR interface. -func (h *RR_Header) copy() RR { return nil } - -func (h *RR_Header) String() string { - var s string - - if h.Rrtype == TypeOPT { - s = ";" - // and maybe other things - } - - s += sprintName(h.Name) + "\t" - s += strconv.FormatInt(int64(h.Ttl), 10) + "\t" - s += Class(h.Class).String() + "\t" - s += Type(h.Rrtype).String() + "\t" - return s -} - -func (h *RR_Header) len(off int, compression map[string]struct{}) int { - l := domainNameLen(h.Name, off, compression, true) - l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2) - return l -} - -// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597. 
-func (rr *RFC3597) ToRFC3597(r RR) error { - buf := make([]byte, Len(r)*2) - headerEnd, off, err := packRR(r, buf, 0, compressionMap{}, false) - if err != nil { - return err - } - buf = buf[:off] - - hdr := *r.Header() - hdr.Rdlength = uint16(off - headerEnd) - - rfc3597, _, err := unpackRFC3597(hdr, buf, headerEnd) - if err != nil { - return err - } - - *rr = *rfc3597.(*RFC3597) - return nil -} diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go deleted file mode 100644 index 9b39d4273..000000000 --- a/vendor/github.com/miekg/dns/dnssec.go +++ /dev/null @@ -1,801 +0,0 @@ -package dns - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - _ "crypto/md5" - "crypto/rand" - "crypto/rsa" - _ "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" - "encoding/asn1" - "encoding/binary" - "encoding/hex" - "math/big" - "sort" - "strings" - "time" - - "golang.org/x/crypto/ed25519" -) - -// DNSSEC encryption algorithm codes. -const ( - _ uint8 = iota - RSAMD5 - DH - DSA - _ // Skip 4, RFC 6725, section 2.1 - RSASHA1 - DSANSEC3SHA1 - RSASHA1NSEC3SHA1 - RSASHA256 - _ // Skip 9, RFC 6725, section 2.1 - RSASHA512 - _ // Skip 11, RFC 6725, section 2.1 - ECCGOST - ECDSAP256SHA256 - ECDSAP384SHA384 - ED25519 - ED448 - INDIRECT uint8 = 252 - PRIVATEDNS uint8 = 253 // Private (experimental keys) - PRIVATEOID uint8 = 254 -) - -// AlgorithmToString is a map of algorithm IDs to algorithm names. -var AlgorithmToString = map[uint8]string{ - RSAMD5: "RSAMD5", - DH: "DH", - DSA: "DSA", - RSASHA1: "RSASHA1", - DSANSEC3SHA1: "DSA-NSEC3-SHA1", - RSASHA1NSEC3SHA1: "RSASHA1-NSEC3-SHA1", - RSASHA256: "RSASHA256", - RSASHA512: "RSASHA512", - ECCGOST: "ECC-GOST", - ECDSAP256SHA256: "ECDSAP256SHA256", - ECDSAP384SHA384: "ECDSAP384SHA384", - ED25519: "ED25519", - ED448: "ED448", - INDIRECT: "INDIRECT", - PRIVATEDNS: "PRIVATEDNS", - PRIVATEOID: "PRIVATEOID", -} - -// StringToAlgorithm is the reverse of AlgorithmToString. 
-var StringToAlgorithm = reverseInt8(AlgorithmToString) - -// AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's. -var AlgorithmToHash = map[uint8]crypto.Hash{ - RSAMD5: crypto.MD5, // Deprecated in RFC 6725 - DSA: crypto.SHA1, - RSASHA1: crypto.SHA1, - RSASHA1NSEC3SHA1: crypto.SHA1, - RSASHA256: crypto.SHA256, - ECDSAP256SHA256: crypto.SHA256, - ECDSAP384SHA384: crypto.SHA384, - RSASHA512: crypto.SHA512, - ED25519: crypto.Hash(0), -} - -// DNSSEC hashing algorithm codes. -const ( - _ uint8 = iota - SHA1 // RFC 4034 - SHA256 // RFC 4509 - GOST94 // RFC 5933 - SHA384 // Experimental - SHA512 // Experimental -) - -// HashToString is a map of hash IDs to names. -var HashToString = map[uint8]string{ - SHA1: "SHA1", - SHA256: "SHA256", - GOST94: "GOST94", - SHA384: "SHA384", - SHA512: "SHA512", -} - -// StringToHash is a map of names to hash IDs. -var StringToHash = reverseInt8(HashToString) - -// DNSKEY flag values. -const ( - SEP = 1 - REVOKE = 1 << 7 - ZONE = 1 << 8 -) - -// The RRSIG needs to be converted to wireformat with some of the rdata (the signature) missing. -type rrsigWireFmt struct { - TypeCovered uint16 - Algorithm uint8 - Labels uint8 - OrigTtl uint32 - Expiration uint32 - Inception uint32 - KeyTag uint16 - SignerName string `dns:"domain-name"` - /* No Signature */ -} - -// Used for converting DNSKEY's rdata to wirefmt. -type dnskeyWireFmt struct { - Flags uint16 - Protocol uint8 - Algorithm uint8 - PublicKey string `dns:"base64"` - /* Nothing is left out */ -} - -func divRoundUp(a, b int) int { - return (a + b - 1) / b -} - -// KeyTag calculates the keytag (or key-id) of the DNSKEY. -func (k *DNSKEY) KeyTag() uint16 { - if k == nil { - return 0 - } - var keytag int - switch k.Algorithm { - case RSAMD5: - // Look at the bottom two bytes of the modules, which the last - // item in the pubkey. We could do this faster by looking directly - // at the base64 values. But I'm lazy. 
- modulus, _ := fromBase64([]byte(k.PublicKey)) - if len(modulus) > 1 { - x := binary.BigEndian.Uint16(modulus[len(modulus)-2:]) - keytag = int(x) - } - default: - keywire := new(dnskeyWireFmt) - keywire.Flags = k.Flags - keywire.Protocol = k.Protocol - keywire.Algorithm = k.Algorithm - keywire.PublicKey = k.PublicKey - wire := make([]byte, DefaultMsgSize) - n, err := packKeyWire(keywire, wire) - if err != nil { - return 0 - } - wire = wire[:n] - for i, v := range wire { - if i&1 != 0 { - keytag += int(v) // must be larger than uint32 - } else { - keytag += int(v) << 8 - } - } - keytag += keytag >> 16 & 0xFFFF - keytag &= 0xFFFF - } - return uint16(keytag) -} - -// ToDS converts a DNSKEY record to a DS record. -func (k *DNSKEY) ToDS(h uint8) *DS { - if k == nil { - return nil - } - ds := new(DS) - ds.Hdr.Name = k.Hdr.Name - ds.Hdr.Class = k.Hdr.Class - ds.Hdr.Rrtype = TypeDS - ds.Hdr.Ttl = k.Hdr.Ttl - ds.Algorithm = k.Algorithm - ds.DigestType = h - ds.KeyTag = k.KeyTag() - - keywire := new(dnskeyWireFmt) - keywire.Flags = k.Flags - keywire.Protocol = k.Protocol - keywire.Algorithm = k.Algorithm - keywire.PublicKey = k.PublicKey - wire := make([]byte, DefaultMsgSize) - n, err := packKeyWire(keywire, wire) - if err != nil { - return nil - } - wire = wire[:n] - - owner := make([]byte, 255) - off, err1 := PackDomainName(strings.ToLower(k.Hdr.Name), owner, 0, nil, false) - if err1 != nil { - return nil - } - owner = owner[:off] - // RFC4034: - // digest = digest_algorithm( DNSKEY owner name | DNSKEY RDATA); - // "|" denotes concatenation - // DNSKEY RDATA = Flags | Protocol | Algorithm | Public Key. 
- - var hash crypto.Hash - switch h { - case SHA1: - hash = crypto.SHA1 - case SHA256: - hash = crypto.SHA256 - case SHA384: - hash = crypto.SHA384 - case SHA512: - hash = crypto.SHA512 - default: - return nil - } - - s := hash.New() - s.Write(owner) - s.Write(wire) - ds.Digest = hex.EncodeToString(s.Sum(nil)) - return ds -} - -// ToCDNSKEY converts a DNSKEY record to a CDNSKEY record. -func (k *DNSKEY) ToCDNSKEY() *CDNSKEY { - c := &CDNSKEY{DNSKEY: *k} - c.Hdr = k.Hdr - c.Hdr.Rrtype = TypeCDNSKEY - return c -} - -// ToCDS converts a DS record to a CDS record. -func (d *DS) ToCDS() *CDS { - c := &CDS{DS: *d} - c.Hdr = d.Hdr - c.Hdr.Rrtype = TypeCDS - return c -} - -// Sign signs an RRSet. The signature needs to be filled in with the values: -// Inception, Expiration, KeyTag, SignerName and Algorithm. The rest is copied -// from the RRset. Sign returns a non-nill error when the signing went OK. -// There is no check if RRSet is a proper (RFC 2181) RRSet. If OrigTTL is non -// zero, it is used as-is, otherwise the TTL of the RRset is used as the -// OrigTTL. 
-func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error { - if k == nil { - return ErrPrivKey - } - // s.Inception and s.Expiration may be 0 (rollover etc.), the rest must be set - if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { - return ErrKey - } - - rr.Hdr.Rrtype = TypeRRSIG - rr.Hdr.Name = rrset[0].Header().Name - rr.Hdr.Class = rrset[0].Header().Class - if rr.OrigTtl == 0 { // If set don't override - rr.OrigTtl = rrset[0].Header().Ttl - } - rr.TypeCovered = rrset[0].Header().Rrtype - rr.Labels = uint8(CountLabel(rrset[0].Header().Name)) - - if strings.HasPrefix(rrset[0].Header().Name, "*") { - rr.Labels-- // wildcard, remove from label count - } - - sigwire := new(rrsigWireFmt) - sigwire.TypeCovered = rr.TypeCovered - sigwire.Algorithm = rr.Algorithm - sigwire.Labels = rr.Labels - sigwire.OrigTtl = rr.OrigTtl - sigwire.Expiration = rr.Expiration - sigwire.Inception = rr.Inception - sigwire.KeyTag = rr.KeyTag - // For signing, lowercase this name - sigwire.SignerName = strings.ToLower(rr.SignerName) - - // Create the desired binary blob - signdata := make([]byte, DefaultMsgSize) - n, err := packSigWire(sigwire, signdata) - if err != nil { - return err - } - signdata = signdata[:n] - wire, err := rawSignatureData(rrset, rr) - if err != nil { - return err - } - - hash, ok := AlgorithmToHash[rr.Algorithm] - if !ok { - return ErrAlg - } - - switch rr.Algorithm { - case ED25519: - // ed25519 signs the raw message and performs hashing internally. - // All other supported signature schemes operate over the pre-hashed - // message, and thus ed25519 must be handled separately here. - // - // The raw message is passed directly into sign and crypto.Hash(0) is - // used to signal to the crypto.Signer that the data has not been hashed. 
- signature, err := sign(k, append(signdata, wire...), crypto.Hash(0), rr.Algorithm) - if err != nil { - return err - } - - rr.Signature = toBase64(signature) - default: - h := hash.New() - h.Write(signdata) - h.Write(wire) - - signature, err := sign(k, h.Sum(nil), hash, rr.Algorithm) - if err != nil { - return err - } - - rr.Signature = toBase64(signature) - } - - return nil -} - -func sign(k crypto.Signer, hashed []byte, hash crypto.Hash, alg uint8) ([]byte, error) { - signature, err := k.Sign(rand.Reader, hashed, hash) - if err != nil { - return nil, err - } - - switch alg { - case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512: - return signature, nil - - case ECDSAP256SHA256, ECDSAP384SHA384: - ecdsaSignature := &struct { - R, S *big.Int - }{} - if _, err := asn1.Unmarshal(signature, ecdsaSignature); err != nil { - return nil, err - } - - var intlen int - switch alg { - case ECDSAP256SHA256: - intlen = 32 - case ECDSAP384SHA384: - intlen = 48 - } - - signature := intToBytes(ecdsaSignature.R, intlen) - signature = append(signature, intToBytes(ecdsaSignature.S, intlen)...) - return signature, nil - - // There is no defined interface for what a DSA backed crypto.Signer returns - case DSA, DSANSEC3SHA1: - // t := divRoundUp(divRoundUp(p.PublicKey.Y.BitLen(), 8)-64, 8) - // signature := []byte{byte(t)} - // signature = append(signature, intToBytes(r1, 20)...) - // signature = append(signature, intToBytes(s1, 20)...) - // rr.Signature = signature - - case ED25519: - return signature, nil - } - - return nil, ErrAlg -} - -// Verify validates an RRSet with the signature and key. This is only the -// cryptographic test, the signature validity period must be checked separately. -// This function copies the rdata of some RRs (to lowercase domain names) for the validation to work. 
-func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { - // First the easy checks - if !IsRRset(rrset) { - return ErrRRset - } - if rr.KeyTag != k.KeyTag() { - return ErrKey - } - if rr.Hdr.Class != k.Hdr.Class { - return ErrKey - } - if rr.Algorithm != k.Algorithm { - return ErrKey - } - if !strings.EqualFold(rr.SignerName, k.Hdr.Name) { - return ErrKey - } - if k.Protocol != 3 { - return ErrKey - } - - // IsRRset checked that we have at least one RR and that the RRs in - // the set have consistent type, class, and name. Also check that type and - // class matches the RRSIG record. - if rrset[0].Header().Class != rr.Hdr.Class { - return ErrRRset - } - if rrset[0].Header().Rrtype != rr.TypeCovered { - return ErrRRset - } - - // RFC 4035 5.3.2. Reconstructing the Signed Data - // Copy the sig, except the rrsig data - sigwire := new(rrsigWireFmt) - sigwire.TypeCovered = rr.TypeCovered - sigwire.Algorithm = rr.Algorithm - sigwire.Labels = rr.Labels - sigwire.OrigTtl = rr.OrigTtl - sigwire.Expiration = rr.Expiration - sigwire.Inception = rr.Inception - sigwire.KeyTag = rr.KeyTag - sigwire.SignerName = strings.ToLower(rr.SignerName) - // Create the desired binary blob - signeddata := make([]byte, DefaultMsgSize) - n, err := packSigWire(sigwire, signeddata) - if err != nil { - return err - } - signeddata = signeddata[:n] - wire, err := rawSignatureData(rrset, rr) - if err != nil { - return err - } - - sigbuf := rr.sigBuf() // Get the binary signature data - if rr.Algorithm == PRIVATEDNS { // PRIVATEOID - // TODO(miek) - // remove the domain name and assume its ours? - } - - hash, ok := AlgorithmToHash[rr.Algorithm] - if !ok { - return ErrAlg - } - - switch rr.Algorithm { - case RSASHA1, RSASHA1NSEC3SHA1, RSASHA256, RSASHA512, RSAMD5: - // TODO(mg): this can be done quicker, ie. cache the pubkey data somewhere?? 
- pubkey := k.publicKeyRSA() // Get the key - if pubkey == nil { - return ErrKey - } - - h := hash.New() - h.Write(signeddata) - h.Write(wire) - return rsa.VerifyPKCS1v15(pubkey, hash, h.Sum(nil), sigbuf) - - case ECDSAP256SHA256, ECDSAP384SHA384: - pubkey := k.publicKeyECDSA() - if pubkey == nil { - return ErrKey - } - - // Split sigbuf into the r and s coordinates - r := new(big.Int).SetBytes(sigbuf[:len(sigbuf)/2]) - s := new(big.Int).SetBytes(sigbuf[len(sigbuf)/2:]) - - h := hash.New() - h.Write(signeddata) - h.Write(wire) - if ecdsa.Verify(pubkey, h.Sum(nil), r, s) { - return nil - } - return ErrSig - - case ED25519: - pubkey := k.publicKeyED25519() - if pubkey == nil { - return ErrKey - } - - if ed25519.Verify(pubkey, append(signeddata, wire...), sigbuf) { - return nil - } - return ErrSig - - default: - return ErrAlg - } -} - -// ValidityPeriod uses RFC1982 serial arithmetic to calculate -// if a signature period is valid. If t is the zero time, the -// current time is taken other t is. Returns true if the signature -// is valid at the given time, otherwise returns false. -func (rr *RRSIG) ValidityPeriod(t time.Time) bool { - var utc int64 - if t.IsZero() { - utc = time.Now().UTC().Unix() - } else { - utc = t.UTC().Unix() - } - modi := (int64(rr.Inception) - utc) / year68 - mode := (int64(rr.Expiration) - utc) / year68 - ti := int64(rr.Inception) + modi*year68 - te := int64(rr.Expiration) + mode*year68 - return ti <= utc && utc <= te -} - -// Return the signatures base64 encodedig sigdata as a byte slice. -func (rr *RRSIG) sigBuf() []byte { - sigbuf, err := fromBase64([]byte(rr.Signature)) - if err != nil { - return nil - } - return sigbuf -} - -// publicKeyRSA returns the RSA public key from a DNSKEY record. 
-func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey { - keybuf, err := fromBase64([]byte(k.PublicKey)) - if err != nil { - return nil - } - - if len(keybuf) < 1+1+64 { - // Exponent must be at least 1 byte and modulus at least 64 - return nil - } - - // RFC 2537/3110, section 2. RSA Public KEY Resource Records - // Length is in the 0th byte, unless its zero, then it - // it in bytes 1 and 2 and its a 16 bit number - explen := uint16(keybuf[0]) - keyoff := 1 - if explen == 0 { - explen = uint16(keybuf[1])<<8 | uint16(keybuf[2]) - keyoff = 3 - } - - if explen > 4 || explen == 0 || keybuf[keyoff] == 0 { - // Exponent larger than supported by the crypto package, - // empty, or contains prohibited leading zero. - return nil - } - - modoff := keyoff + int(explen) - modlen := len(keybuf) - modoff - if modlen < 64 || modlen > 512 || keybuf[modoff] == 0 { - // Modulus is too small, large, or contains prohibited leading zero. - return nil - } - - pubkey := new(rsa.PublicKey) - - expo := uint64(0) - for i := 0; i < int(explen); i++ { - expo <<= 8 - expo |= uint64(keybuf[keyoff+i]) - } - if expo > 1<<31-1 { - // Larger exponent than supported by the crypto package. - return nil - } - pubkey.E = int(expo) - - pubkey.N = big.NewInt(0) - pubkey.N.SetBytes(keybuf[modoff:]) - - return pubkey -} - -// publicKeyECDSA returns the Curve public key from the DNSKEY record. 
-func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey { - keybuf, err := fromBase64([]byte(k.PublicKey)) - if err != nil { - return nil - } - pubkey := new(ecdsa.PublicKey) - switch k.Algorithm { - case ECDSAP256SHA256: - pubkey.Curve = elliptic.P256() - if len(keybuf) != 64 { - // wrongly encoded key - return nil - } - case ECDSAP384SHA384: - pubkey.Curve = elliptic.P384() - if len(keybuf) != 96 { - // Wrongly encoded key - return nil - } - } - pubkey.X = big.NewInt(0) - pubkey.X.SetBytes(keybuf[:len(keybuf)/2]) - pubkey.Y = big.NewInt(0) - pubkey.Y.SetBytes(keybuf[len(keybuf)/2:]) - return pubkey -} - -func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey { - keybuf, err := fromBase64([]byte(k.PublicKey)) - if err != nil { - return nil - } - if len(keybuf) < 22 { - return nil - } - t, keybuf := int(keybuf[0]), keybuf[1:] - size := 64 + t*8 - q, keybuf := keybuf[:20], keybuf[20:] - if len(keybuf) != 3*size { - return nil - } - p, keybuf := keybuf[:size], keybuf[size:] - g, y := keybuf[:size], keybuf[size:] - pubkey := new(dsa.PublicKey) - pubkey.Parameters.Q = big.NewInt(0).SetBytes(q) - pubkey.Parameters.P = big.NewInt(0).SetBytes(p) - pubkey.Parameters.G = big.NewInt(0).SetBytes(g) - pubkey.Y = big.NewInt(0).SetBytes(y) - return pubkey -} - -func (k *DNSKEY) publicKeyED25519() ed25519.PublicKey { - keybuf, err := fromBase64([]byte(k.PublicKey)) - if err != nil { - return nil - } - if len(keybuf) != ed25519.PublicKeySize { - return nil - } - return keybuf -} - -type wireSlice [][]byte - -func (p wireSlice) Len() int { return len(p) } -func (p wireSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } -func (p wireSlice) Less(i, j int) bool { - _, ioff, _ := UnpackDomainName(p[i], 0) - _, joff, _ := UnpackDomainName(p[j], 0) - return bytes.Compare(p[i][ioff+10:], p[j][joff+10:]) < 0 -} - -// Return the raw signature data. 
-func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) { - wires := make(wireSlice, len(rrset)) - for i, r := range rrset { - r1 := r.copy() - r1.Header().Ttl = s.OrigTtl - labels := SplitDomainName(r1.Header().Name) - // 6.2. Canonical RR Form. (4) - wildcards - if len(labels) > int(s.Labels) { - // Wildcard - r1.Header().Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "." - } - // RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase - r1.Header().Name = strings.ToLower(r1.Header().Name) - // 6.2. Canonical RR Form. (3) - domain rdata to lowercase. - // NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR, - // HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX, - // SRV, DNAME, A6 - // - // RFC 6840 - Clarifications and Implementation Notes for DNS Security (DNSSEC): - // Section 6.2 of [RFC4034] also erroneously lists HINFO as a record - // that needs conversion to lowercase, and twice at that. Since HINFO - // records contain no domain names, they are not subject to case - // conversion. 
- switch x := r1.(type) { - case *NS: - x.Ns = strings.ToLower(x.Ns) - case *MD: - x.Md = strings.ToLower(x.Md) - case *MF: - x.Mf = strings.ToLower(x.Mf) - case *CNAME: - x.Target = strings.ToLower(x.Target) - case *SOA: - x.Ns = strings.ToLower(x.Ns) - x.Mbox = strings.ToLower(x.Mbox) - case *MB: - x.Mb = strings.ToLower(x.Mb) - case *MG: - x.Mg = strings.ToLower(x.Mg) - case *MR: - x.Mr = strings.ToLower(x.Mr) - case *PTR: - x.Ptr = strings.ToLower(x.Ptr) - case *MINFO: - x.Rmail = strings.ToLower(x.Rmail) - x.Email = strings.ToLower(x.Email) - case *MX: - x.Mx = strings.ToLower(x.Mx) - case *RP: - x.Mbox = strings.ToLower(x.Mbox) - x.Txt = strings.ToLower(x.Txt) - case *AFSDB: - x.Hostname = strings.ToLower(x.Hostname) - case *RT: - x.Host = strings.ToLower(x.Host) - case *SIG: - x.SignerName = strings.ToLower(x.SignerName) - case *PX: - x.Map822 = strings.ToLower(x.Map822) - x.Mapx400 = strings.ToLower(x.Mapx400) - case *NAPTR: - x.Replacement = strings.ToLower(x.Replacement) - case *KX: - x.Exchanger = strings.ToLower(x.Exchanger) - case *SRV: - x.Target = strings.ToLower(x.Target) - case *DNAME: - x.Target = strings.ToLower(x.Target) - } - // 6.2. Canonical RR Form. (5) - origTTL - wire := make([]byte, Len(r1)+1) // +1 to be safe(r) - off, err1 := PackRR(r1, wire, 0, nil, false) - if err1 != nil { - return nil, err1 - } - wire = wire[:off] - wires[i] = wire - } - sort.Sort(wires) - for i, wire := range wires { - if i > 0 && bytes.Equal(wire, wires[i-1]) { - continue - } - buf = append(buf, wire...) 
- } - return buf, nil -} - -func packSigWire(sw *rrsigWireFmt, msg []byte) (int, error) { - // copied from zmsg.go RRSIG packing - off, err := packUint16(sw.TypeCovered, msg, 0) - if err != nil { - return off, err - } - off, err = packUint8(sw.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(sw.Labels, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(sw.OrigTtl, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(sw.Expiration, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(sw.Inception, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(sw.KeyTag, msg, off) - if err != nil { - return off, err - } - off, err = PackDomainName(sw.SignerName, msg, off, nil, false) - if err != nil { - return off, err - } - return off, nil -} - -func packKeyWire(dw *dnskeyWireFmt, msg []byte) (int, error) { - // copied from zmsg.go DNSKEY packing - off, err := packUint16(dw.Flags, msg, 0) - if err != nil { - return off, err - } - off, err = packUint8(dw.Protocol, msg, off) - if err != nil { - return off, err - } - off, err = packUint8(dw.Algorithm, msg, off) - if err != nil { - return off, err - } - off, err = packStringBase64(dw.PublicKey, msg, off) - if err != nil { - return off, err - } - return off, nil -} diff --git a/vendor/github.com/miekg/dns/dnssec_keygen.go b/vendor/github.com/miekg/dns/dnssec_keygen.go deleted file mode 100644 index 33e913ac5..000000000 --- a/vendor/github.com/miekg/dns/dnssec_keygen.go +++ /dev/null @@ -1,178 +0,0 @@ -package dns - -import ( - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "math/big" - - "golang.org/x/crypto/ed25519" -) - -// Generate generates a DNSKEY of the given bit size. -// The public part is put inside the DNSKEY record. -// The Algorithm in the key must be set as this will define -// what kind of DNSKEY will be generated. 
-// The ECDSA algorithms imply a fixed keysize, in that case -// bits should be set to the size of the algorithm. -func (k *DNSKEY) Generate(bits int) (crypto.PrivateKey, error) { - switch k.Algorithm { - case DSA, DSANSEC3SHA1: - if bits != 1024 { - return nil, ErrKeySize - } - case RSAMD5, RSASHA1, RSASHA256, RSASHA1NSEC3SHA1: - if bits < 512 || bits > 4096 { - return nil, ErrKeySize - } - case RSASHA512: - if bits < 1024 || bits > 4096 { - return nil, ErrKeySize - } - case ECDSAP256SHA256: - if bits != 256 { - return nil, ErrKeySize - } - case ECDSAP384SHA384: - if bits != 384 { - return nil, ErrKeySize - } - case ED25519: - if bits != 256 { - return nil, ErrKeySize - } - } - - switch k.Algorithm { - case DSA, DSANSEC3SHA1: - params := new(dsa.Parameters) - if err := dsa.GenerateParameters(params, rand.Reader, dsa.L1024N160); err != nil { - return nil, err - } - priv := new(dsa.PrivateKey) - priv.PublicKey.Parameters = *params - err := dsa.GenerateKey(priv, rand.Reader) - if err != nil { - return nil, err - } - k.setPublicKeyDSA(params.Q, params.P, params.G, priv.PublicKey.Y) - return priv, nil - case RSAMD5, RSASHA1, RSASHA256, RSASHA512, RSASHA1NSEC3SHA1: - priv, err := rsa.GenerateKey(rand.Reader, bits) - if err != nil { - return nil, err - } - k.setPublicKeyRSA(priv.PublicKey.E, priv.PublicKey.N) - return priv, nil - case ECDSAP256SHA256, ECDSAP384SHA384: - var c elliptic.Curve - switch k.Algorithm { - case ECDSAP256SHA256: - c = elliptic.P256() - case ECDSAP384SHA384: - c = elliptic.P384() - } - priv, err := ecdsa.GenerateKey(c, rand.Reader) - if err != nil { - return nil, err - } - k.setPublicKeyECDSA(priv.PublicKey.X, priv.PublicKey.Y) - return priv, nil - case ED25519: - pub, priv, err := ed25519.GenerateKey(rand.Reader) - if err != nil { - return nil, err - } - k.setPublicKeyED25519(pub) - return priv, nil - default: - return nil, ErrAlg - } -} - -// Set the public key (the value E and N) -func (k *DNSKEY) setPublicKeyRSA(_E int, _N *big.Int) bool { - 
if _E == 0 || _N == nil { - return false - } - buf := exponentToBuf(_E) - buf = append(buf, _N.Bytes()...) - k.PublicKey = toBase64(buf) - return true -} - -// Set the public key for Elliptic Curves -func (k *DNSKEY) setPublicKeyECDSA(_X, _Y *big.Int) bool { - if _X == nil || _Y == nil { - return false - } - var intlen int - switch k.Algorithm { - case ECDSAP256SHA256: - intlen = 32 - case ECDSAP384SHA384: - intlen = 48 - } - k.PublicKey = toBase64(curveToBuf(_X, _Y, intlen)) - return true -} - -// Set the public key for DSA -func (k *DNSKEY) setPublicKeyDSA(_Q, _P, _G, _Y *big.Int) bool { - if _Q == nil || _P == nil || _G == nil || _Y == nil { - return false - } - buf := dsaToBuf(_Q, _P, _G, _Y) - k.PublicKey = toBase64(buf) - return true -} - -// Set the public key for Ed25519 -func (k *DNSKEY) setPublicKeyED25519(_K ed25519.PublicKey) bool { - if _K == nil { - return false - } - k.PublicKey = toBase64(_K) - return true -} - -// Set the public key (the values E and N) for RSA -// RFC 3110: Section 2. RSA Public KEY Resource Records -func exponentToBuf(_E int) []byte { - var buf []byte - i := big.NewInt(int64(_E)).Bytes() - if len(i) < 256 { - buf = make([]byte, 1, 1+len(i)) - buf[0] = uint8(len(i)) - } else { - buf = make([]byte, 3, 3+len(i)) - buf[0] = 0 - buf[1] = uint8(len(i) >> 8) - buf[2] = uint8(len(i)) - } - buf = append(buf, i...) - return buf -} - -// Set the public key for X and Y for Curve. The two -// values are just concatenated. -func curveToBuf(_X, _Y *big.Int, intlen int) []byte { - buf := intToBytes(_X, intlen) - buf = append(buf, intToBytes(_Y, intlen)...) - return buf -} - -// Set the public key for X and Y for Curve. The two -// values are just concatenated. -func dsaToBuf(_Q, _P, _G, _Y *big.Int) []byte { - t := divRoundUp(divRoundUp(_G.BitLen(), 8)-64, 8) - buf := []byte{byte(t)} - buf = append(buf, intToBytes(_Q, 20)...) - buf = append(buf, intToBytes(_P, 64+t*8)...) - buf = append(buf, intToBytes(_G, 64+t*8)...) 
- buf = append(buf, intToBytes(_Y, 64+t*8)...) - return buf -} diff --git a/vendor/github.com/miekg/dns/dnssec_keyscan.go b/vendor/github.com/miekg/dns/dnssec_keyscan.go deleted file mode 100644 index 5e6542230..000000000 --- a/vendor/github.com/miekg/dns/dnssec_keyscan.go +++ /dev/null @@ -1,352 +0,0 @@ -package dns - -import ( - "bufio" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/rsa" - "io" - "math/big" - "strconv" - "strings" - - "golang.org/x/crypto/ed25519" -) - -// NewPrivateKey returns a PrivateKey by parsing the string s. -// s should be in the same form of the BIND private key files. -func (k *DNSKEY) NewPrivateKey(s string) (crypto.PrivateKey, error) { - if s == "" || s[len(s)-1] != '\n' { // We need a closing newline - return k.ReadPrivateKey(strings.NewReader(s+"\n"), "") - } - return k.ReadPrivateKey(strings.NewReader(s), "") -} - -// ReadPrivateKey reads a private key from the io.Reader q. The string file is -// only used in error reporting. -// The public key must be known, because some cryptographic algorithms embed -// the public inside the privatekey. 
-func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, error) { - m, err := parseKey(q, file) - if m == nil { - return nil, err - } - if _, ok := m["private-key-format"]; !ok { - return nil, ErrPrivKey - } - if m["private-key-format"] != "v1.2" && m["private-key-format"] != "v1.3" { - return nil, ErrPrivKey - } - // TODO(mg): check if the pubkey matches the private key - algo, err := strconv.ParseUint(strings.SplitN(m["algorithm"], " ", 2)[0], 10, 8) - if err != nil { - return nil, ErrPrivKey - } - switch uint8(algo) { - case DSA: - priv, err := readPrivateKeyDSA(m) - if err != nil { - return nil, err - } - pub := k.publicKeyDSA() - if pub == nil { - return nil, ErrKey - } - priv.PublicKey = *pub - return priv, nil - case RSAMD5: - fallthrough - case RSASHA1: - fallthrough - case RSASHA1NSEC3SHA1: - fallthrough - case RSASHA256: - fallthrough - case RSASHA512: - priv, err := readPrivateKeyRSA(m) - if err != nil { - return nil, err - } - pub := k.publicKeyRSA() - if pub == nil { - return nil, ErrKey - } - priv.PublicKey = *pub - return priv, nil - case ECCGOST: - return nil, ErrPrivKey - case ECDSAP256SHA256: - fallthrough - case ECDSAP384SHA384: - priv, err := readPrivateKeyECDSA(m) - if err != nil { - return nil, err - } - pub := k.publicKeyECDSA() - if pub == nil { - return nil, ErrKey - } - priv.PublicKey = *pub - return priv, nil - case ED25519: - return readPrivateKeyED25519(m) - default: - return nil, ErrPrivKey - } -} - -// Read a private key (file) string and create a public key. Return the private key. 
-func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) { - p := new(rsa.PrivateKey) - p.Primes = []*big.Int{nil, nil} - for k, v := range m { - switch k { - case "modulus", "publicexponent", "privateexponent", "prime1", "prime2": - v1, err := fromBase64([]byte(v)) - if err != nil { - return nil, err - } - switch k { - case "modulus": - p.PublicKey.N = big.NewInt(0) - p.PublicKey.N.SetBytes(v1) - case "publicexponent": - i := big.NewInt(0) - i.SetBytes(v1) - p.PublicKey.E = int(i.Int64()) // int64 should be large enough - case "privateexponent": - p.D = big.NewInt(0) - p.D.SetBytes(v1) - case "prime1": - p.Primes[0] = big.NewInt(0) - p.Primes[0].SetBytes(v1) - case "prime2": - p.Primes[1] = big.NewInt(0) - p.Primes[1].SetBytes(v1) - } - case "exponent1", "exponent2", "coefficient": - // not used in Go (yet) - case "created", "publish", "activate": - // not used in Go (yet) - } - } - return p, nil -} - -func readPrivateKeyDSA(m map[string]string) (*dsa.PrivateKey, error) { - p := new(dsa.PrivateKey) - p.X = big.NewInt(0) - for k, v := range m { - switch k { - case "private_value(x)": - v1, err := fromBase64([]byte(v)) - if err != nil { - return nil, err - } - p.X.SetBytes(v1) - case "created", "publish", "activate": - /* not used in Go (yet) */ - } - } - return p, nil -} - -func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) { - p := new(ecdsa.PrivateKey) - p.D = big.NewInt(0) - // TODO: validate that the required flags are present - for k, v := range m { - switch k { - case "privatekey": - v1, err := fromBase64([]byte(v)) - if err != nil { - return nil, err - } - p.D.SetBytes(v1) - case "created", "publish", "activate": - /* not used in Go (yet) */ - } - } - return p, nil -} - -func readPrivateKeyED25519(m map[string]string) (ed25519.PrivateKey, error) { - var p ed25519.PrivateKey - // TODO: validate that the required flags are present - for k, v := range m { - switch k { - case "privatekey": - p1, err := fromBase64([]byte(v)) - 
if err != nil { - return nil, err - } - if len(p1) != ed25519.SeedSize { - return nil, ErrPrivKey - } - p = ed25519.NewKeyFromSeed(p1) - case "created", "publish", "activate": - /* not used in Go (yet) */ - } - } - return p, nil -} - -// parseKey reads a private key from r. It returns a map[string]string, -// with the key-value pairs, or an error when the file is not correct. -func parseKey(r io.Reader, file string) (map[string]string, error) { - m := make(map[string]string) - var k string - - c := newKLexer(r) - - for l, ok := c.Next(); ok; l, ok = c.Next() { - // It should alternate - switch l.value { - case zKey: - k = l.token - case zValue: - if k == "" { - return nil, &ParseError{file, "no private key seen", l} - } - - m[strings.ToLower(k)] = l.token - k = "" - } - } - - // Surface any read errors from r. - if err := c.Err(); err != nil { - return nil, &ParseError{file: file, err: err.Error()} - } - - return m, nil -} - -type klexer struct { - br io.ByteReader - - readErr error - - line int - column int - - key bool - - eol bool // end-of-line -} - -func newKLexer(r io.Reader) *klexer { - br, ok := r.(io.ByteReader) - if !ok { - br = bufio.NewReaderSize(r, 1024) - } - - return &klexer{ - br: br, - - line: 1, - - key: true, - } -} - -func (kl *klexer) Err() error { - if kl.readErr == io.EOF { - return nil - } - - return kl.readErr -} - -// readByte returns the next byte from the input -func (kl *klexer) readByte() (byte, bool) { - if kl.readErr != nil { - return 0, false - } - - c, err := kl.br.ReadByte() - if err != nil { - kl.readErr = err - return 0, false - } - - // delay the newline handling until the next token is delivered, - // fixes off-by-one errors when reporting a parse error. 
- if kl.eol { - kl.line++ - kl.column = 0 - kl.eol = false - } - - if c == '\n' { - kl.eol = true - } else { - kl.column++ - } - - return c, true -} - -func (kl *klexer) Next() (lex, bool) { - var ( - l lex - - str strings.Builder - - commt bool - ) - - for x, ok := kl.readByte(); ok; x, ok = kl.readByte() { - l.line, l.column = kl.line, kl.column - - switch x { - case ':': - if commt || !kl.key { - break - } - - kl.key = false - - // Next token is a space, eat it - kl.readByte() - - l.value = zKey - l.token = str.String() - return l, true - case ';': - commt = true - case '\n': - if commt { - // Reset a comment - commt = false - } - - kl.key = true - - l.value = zValue - l.token = str.String() - return l, true - default: - if commt { - break - } - - str.WriteByte(x) - } - } - - if kl.readErr != nil && kl.readErr != io.EOF { - // Don't return any tokens after a read error occurs. - return lex{value: zEOF}, false - } - - if str.Len() > 0 { - // Send remainder - l.value = zValue - l.token = str.String() - return l, true - } - - return lex{value: zEOF}, false -} diff --git a/vendor/github.com/miekg/dns/dnssec_privkey.go b/vendor/github.com/miekg/dns/dnssec_privkey.go deleted file mode 100644 index 0c65be17b..000000000 --- a/vendor/github.com/miekg/dns/dnssec_privkey.go +++ /dev/null @@ -1,93 +0,0 @@ -package dns - -import ( - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/rsa" - "math/big" - "strconv" - - "golang.org/x/crypto/ed25519" -) - -const format = "Private-key-format: v1.3\n" - -// PrivateKeyString converts a PrivateKey to a string. This string has the same -// format as the private-key-file of BIND9 (Private-key-format: v1.3). 
-// It needs some info from the key (the algorithm), so its a method of the DNSKEY -// It supports rsa.PrivateKey, ecdsa.PrivateKey and dsa.PrivateKey -func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string { - algorithm := strconv.Itoa(int(r.Algorithm)) - algorithm += " (" + AlgorithmToString[r.Algorithm] + ")" - - switch p := p.(type) { - case *rsa.PrivateKey: - modulus := toBase64(p.PublicKey.N.Bytes()) - e := big.NewInt(int64(p.PublicKey.E)) - publicExponent := toBase64(e.Bytes()) - privateExponent := toBase64(p.D.Bytes()) - prime1 := toBase64(p.Primes[0].Bytes()) - prime2 := toBase64(p.Primes[1].Bytes()) - // Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm - // and from: http://code.google.com/p/go/issues/detail?id=987 - one := big.NewInt(1) - p1 := big.NewInt(0).Sub(p.Primes[0], one) - q1 := big.NewInt(0).Sub(p.Primes[1], one) - exp1 := big.NewInt(0).Mod(p.D, p1) - exp2 := big.NewInt(0).Mod(p.D, q1) - coeff := big.NewInt(0).ModInverse(p.Primes[1], p.Primes[0]) - - exponent1 := toBase64(exp1.Bytes()) - exponent2 := toBase64(exp2.Bytes()) - coefficient := toBase64(coeff.Bytes()) - - return format + - "Algorithm: " + algorithm + "\n" + - "Modulus: " + modulus + "\n" + - "PublicExponent: " + publicExponent + "\n" + - "PrivateExponent: " + privateExponent + "\n" + - "Prime1: " + prime1 + "\n" + - "Prime2: " + prime2 + "\n" + - "Exponent1: " + exponent1 + "\n" + - "Exponent2: " + exponent2 + "\n" + - "Coefficient: " + coefficient + "\n" - - case *ecdsa.PrivateKey: - var intlen int - switch r.Algorithm { - case ECDSAP256SHA256: - intlen = 32 - case ECDSAP384SHA384: - intlen = 48 - } - private := toBase64(intToBytes(p.D, intlen)) - return format + - "Algorithm: " + algorithm + "\n" + - "PrivateKey: " + private + "\n" - - case *dsa.PrivateKey: - T := divRoundUp(divRoundUp(p.PublicKey.Parameters.G.BitLen(), 8)-64, 8) - prime := toBase64(intToBytes(p.PublicKey.Parameters.P, 64+T*8)) - subprime 
:= toBase64(intToBytes(p.PublicKey.Parameters.Q, 20)) - base := toBase64(intToBytes(p.PublicKey.Parameters.G, 64+T*8)) - priv := toBase64(intToBytes(p.X, 20)) - pub := toBase64(intToBytes(p.PublicKey.Y, 64+T*8)) - return format + - "Algorithm: " + algorithm + "\n" + - "Prime(p): " + prime + "\n" + - "Subprime(q): " + subprime + "\n" + - "Base(g): " + base + "\n" + - "Private_value(x): " + priv + "\n" + - "Public_value(y): " + pub + "\n" - - case ed25519.PrivateKey: - private := toBase64(p.Seed()) - return format + - "Algorithm: " + algorithm + "\n" + - "PrivateKey: " + private + "\n" - - default: - return "" - } -} diff --git a/vendor/github.com/miekg/dns/doc.go b/vendor/github.com/miekg/dns/doc.go deleted file mode 100644 index d3d7cec9e..000000000 --- a/vendor/github.com/miekg/dns/doc.go +++ /dev/null @@ -1,269 +0,0 @@ -/* -Package dns implements a full featured interface to the Domain Name System. -Both server- and client-side programming is supported. The package allows -complete control over what is sent out to the DNS. The API follows the -less-is-more principle, by presenting a small, clean interface. - -It supports (asynchronous) querying/replying, incoming/outgoing zone transfers, -TSIG, EDNS0, dynamic updates, notifies and DNSSEC validation/signing. - -Note that domain names MUST be fully qualified before sending them, unqualified -names in a message will result in a packing failure. - -Resource records are native types. They are not stored in wire format. Basic -usage pattern for creating a new resource record: - - r := new(dns.MX) - r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600} - r.Preference = 10 - r.Mx = "mx.miek.nl." - -Or directly from a string: - - mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.") - -Or when the default origin (.) 
and TTL (3600) and class (IN) suit you: - - mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl") - -Or even: - - mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek") - -In the DNS messages are exchanged, these messages contain resource records -(sets). Use pattern for creating a message: - - m := new(dns.Msg) - m.SetQuestion("miek.nl.", dns.TypeMX) - -Or when not certain if the domain name is fully qualified: - - m.SetQuestion(dns.Fqdn("miek.nl"), dns.TypeMX) - -The message m is now a message with the question section set to ask the MX -records for the miek.nl. zone. - -The following is slightly more verbose, but more flexible: - - m1 := new(dns.Msg) - m1.Id = dns.Id() - m1.RecursionDesired = true - m1.Question = make([]dns.Question, 1) - m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET} - -After creating a message it can be sent. Basic use pattern for synchronous -querying the DNS at a server configured on 127.0.0.1 and port 53: - - c := new(dns.Client) - in, rtt, err := c.Exchange(m1, "127.0.0.1:53") - -Suppressing multiple outstanding queries (with the same question, type and -class) is as easy as setting: - - c.SingleInflight = true - -More advanced options are available using a net.Dialer and the corresponding API. -For example it is possible to set a timeout, or to specify a source IP address -and port to use for the connection: - - c := new(dns.Client) - laddr := net.UDPAddr{ - IP: net.ParseIP("[::1]"), - Port: 12345, - Zone: "", - } - c.Dialer := &net.Dialer{ - Timeout: 200 * time.Millisecond, - LocalAddr: &laddr, - } - in, rtt, err := c.Exchange(m1, "8.8.8.8:53") - -If these "advanced" features are not needed, a simple UDP query can be sent, -with: - - in, err := dns.Exchange(m1, "127.0.0.1:53") - -When this functions returns you will get dns message. A dns message consists -out of four sections. -The question section: in.Question, the answer section: in.Answer, -the authority section: in.Ns and the additional section: in.Extra. 
- -Each of these sections (except the Question section) contain a []RR. Basic -use pattern for accessing the rdata of a TXT RR as the first RR in -the Answer section: - - if t, ok := in.Answer[0].(*dns.TXT); ok { - // do something with t.Txt - } - -Domain Name and TXT Character String Representations - -Both domain names and TXT character strings are converted to presentation form -both when unpacked and when converted to strings. - -For TXT character strings, tabs, carriage returns and line feeds will be -converted to \t, \r and \n respectively. Back slashes and quotations marks will -be escaped. Bytes below 32 and above 127 will be converted to \DDD form. - -For domain names, in addition to the above rules brackets, periods, spaces, -semicolons and the at symbol are escaped. - -DNSSEC - -DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It uses -public key cryptography to sign resource records. The public keys are stored in -DNSKEY records and the signatures in RRSIG records. - -Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) -bit to a request. - - m := new(dns.Msg) - m.SetEdns0(4096, true) - -Signature generation, signature verification and key generation are all supported. - -DYNAMIC UPDATES - -Dynamic updates reuses the DNS message format, but renames three of the -sections. Question is Zone, Answer is Prerequisite, Authority is Update, only -the Additional is not renamed. See RFC 2136 for the gory details. - -You can set a rather complex set of rules for the existence of absence of -certain resource records or names in a zone to specify if resource records -should be added or removed. The table from RFC 2136 supplemented with the Go -DNS function shows which functions exist to specify the prerequisites. 
- - 3.2.4 - Table Of Metavalues Used In Prerequisite Section - - CLASS TYPE RDATA Meaning Function - -------------------------------------------------------------- - ANY ANY empty Name is in use dns.NameUsed - ANY rrset empty RRset exists (value indep) dns.RRsetUsed - NONE ANY empty Name is not in use dns.NameNotUsed - NONE rrset empty RRset does not exist dns.RRsetNotUsed - zone rrset rr RRset exists (value dep) dns.Used - -The prerequisite section can also be left empty. If you have decided on the -prerequisites you can tell what RRs should be added or deleted. The next table -shows the options you have and what functions to call. - - 3.4.2.6 - Table Of Metavalues Used In Update Section - - CLASS TYPE RDATA Meaning Function - --------------------------------------------------------------- - ANY ANY empty Delete all RRsets from name dns.RemoveName - ANY rrset empty Delete an RRset dns.RemoveRRset - NONE rrset rr Delete an RR from RRset dns.Remove - zone rrset rr Add to an RRset dns.Insert - -TRANSACTION SIGNATURE - -An TSIG or transaction signature adds a HMAC TSIG record to each message sent. -The supported algorithms include: HmacMD5, HmacSHA1, HmacSHA256 and HmacSHA512. - -Basic use pattern when querying with a TSIG name "axfr." (note that these key names -must be fully qualified - as they are domain names) and the base64 secret -"so6ZGir4GPAqINNh9U5c3A==": - -If an incoming message contains a TSIG record it MUST be the last record in -the additional section (RFC2845 3.2). This means that you should make the -call to SetTsig last, right before executing the query. If you make any -changes to the RRset after calling SetTsig() the signature will be incorrect. - - c := new(dns.Client) - c.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} - m := new(dns.Msg) - m.SetQuestion("miek.nl.", dns.TypeMX) - m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) - ... 
- // When sending the TSIG RR is calculated and filled in before sending - -When requesting an zone transfer (almost all TSIG usage is when requesting zone -transfers), with TSIG, this is the basic use pattern. In this example we -request an AXFR for miek.nl. with TSIG key named "axfr." and secret -"so6ZGir4GPAqINNh9U5c3A==" and using the server 176.58.119.54: - - t := new(dns.Transfer) - m := new(dns.Msg) - t.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} - m.SetAxfr("miek.nl.") - m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) - c, err := t.In(m, "176.58.119.54:53") - for r := range c { ... } - -You can now read the records from the transfer as they come in. Each envelope -is checked with TSIG. If something is not correct an error is returned. - -Basic use pattern validating and replying to a message that has TSIG set. - - server := &dns.Server{Addr: ":53", Net: "udp"} - server.TsigSecret = map[string]string{"axfr.": "so6ZGir4GPAqINNh9U5c3A=="} - go server.ListenAndServe() - dns.HandleFunc(".", handleRequest) - - func handleRequest(w dns.ResponseWriter, r *dns.Msg) { - m := new(dns.Msg) - m.SetReply(r) - if r.IsTsig() != nil { - if w.TsigStatus() == nil { - // *Msg r has an TSIG record and it was validated - m.SetTsig("axfr.", dns.HmacMD5, 300, time.Now().Unix()) - } else { - // *Msg r has an TSIG records and it was not valided - } - } - w.WriteMsg(m) - } - -PRIVATE RRS - -RFC 6895 sets aside a range of type codes for private use. This range is 65,280 -- 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these -can be used, before requesting an official type code from IANA. - -See https://miek.nl/2014/September/21/idn-and-private-rr-in-go-dns/ for more -information. - -EDNS0 - -EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated by -RFC 6891. It defines an new RR type, the OPT RR, which is then completely -abused. 
- -Basic use pattern for creating an (empty) OPT RR: - - o := new(dns.OPT) - o.Hdr.Name = "." // MUST be the root zone, per definition. - o.Hdr.Rrtype = dns.TypeOPT - -The rdata of an OPT RR consists out of a slice of EDNS0 (RFC 6891) interfaces. -Currently only a few have been standardized: EDNS0_NSID (RFC 5001) and -EDNS0_SUBNET (draft-vandergaast-edns-client-subnet-02). Note that these options -may be combined in an OPT RR. Basic use pattern for a server to check if (and -which) options are set: - - // o is a dns.OPT - for _, s := range o.Option { - switch e := s.(type) { - case *dns.EDNS0_NSID: - // do stuff with e.Nsid - case *dns.EDNS0_SUBNET: - // access e.Family, e.Address, etc. - } - } - -SIG(0) - -From RFC 2931: - - SIG(0) provides protection for DNS transactions and requests .... - ... protection for glue records, DNS requests, protection for message headers - on requests and responses, and protection of the overall integrity of a response. - -It works like TSIG, except that SIG(0) uses public key cryptography, instead of -the shared secret approach in TSIG. Supported algorithms: DSA, ECDSAP256SHA256, -ECDSAP384SHA384, RSASHA1, RSASHA256 and RSASHA512. - -Signing subsequent messages in multi-message sessions is not implemented. -*/ -package dns diff --git a/vendor/github.com/miekg/dns/duplicate.go b/vendor/github.com/miekg/dns/duplicate.go deleted file mode 100644 index 6372e8a19..000000000 --- a/vendor/github.com/miekg/dns/duplicate.go +++ /dev/null @@ -1,25 +0,0 @@ -package dns - -//go:generate go run duplicate_generate.go - -// IsDuplicate checks of r1 and r2 are duplicates of each other, excluding the TTL. -// So this means the header data is equal *and* the RDATA is the same. Return true -// is so, otherwise false. -// It's is a protocol violation to have identical RRs in a message. 
-func IsDuplicate(r1, r2 RR) bool { - if r1.Header().Class != r2.Header().Class { - return false - } - if r1.Header().Rrtype != r2.Header().Rrtype { - return false - } - if !isDulicateName(r1.Header().Name, r2.Header().Name) { - return false - } - // ignore TTL - - return isDuplicateRdata(r1, r2) -} - -// isDulicateName checks if the domain names s1 and s2 are equal. -func isDulicateName(s1, s2 string) bool { return equal(s1, s2) } diff --git a/vendor/github.com/miekg/dns/duplicate_generate.go b/vendor/github.com/miekg/dns/duplicate_generate.go deleted file mode 100644 index 83ac1cf77..000000000 --- a/vendor/github.com/miekg/dns/duplicate_generate.go +++ /dev/null @@ -1,158 +0,0 @@ -//+build ignore - -// types_generate.go is meant to run with go generate. It will use -// go/{importer,types} to track down all the RR struct types. Then for each type -// it will generate conversion tables (TypeToRR and TypeToString) and banal -// methods (len, Header, copy) based on the struct tags. The generated source is -// written to ztypes.go, and is meant to be checked into git. -package main - -import ( - "bytes" - "fmt" - "go/format" - "go/importer" - "go/types" - "log" - "os" -) - -var packageHdr = ` -// Code generated by "go run duplicate_generate.go"; DO NOT EDIT. 
- -package dns - -` - -func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { - st, ok := t.Underlying().(*types.Struct) - if !ok { - return nil, false - } - if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { - return st, false - } - if st.Field(0).Anonymous() { - st, _ := getTypeStruct(st.Field(0).Type(), scope) - return st, true - } - return nil, false -} - -func main() { - // Import and type-check the package - pkg, err := importer.Default().Import("github.com/miekg/dns") - fatalIfErr(err) - scope := pkg.Scope() - - // Collect actual types (*X) - var namedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - - if st, _ := getTypeStruct(o.Type(), scope); st == nil { - continue - } - - if name == "PrivateRR" || name == "RFC3597" { - continue - } - if name == "OPT" || name == "ANY" || name == "IXFR" || name == "AXFR" { - continue - } - - namedTypes = append(namedTypes, o.Name()) - } - - b := &bytes.Buffer{} - b.WriteString(packageHdr) - - // Generate the giant switch that calls the correct function for each type. - fmt.Fprint(b, "// isDuplicateRdata calls the rdata specific functions\n") - fmt.Fprint(b, "func isDuplicateRdata(r1, r2 RR) bool {\n") - fmt.Fprint(b, "switch r1.Header().Rrtype {\n") - - for _, name := range namedTypes { - - o := scope.Lookup(name) - _, isEmbedded := getTypeStruct(o.Type(), scope) - if isEmbedded { - continue - } - fmt.Fprintf(b, "case Type%s:\nreturn isDuplicate%s(r1.(*%s), r2.(*%s))\n", name, name, name, name) - } - fmt.Fprintf(b, "}\nreturn false\n}\n") - - // Generate the duplicate check for each type. 
- fmt.Fprint(b, "// isDuplicate() functions\n\n") - for _, name := range namedTypes { - - o := scope.Lookup(name) - st, isEmbedded := getTypeStruct(o.Type(), scope) - if isEmbedded { - continue - } - fmt.Fprintf(b, "func isDuplicate%s(r1, r2 *%s) bool {\n", name, name) - for i := 1; i < st.NumFields(); i++ { - field := st.Field(i).Name() - o2 := func(s string) { fmt.Fprintf(b, s+"\n", field, field) } - o3 := func(s string) { fmt.Fprintf(b, s+"\n", field, field, field) } - - // For some reason, a and aaaa don't pop up as *types.Slice here (mostly like because the are - // *indirectly* defined as a slice in the net package). - if _, ok := st.Field(i).Type().(*types.Slice); ok || st.Tag(i) == `dns:"a"` || st.Tag(i) == `dns:"aaaa"` { - o2("if len(r1.%s) != len(r2.%s) {\nreturn false\n}") - - if st.Tag(i) == `dns:"cdomain-name"` || st.Tag(i) == `dns:"domain-name"` { - o3(`for i := 0; i < len(r1.%s); i++ { - if !isDulicateName(r1.%s[i], r2.%s[i]) { - return false - } - }`) - - continue - } - - o3(`for i := 0; i < len(r1.%s); i++ { - if r1.%s[i] != r2.%s[i] { - return false - } - }`) - - continue - } - - switch st.Tag(i) { - case `dns:"-"`: - // ignored - case `dns:"cdomain-name"`, `dns:"domain-name"`: - o2("if !isDulicateName(r1.%s, r2.%s) {\nreturn false\n}") - default: - o2("if r1.%s != r2.%s {\nreturn false\n}") - } - } - fmt.Fprintf(b, "return true\n}\n\n") - } - - // gofmt - res, err := format.Source(b.Bytes()) - if err != nil { - b.WriteTo(os.Stderr) - log.Fatal(err) - } - - // write result - f, err := os.Create("zduplicate.go") - fatalIfErr(err) - defer f.Close() - f.Write(res) -} - -func fatalIfErr(err error) { - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go deleted file mode 100644 index f8c60e616..000000000 --- a/vendor/github.com/miekg/dns/edns.go +++ /dev/null @@ -1,623 +0,0 @@ -package dns - -import ( - "encoding/binary" - "encoding/hex" - "errors" - "fmt" - "net" - "strconv" -) 
- -// EDNS0 Option codes. -const ( - EDNS0LLQ = 0x1 // long lived queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 - EDNS0UL = 0x2 // update lease draft: http://files.dns-sd.org/draft-sekar-dns-ul.txt - EDNS0NSID = 0x3 // nsid (See RFC 5001) - EDNS0DAU = 0x5 // DNSSEC Algorithm Understood - EDNS0DHU = 0x6 // DS Hash Understood - EDNS0N3U = 0x7 // NSEC3 Hash Understood - EDNS0SUBNET = 0x8 // client-subnet (See RFC 7871) - EDNS0EXPIRE = 0x9 // EDNS0 expire - EDNS0COOKIE = 0xa // EDNS0 Cookie - EDNS0TCPKEEPALIVE = 0xb // EDNS0 tcp keep alive (See RFC 7828) - EDNS0PADDING = 0xc // EDNS0 padding (See RFC 7830) - EDNS0LOCALSTART = 0xFDE9 // Beginning of range reserved for local/experimental use (See RFC 6891) - EDNS0LOCALEND = 0xFFFE // End of range reserved for local/experimental use (See RFC 6891) - _DO = 1 << 15 // DNSSEC OK -) - -// OPT is the EDNS0 RR appended to messages to convey extra (meta) information. -// See RFC 6891. -type OPT struct { - Hdr RR_Header - Option []EDNS0 `dns:"opt"` -} - -func (rr *OPT) String() string { - s := "\n;; OPT PSEUDOSECTION:\n; EDNS: version " + strconv.Itoa(int(rr.Version())) + "; " - if rr.Do() { - s += "flags: do; " - } else { - s += "flags: ; " - } - s += "udp: " + strconv.Itoa(int(rr.UDPSize())) - - for _, o := range rr.Option { - switch o.(type) { - case *EDNS0_NSID: - s += "\n; NSID: " + o.String() - h, e := o.pack() - var r string - if e == nil { - for _, c := range h { - r += "(" + string(c) + ")" - } - s += " " + r - } - case *EDNS0_SUBNET: - s += "\n; SUBNET: " + o.String() - case *EDNS0_COOKIE: - s += "\n; COOKIE: " + o.String() - case *EDNS0_UL: - s += "\n; UPDATE LEASE: " + o.String() - case *EDNS0_LLQ: - s += "\n; LONG LIVED QUERIES: " + o.String() - case *EDNS0_DAU: - s += "\n; DNSSEC ALGORITHM UNDERSTOOD: " + o.String() - case *EDNS0_DHU: - s += "\n; DS HASH UNDERSTOOD: " + o.String() - case *EDNS0_N3U: - s += "\n; NSEC3 HASH UNDERSTOOD: " + o.String() - case *EDNS0_LOCAL: - s += "\n; LOCAL OPT: " + 
o.String() - case *EDNS0_PADDING: - s += "\n; PADDING: " + o.String() - } - } - return s -} - -func (rr *OPT) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - for i := 0; i < len(rr.Option); i++ { - l += 4 // Account for 2-byte option code and 2-byte option length. - lo, _ := rr.Option[i].pack() - l += len(lo) - } - return l -} - -// return the old value -> delete SetVersion? - -// Version returns the EDNS version used. Only zero is defined. -func (rr *OPT) Version() uint8 { - return uint8(rr.Hdr.Ttl & 0x00FF0000 >> 16) -} - -// SetVersion sets the version of EDNS. This is usually zero. -func (rr *OPT) SetVersion(v uint8) { - rr.Hdr.Ttl = rr.Hdr.Ttl&0xFF00FFFF | uint32(v)<<16 -} - -// ExtendedRcode returns the EDNS extended RCODE field (the upper 8 bits of the TTL). -func (rr *OPT) ExtendedRcode() int { - return int(rr.Hdr.Ttl&0xFF000000>>24) << 4 -} - -// SetExtendedRcode sets the EDNS extended RCODE field. -// -// If the RCODE is not an extended RCODE, will reset the extended RCODE field to 0. -func (rr *OPT) SetExtendedRcode(v uint16) { - rr.Hdr.Ttl = rr.Hdr.Ttl&0x00FFFFFF | uint32(v>>4)<<24 -} - -// UDPSize returns the UDP buffer size. -func (rr *OPT) UDPSize() uint16 { - return rr.Hdr.Class -} - -// SetUDPSize sets the UDP buffer size. -func (rr *OPT) SetUDPSize(size uint16) { - rr.Hdr.Class = size -} - -// Do returns the value of the DO (DNSSEC OK) bit. -func (rr *OPT) Do() bool { - return rr.Hdr.Ttl&_DO == _DO -} - -// SetDo sets the DO (DNSSEC OK) bit. -// If we pass an argument, set the DO bit to that value. -// It is possible to pass 2 or more arguments. Any arguments after the 1st is silently ignored. -func (rr *OPT) SetDo(do ...bool) { - if len(do) == 1 { - if do[0] { - rr.Hdr.Ttl |= _DO - } else { - rr.Hdr.Ttl &^= _DO - } - } else { - rr.Hdr.Ttl |= _DO - } -} - -// EDNS0 defines an EDNS0 Option. An OPT RR can have multiple options appended to it. 
-type EDNS0 interface { - // Option returns the option code for the option. - Option() uint16 - // pack returns the bytes of the option data. - pack() ([]byte, error) - // unpack sets the data as found in the buffer. Is also sets - // the length of the slice as the length of the option data. - unpack([]byte) error - // String returns the string representation of the option. - String() string -} - -// EDNS0_NSID option is used to retrieve a nameserver -// identifier. When sending a request Nsid must be set to the empty string -// The identifier is an opaque string encoded as hex. -// Basic use pattern for creating an nsid option: -// -// o := new(dns.OPT) -// o.Hdr.Name = "." -// o.Hdr.Rrtype = dns.TypeOPT -// e := new(dns.EDNS0_NSID) -// e.Code = dns.EDNS0NSID -// e.Nsid = "AA" -// o.Option = append(o.Option, e) -type EDNS0_NSID struct { - Code uint16 // Always EDNS0NSID - Nsid string // This string needs to be hex encoded -} - -func (e *EDNS0_NSID) pack() ([]byte, error) { - h, err := hex.DecodeString(e.Nsid) - if err != nil { - return nil, err - } - return h, nil -} - -// Option implements the EDNS0 interface. -func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID } // Option returns the option code. -func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil } -func (e *EDNS0_NSID) String() string { return string(e.Nsid) } - -// EDNS0_SUBNET is the subnet option that is used to give the remote nameserver -// an idea of where the client lives. See RFC 7871. It can then give back a different -// answer depending on the location or network topology. -// Basic use pattern for creating an subnet option: -// -// o := new(dns.OPT) -// o.Hdr.Name = "." 
-// o.Hdr.Rrtype = dns.TypeOPT -// e := new(dns.EDNS0_SUBNET) -// e.Code = dns.EDNS0SUBNET -// e.Family = 1 // 1 for IPv4 source address, 2 for IPv6 -// e.SourceNetmask = 32 // 32 for IPV4, 128 for IPv6 -// e.SourceScope = 0 -// e.Address = net.ParseIP("127.0.0.1").To4() // for IPv4 -// // e.Address = net.ParseIP("2001:7b8:32a::2") // for IPV6 -// o.Option = append(o.Option, e) -// -// This code will parse all the available bits when unpacking (up to optlen). -// When packing it will apply SourceNetmask. If you need more advanced logic, -// patches welcome and good luck. -type EDNS0_SUBNET struct { - Code uint16 // Always EDNS0SUBNET - Family uint16 // 1 for IP, 2 for IP6 - SourceNetmask uint8 - SourceScope uint8 - Address net.IP -} - -// Option implements the EDNS0 interface. -func (e *EDNS0_SUBNET) Option() uint16 { return EDNS0SUBNET } - -func (e *EDNS0_SUBNET) pack() ([]byte, error) { - b := make([]byte, 4) - binary.BigEndian.PutUint16(b[0:], e.Family) - b[2] = e.SourceNetmask - b[3] = e.SourceScope - switch e.Family { - case 0: - // "dig" sets AddressFamily to 0 if SourceNetmask is also 0 - // We might don't need to complain either - if e.SourceNetmask != 0 { - return nil, errors.New("dns: bad address family") - } - case 1: - if e.SourceNetmask > net.IPv4len*8 { - return nil, errors.New("dns: bad netmask") - } - if len(e.Address.To4()) != net.IPv4len { - return nil, errors.New("dns: bad address") - } - ip := e.Address.To4().Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv4len*8)) - needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up - b = append(b, ip[:needLength]...) - case 2: - if e.SourceNetmask > net.IPv6len*8 { - return nil, errors.New("dns: bad netmask") - } - if len(e.Address) != net.IPv6len { - return nil, errors.New("dns: bad address") - } - ip := e.Address.Mask(net.CIDRMask(int(e.SourceNetmask), net.IPv6len*8)) - needLength := (e.SourceNetmask + 8 - 1) / 8 // division rounding up - b = append(b, ip[:needLength]...) 
- default: - return nil, errors.New("dns: bad address family") - } - return b, nil -} - -func (e *EDNS0_SUBNET) unpack(b []byte) error { - if len(b) < 4 { - return ErrBuf - } - e.Family = binary.BigEndian.Uint16(b) - e.SourceNetmask = b[2] - e.SourceScope = b[3] - switch e.Family { - case 0: - // "dig" sets AddressFamily to 0 if SourceNetmask is also 0 - // It's okay to accept such a packet - if e.SourceNetmask != 0 { - return errors.New("dns: bad address family") - } - e.Address = net.IPv4(0, 0, 0, 0) - case 1: - if e.SourceNetmask > net.IPv4len*8 || e.SourceScope > net.IPv4len*8 { - return errors.New("dns: bad netmask") - } - addr := make(net.IP, net.IPv4len) - copy(addr, b[4:]) - e.Address = addr.To16() - case 2: - if e.SourceNetmask > net.IPv6len*8 || e.SourceScope > net.IPv6len*8 { - return errors.New("dns: bad netmask") - } - addr := make(net.IP, net.IPv6len) - copy(addr, b[4:]) - e.Address = addr - default: - return errors.New("dns: bad address family") - } - return nil -} - -func (e *EDNS0_SUBNET) String() (s string) { - if e.Address == nil { - s = "" - } else if e.Address.To4() != nil { - s = e.Address.String() - } else { - s = "[" + e.Address.String() + "]" - } - s += "/" + strconv.Itoa(int(e.SourceNetmask)) + "/" + strconv.Itoa(int(e.SourceScope)) - return -} - -// The EDNS0_COOKIE option is used to add a DNS Cookie to a message. -// -// o := new(dns.OPT) -// o.Hdr.Name = "." -// o.Hdr.Rrtype = dns.TypeOPT -// e := new(dns.EDNS0_COOKIE) -// e.Code = dns.EDNS0COOKIE -// e.Cookie = "24a5ac.." -// o.Option = append(o.Option, e) -// -// The Cookie field consists out of a client cookie (RFC 7873 Section 4), that is -// always 8 bytes. It may then optionally be followed by the server cookie. The server -// cookie is of variable length, 8 to a maximum of 32 bytes. In other words: -// -// cCookie := o.Cookie[:16] -// sCookie := o.Cookie[16:] -// -// There is no guarantee that the Cookie string has a specific length. 
-type EDNS0_COOKIE struct { - Code uint16 // Always EDNS0COOKIE - Cookie string // Hex-encoded cookie data -} - -func (e *EDNS0_COOKIE) pack() ([]byte, error) { - h, err := hex.DecodeString(e.Cookie) - if err != nil { - return nil, err - } - return h, nil -} - -// Option implements the EDNS0 interface. -func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE } -func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil } -func (e *EDNS0_COOKIE) String() string { return e.Cookie } - -// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set -// an expiration on an update RR. This is helpful for clients that cannot clean -// up after themselves. This is a draft RFC and more information can be found at -// http://files.dns-sd.org/draft-sekar-dns-ul.txt -// -// o := new(dns.OPT) -// o.Hdr.Name = "." -// o.Hdr.Rrtype = dns.TypeOPT -// e := new(dns.EDNS0_UL) -// e.Code = dns.EDNS0UL -// e.Lease = 120 // in seconds -// o.Option = append(o.Option, e) -type EDNS0_UL struct { - Code uint16 // Always EDNS0UL - Lease uint32 -} - -// Option implements the EDNS0 interface. -func (e *EDNS0_UL) Option() uint16 { return EDNS0UL } -func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) } - -// Copied: http://golang.org/src/pkg/net/dnsmsg.go -func (e *EDNS0_UL) pack() ([]byte, error) { - b := make([]byte, 4) - binary.BigEndian.PutUint32(b, e.Lease) - return b, nil -} - -func (e *EDNS0_UL) unpack(b []byte) error { - if len(b) < 4 { - return ErrBuf - } - e.Lease = binary.BigEndian.Uint32(b) - return nil -} - -// EDNS0_LLQ stands for Long Lived Queries: http://tools.ietf.org/html/draft-sekar-dns-llq-01 -// Implemented for completeness, as the EDNS0 type code is assigned. -type EDNS0_LLQ struct { - Code uint16 // Always EDNS0LLQ - Version uint16 - Opcode uint16 - Error uint16 - Id uint64 - LeaseLife uint32 -} - -// Option implements the EDNS0 interface. 
-func (e *EDNS0_LLQ) Option() uint16 { return EDNS0LLQ } - -func (e *EDNS0_LLQ) pack() ([]byte, error) { - b := make([]byte, 18) - binary.BigEndian.PutUint16(b[0:], e.Version) - binary.BigEndian.PutUint16(b[2:], e.Opcode) - binary.BigEndian.PutUint16(b[4:], e.Error) - binary.BigEndian.PutUint64(b[6:], e.Id) - binary.BigEndian.PutUint32(b[14:], e.LeaseLife) - return b, nil -} - -func (e *EDNS0_LLQ) unpack(b []byte) error { - if len(b) < 18 { - return ErrBuf - } - e.Version = binary.BigEndian.Uint16(b[0:]) - e.Opcode = binary.BigEndian.Uint16(b[2:]) - e.Error = binary.BigEndian.Uint16(b[4:]) - e.Id = binary.BigEndian.Uint64(b[6:]) - e.LeaseLife = binary.BigEndian.Uint32(b[14:]) - return nil -} - -func (e *EDNS0_LLQ) String() string { - s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) + - " " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(uint64(e.Id), 10) + - " " + strconv.FormatUint(uint64(e.LeaseLife), 10) - return s -} - -// EDNS0_DUA implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975. -type EDNS0_DAU struct { - Code uint16 // Always EDNS0DAU - AlgCode []uint8 -} - -// Option implements the EDNS0 interface. -func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU } -func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil } -func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil } - -func (e *EDNS0_DAU) String() string { - s := "" - for i := 0; i < len(e.AlgCode); i++ { - if a, ok := AlgorithmToString[e.AlgCode[i]]; ok { - s += " " + a - } else { - s += " " + strconv.Itoa(int(e.AlgCode[i])) - } - } - return s -} - -// EDNS0_DHU implements the EDNS0 "DS Hash Understood" option. See RFC 6975. -type EDNS0_DHU struct { - Code uint16 // Always EDNS0DHU - AlgCode []uint8 -} - -// Option implements the EDNS0 interface. 
-func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU } -func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil } -func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil } - -func (e *EDNS0_DHU) String() string { - s := "" - for i := 0; i < len(e.AlgCode); i++ { - if a, ok := HashToString[e.AlgCode[i]]; ok { - s += " " + a - } else { - s += " " + strconv.Itoa(int(e.AlgCode[i])) - } - } - return s -} - -// EDNS0_N3U implements the EDNS0 "NSEC3 Hash Understood" option. See RFC 6975. -type EDNS0_N3U struct { - Code uint16 // Always EDNS0N3U - AlgCode []uint8 -} - -// Option implements the EDNS0 interface. -func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U } -func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil } -func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil } - -func (e *EDNS0_N3U) String() string { - // Re-use the hash map - s := "" - for i := 0; i < len(e.AlgCode); i++ { - if a, ok := HashToString[e.AlgCode[i]]; ok { - s += " " + a - } else { - s += " " + strconv.Itoa(int(e.AlgCode[i])) - } - } - return s -} - -// EDNS0_EXPIRE implementes the EDNS0 option as described in RFC 7314. -type EDNS0_EXPIRE struct { - Code uint16 // Always EDNS0EXPIRE - Expire uint32 -} - -// Option implements the EDNS0 interface. -func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE } -func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) } - -func (e *EDNS0_EXPIRE) pack() ([]byte, error) { - b := make([]byte, 4) - b[0] = byte(e.Expire >> 24) - b[1] = byte(e.Expire >> 16) - b[2] = byte(e.Expire >> 8) - b[3] = byte(e.Expire) - return b, nil -} - -func (e *EDNS0_EXPIRE) unpack(b []byte) error { - if len(b) < 4 { - return ErrBuf - } - e.Expire = binary.BigEndian.Uint32(b) - return nil -} - -// The EDNS0_LOCAL option is used for local/experimental purposes. 
The option -// code is recommended to be within the range [EDNS0LOCALSTART, EDNS0LOCALEND] -// (RFC6891), although any unassigned code can actually be used. The content of -// the option is made available in Data, unaltered. -// Basic use pattern for creating a local option: -// -// o := new(dns.OPT) -// o.Hdr.Name = "." -// o.Hdr.Rrtype = dns.TypeOPT -// e := new(dns.EDNS0_LOCAL) -// e.Code = dns.EDNS0LOCALSTART -// e.Data = []byte{72, 82, 74} -// o.Option = append(o.Option, e) -type EDNS0_LOCAL struct { - Code uint16 - Data []byte -} - -// Option implements the EDNS0 interface. -func (e *EDNS0_LOCAL) Option() uint16 { return e.Code } -func (e *EDNS0_LOCAL) String() string { - return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data) -} - -func (e *EDNS0_LOCAL) pack() ([]byte, error) { - b := make([]byte, len(e.Data)) - copied := copy(b, e.Data) - if copied != len(e.Data) { - return nil, ErrBuf - } - return b, nil -} - -func (e *EDNS0_LOCAL) unpack(b []byte) error { - e.Data = make([]byte, len(b)) - copied := copy(e.Data, b) - if copied != len(b) { - return ErrBuf - } - return nil -} - -// EDNS0_TCP_KEEPALIVE is an EDNS0 option that instructs the server to keep -// the TCP connection alive. See RFC 7828. -type EDNS0_TCP_KEEPALIVE struct { - Code uint16 // Always EDNSTCPKEEPALIVE - Length uint16 // the value 0 if the TIMEOUT is omitted, the value 2 if it is present; - Timeout uint16 // an idle timeout value for the TCP connection, specified in units of 100 milliseconds, encoded in network byte order. -} - -// Option implements the EDNS0 interface. 
-func (e *EDNS0_TCP_KEEPALIVE) Option() uint16 { return EDNS0TCPKEEPALIVE } - -func (e *EDNS0_TCP_KEEPALIVE) pack() ([]byte, error) { - if e.Timeout != 0 && e.Length != 2 { - return nil, errors.New("dns: timeout specified but length is not 2") - } - if e.Timeout == 0 && e.Length != 0 { - return nil, errors.New("dns: timeout not specified but length is not 0") - } - b := make([]byte, 4+e.Length) - binary.BigEndian.PutUint16(b[0:], e.Code) - binary.BigEndian.PutUint16(b[2:], e.Length) - if e.Length == 2 { - binary.BigEndian.PutUint16(b[4:], e.Timeout) - } - return b, nil -} - -func (e *EDNS0_TCP_KEEPALIVE) unpack(b []byte) error { - if len(b) < 4 { - return ErrBuf - } - e.Length = binary.BigEndian.Uint16(b[2:4]) - if e.Length != 0 && e.Length != 2 { - return errors.New("dns: length mismatch, want 0/2 but got " + strconv.FormatUint(uint64(e.Length), 10)) - } - if e.Length == 2 { - if len(b) < 6 { - return ErrBuf - } - e.Timeout = binary.BigEndian.Uint16(b[4:6]) - } - return nil -} - -func (e *EDNS0_TCP_KEEPALIVE) String() (s string) { - s = "use tcp keep-alive" - if e.Length == 0 { - s += ", timeout omitted" - } else { - s += fmt.Sprintf(", timeout %dms", e.Timeout*100) - } - return -} - -// EDNS0_PADDING option is used to add padding to a request/response. The default -// value of padding SHOULD be 0x0 but other values MAY be used, for instance if -// compression is applied before encryption which may break signatures. -type EDNS0_PADDING struct { - Padding []byte -} - -// Option implements the EDNS0 interface. 
-func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING } -func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil } -func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil } -func (e *EDNS0_PADDING) String() string { return fmt.Sprintf("%0X", e.Padding) } diff --git a/vendor/github.com/miekg/dns/format.go b/vendor/github.com/miekg/dns/format.go deleted file mode 100644 index 3f5303c20..000000000 --- a/vendor/github.com/miekg/dns/format.go +++ /dev/null @@ -1,87 +0,0 @@ -package dns - -import ( - "net" - "reflect" - "strconv" -) - -// NumField returns the number of rdata fields r has. -func NumField(r RR) int { - return reflect.ValueOf(r).Elem().NumField() - 1 // Remove RR_Header -} - -// Field returns the rdata field i as a string. Fields are indexed starting from 1. -// RR types that holds slice data, for instance the NSEC type bitmap will return a single -// string where the types are concatenated using a space. -// Accessing non existing fields will cause a panic. 
-func Field(r RR, i int) string { - if i == 0 { - return "" - } - d := reflect.ValueOf(r).Elem().Field(i) - switch k := d.Kind(); k { - case reflect.String: - return d.String() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return strconv.FormatInt(d.Int(), 10) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return strconv.FormatUint(d.Uint(), 10) - case reflect.Slice: - switch reflect.ValueOf(r).Elem().Type().Field(i).Tag { - case `dns:"a"`: - // TODO(miek): Hmm store this as 16 bytes - if d.Len() < net.IPv6len { - return net.IPv4(byte(d.Index(0).Uint()), - byte(d.Index(1).Uint()), - byte(d.Index(2).Uint()), - byte(d.Index(3).Uint())).String() - } - return net.IPv4(byte(d.Index(12).Uint()), - byte(d.Index(13).Uint()), - byte(d.Index(14).Uint()), - byte(d.Index(15).Uint())).String() - case `dns:"aaaa"`: - return net.IP{ - byte(d.Index(0).Uint()), - byte(d.Index(1).Uint()), - byte(d.Index(2).Uint()), - byte(d.Index(3).Uint()), - byte(d.Index(4).Uint()), - byte(d.Index(5).Uint()), - byte(d.Index(6).Uint()), - byte(d.Index(7).Uint()), - byte(d.Index(8).Uint()), - byte(d.Index(9).Uint()), - byte(d.Index(10).Uint()), - byte(d.Index(11).Uint()), - byte(d.Index(12).Uint()), - byte(d.Index(13).Uint()), - byte(d.Index(14).Uint()), - byte(d.Index(15).Uint()), - }.String() - case `dns:"nsec"`: - if d.Len() == 0 { - return "" - } - s := Type(d.Index(0).Uint()).String() - for i := 1; i < d.Len(); i++ { - s += " " + Type(d.Index(i).Uint()).String() - } - return s - default: - // if it does not have a tag its a string slice - fallthrough - case `dns:"txt"`: - if d.Len() == 0 { - return "" - } - s := d.Index(0).String() - for i := 1; i < d.Len(); i++ { - s += " " + d.Index(i).String() - } - return s - } - } - return "" -} diff --git a/vendor/github.com/miekg/dns/fuzz.go b/vendor/github.com/miekg/dns/fuzz.go deleted file mode 100644 index a8a09184d..000000000 --- a/vendor/github.com/miekg/dns/fuzz.go +++ 
/dev/null @@ -1,23 +0,0 @@ -// +build fuzz - -package dns - -func Fuzz(data []byte) int { - msg := new(Msg) - - if err := msg.Unpack(data); err != nil { - return 0 - } - if _, err := msg.Pack(); err != nil { - return 0 - } - - return 1 -} - -func FuzzNewRR(data []byte) int { - if _, err := NewRR(string(data)); err != nil { - return 0 - } - return 1 -} diff --git a/vendor/github.com/miekg/dns/generate.go b/vendor/github.com/miekg/dns/generate.go deleted file mode 100644 index 97bc39f58..000000000 --- a/vendor/github.com/miekg/dns/generate.go +++ /dev/null @@ -1,242 +0,0 @@ -package dns - -import ( - "bytes" - "fmt" - "io" - "strconv" - "strings" -) - -// Parse the $GENERATE statement as used in BIND9 zones. -// See http://www.zytrax.com/books/dns/ch8/generate.html for instance. -// We are called after '$GENERATE '. After which we expect: -// * the range (12-24/2) -// * lhs (ownername) -// * [[ttl][class]] -// * type -// * rhs (rdata) -// But we are lazy here, only the range is parsed *all* occurrences -// of $ after that are interpreted. 
-func (zp *ZoneParser) generate(l lex) (RR, bool) { - token := l.token - step := 1 - if i := strings.IndexByte(token, '/'); i >= 0 { - if i+1 == len(token) { - return zp.setParseError("bad step in $GENERATE range", l) - } - - s, err := strconv.Atoi(token[i+1:]) - if err != nil || s <= 0 { - return zp.setParseError("bad step in $GENERATE range", l) - } - - step = s - token = token[:i] - } - - sx := strings.SplitN(token, "-", 2) - if len(sx) != 2 { - return zp.setParseError("bad start-stop in $GENERATE range", l) - } - - start, err := strconv.Atoi(sx[0]) - if err != nil { - return zp.setParseError("bad start in $GENERATE range", l) - } - - end, err := strconv.Atoi(sx[1]) - if err != nil { - return zp.setParseError("bad stop in $GENERATE range", l) - } - if end < 0 || start < 0 || end < start { - return zp.setParseError("bad range in $GENERATE range", l) - } - - zp.c.Next() // _BLANK - - // Create a complete new string, which we then parse again. - var s string - for l, ok := zp.c.Next(); ok; l, ok = zp.c.Next() { - if l.err { - return zp.setParseError("bad data in $GENERATE directive", l) - } - if l.value == zNewline { - break - } - - s += l.token - } - - r := &generateReader{ - s: s, - - cur: start, - start: start, - end: end, - step: step, - - file: zp.file, - lex: &l, - } - zp.sub = NewZoneParser(r, zp.origin, zp.file) - zp.sub.includeDepth, zp.sub.includeAllowed = zp.includeDepth, zp.includeAllowed - zp.sub.SetDefaultTTL(defaultTtl) - return zp.subNext() -} - -type generateReader struct { - s string - si int - - cur int - start int - end int - step int - - mod bytes.Buffer - - escape bool - - eof bool - - file string - lex *lex -} - -func (r *generateReader) parseError(msg string, end int) *ParseError { - r.eof = true // Make errors sticky. 
- - l := *r.lex - l.token = r.s[r.si-1 : end] - l.column += r.si // l.column starts one zBLANK before r.s - - return &ParseError{r.file, msg, l} -} - -func (r *generateReader) Read(p []byte) (int, error) { - // NewZLexer, through NewZoneParser, should use ReadByte and - // not end up here. - - panic("not implemented") -} - -func (r *generateReader) ReadByte() (byte, error) { - if r.eof { - return 0, io.EOF - } - if r.mod.Len() > 0 { - return r.mod.ReadByte() - } - - if r.si >= len(r.s) { - r.si = 0 - r.cur += r.step - - r.eof = r.cur > r.end || r.cur < 0 - return '\n', nil - } - - si := r.si - r.si++ - - switch r.s[si] { - case '\\': - if r.escape { - r.escape = false - return '\\', nil - } - - r.escape = true - return r.ReadByte() - case '$': - if r.escape { - r.escape = false - return '$', nil - } - - mod := "%d" - - if si >= len(r.s)-1 { - // End of the string - fmt.Fprintf(&r.mod, mod, r.cur) - return r.mod.ReadByte() - } - - if r.s[si+1] == '$' { - r.si++ - return '$', nil - } - - var offset int - - // Search for { and } - if r.s[si+1] == '{' { - // Modifier block - sep := strings.Index(r.s[si+2:], "}") - if sep < 0 { - return 0, r.parseError("bad modifier in $GENERATE", len(r.s)) - } - - var errMsg string - mod, offset, errMsg = modToPrintf(r.s[si+2 : si+2+sep]) - if errMsg != "" { - return 0, r.parseError(errMsg, si+3+sep) - } - if r.start+offset < 0 || r.end+offset > 1<<31-1 { - return 0, r.parseError("bad offset in $GENERATE", si+3+sep) - } - - r.si += 2 + sep // Jump to it - } - - fmt.Fprintf(&r.mod, mod, r.cur+offset) - return r.mod.ReadByte() - default: - if r.escape { // Pretty useless here - r.escape = false - return r.ReadByte() - } - - return r.s[si], nil - } -} - -// Convert a $GENERATE modifier 0,0,d to something Printf can deal with. -func modToPrintf(s string) (string, int, string) { - // Modifier is { offset [ ,width [ ,base ] ] } - provide default - // values for optional width and type, if necessary. 
- var offStr, widthStr, base string - switch xs := strings.Split(s, ","); len(xs) { - case 1: - offStr, widthStr, base = xs[0], "0", "d" - case 2: - offStr, widthStr, base = xs[0], xs[1], "d" - case 3: - offStr, widthStr, base = xs[0], xs[1], xs[2] - default: - return "", 0, "bad modifier in $GENERATE" - } - - switch base { - case "o", "d", "x", "X": - default: - return "", 0, "bad base in $GENERATE" - } - - offset, err := strconv.Atoi(offStr) - if err != nil { - return "", 0, "bad offset in $GENERATE" - } - - width, err := strconv.Atoi(widthStr) - if err != nil || width < 0 || width > 255 { - return "", 0, "bad width in $GENERATE" - } - - if width == 0 { - return "%" + base, offset, "" - } - - return "%0" + widthStr + base, offset, "" -} diff --git a/vendor/github.com/miekg/dns/labels.go b/vendor/github.com/miekg/dns/labels.go deleted file mode 100644 index 577fc59d2..000000000 --- a/vendor/github.com/miekg/dns/labels.go +++ /dev/null @@ -1,191 +0,0 @@ -package dns - -// Holds a bunch of helper functions for dealing with labels. - -// SplitDomainName splits a name string into it's labels. -// www.miek.nl. returns []string{"www", "miek", "nl"} -// .www.miek.nl. returns []string{"", "www", "miek", "nl"}, -// The root label (.) returns nil. Note that using -// strings.Split(s) will work in most cases, but does not handle -// escaped dots (\.) for instance. -// s must be a syntactically valid domain name, see IsDomainName. -func SplitDomainName(s string) (labels []string) { - if len(s) == 0 { - return nil - } - fqdnEnd := 0 // offset of the final '.' or the length of the name - idx := Split(s) - begin := 0 - if s[len(s)-1] == '.' 
{ - fqdnEnd = len(s) - 1 - } else { - fqdnEnd = len(s) - } - - switch len(idx) { - case 0: - return nil - case 1: - // no-op - default: - end := 0 - for i := 1; i < len(idx); i++ { - end = idx[i] - labels = append(labels, s[begin:end-1]) - begin = end - } - } - - labels = append(labels, s[begin:fqdnEnd]) - return labels -} - -// CompareDomainName compares the names s1 and s2 and -// returns how many labels they have in common starting from the *right*. -// The comparison stops at the first inequality. The names are downcased -// before the comparison. -// -// www.miek.nl. and miek.nl. have two labels in common: miek and nl -// www.miek.nl. and www.bla.nl. have one label in common: nl -// -// s1 and s2 must be syntactically valid domain names. -func CompareDomainName(s1, s2 string) (n int) { - // the first check: root label - if s1 == "." || s2 == "." { - return 0 - } - - l1 := Split(s1) - l2 := Split(s2) - - j1 := len(l1) - 1 // end - i1 := len(l1) - 2 // start - j2 := len(l2) - 1 - i2 := len(l2) - 2 - // the second check can be done here: last/only label - // before we fall through into the for-loop below - if equal(s1[l1[j1]:], s2[l2[j2]:]) { - n++ - } else { - return - } - for { - if i1 < 0 || i2 < 0 { - break - } - if equal(s1[l1[i1]:l1[j1]], s2[l2[i2]:l2[j2]]) { - n++ - } else { - break - } - j1-- - i1-- - j2-- - i2-- - } - return -} - -// CountLabel counts the the number of labels in the string s. -// s must be a syntactically valid domain name. -func CountLabel(s string) (labels int) { - if s == "." { - return - } - off := 0 - end := false - for { - off, end = NextLabel(s, off) - labels++ - if end { - return - } - } -} - -// Split splits a name s into its label indexes. -// www.miek.nl. returns []int{0, 4, 9}, www.miek.nl also returns []int{0, 4, 9}. -// The root name (.) returns nil. Also see SplitDomainName. -// s must be a syntactically valid domain name. -func Split(s string) []int { - if s == "." 
{ - return nil - } - idx := make([]int, 1, 3) - off := 0 - end := false - - for { - off, end = NextLabel(s, off) - if end { - return idx - } - idx = append(idx, off) - } -} - -// NextLabel returns the index of the start of the next label in the -// string s starting at offset. -// The bool end is true when the end of the string has been reached. -// Also see PrevLabel. -func NextLabel(s string, offset int) (i int, end bool) { - quote := false - for i = offset; i < len(s)-1; i++ { - switch s[i] { - case '\\': - quote = !quote - default: - quote = false - case '.': - if quote { - quote = !quote - continue - } - return i + 1, false - } - } - return i + 1, true -} - -// PrevLabel returns the index of the label when starting from the right and -// jumping n labels to the left. -// The bool start is true when the start of the string has been overshot. -// Also see NextLabel. -func PrevLabel(s string, n int) (i int, start bool) { - if n == 0 { - return len(s), false - } - lab := Split(s) - if lab == nil { - return 0, true - } - if n > len(lab) { - return 0, true - } - return lab[len(lab)-n], false -} - -// equal compares a and b while ignoring case. It returns true when equal otherwise false. -func equal(a, b string) bool { - // might be lifted into API function. 
- la := len(a) - lb := len(b) - if la != lb { - return false - } - - for i := la - 1; i >= 0; i-- { - ai := a[i] - bi := b[i] - if ai >= 'A' && ai <= 'Z' { - ai |= 'a' - 'A' - } - if bi >= 'A' && bi <= 'Z' { - bi |= 'a' - 'A' - } - if ai != bi { - return false - } - } - return true -} diff --git a/vendor/github.com/miekg/dns/listen_go111.go b/vendor/github.com/miekg/dns/listen_go111.go deleted file mode 100644 index fad195cfe..000000000 --- a/vendor/github.com/miekg/dns/listen_go111.go +++ /dev/null @@ -1,44 +0,0 @@ -// +build go1.11 -// +build aix darwin dragonfly freebsd linux netbsd openbsd - -package dns - -import ( - "context" - "net" - "syscall" - - "golang.org/x/sys/unix" -) - -const supportsReusePort = true - -func reuseportControl(network, address string, c syscall.RawConn) error { - var opErr error - err := c.Control(func(fd uintptr) { - opErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEPORT, 1) - }) - if err != nil { - return err - } - - return opErr -} - -func listenTCP(network, addr string, reuseport bool) (net.Listener, error) { - var lc net.ListenConfig - if reuseport { - lc.Control = reuseportControl - } - - return lc.Listen(context.Background(), network, addr) -} - -func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) { - var lc net.ListenConfig - if reuseport { - lc.Control = reuseportControl - } - - return lc.ListenPacket(context.Background(), network, addr) -} diff --git a/vendor/github.com/miekg/dns/listen_go_not111.go b/vendor/github.com/miekg/dns/listen_go_not111.go deleted file mode 100644 index b9201417a..000000000 --- a/vendor/github.com/miekg/dns/listen_go_not111.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd - -package dns - -import "net" - -const supportsReusePort = false - -func listenTCP(network, addr string, reuseport bool) (net.Listener, error) { - if reuseport { - // TODO(tmthrgd): return an error? 
- } - - return net.Listen(network, addr) -} - -func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) { - if reuseport { - // TODO(tmthrgd): return an error? - } - - return net.ListenPacket(network, addr) -} diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go deleted file mode 100644 index 33f14c489..000000000 --- a/vendor/github.com/miekg/dns/msg.go +++ /dev/null @@ -1,1231 +0,0 @@ -// DNS packet assembly, see RFC 1035. Converting from - Unpack() - -// and to - Pack() - wire format. -// All the packers and unpackers take a (msg []byte, off int) -// and return (off1 int, ok bool). If they return ok==false, they -// also return off1==len(msg), so that the next unpacker will -// also fail. This lets us avoid checks of ok until the end of a -// packing sequence. - -package dns - -//go:generate go run msg_generate.go - -import ( - crand "crypto/rand" - "encoding/binary" - "fmt" - "math/big" - "math/rand" - "strconv" - "strings" - "sync" -) - -const ( - maxCompressionOffset = 2 << 13 // We have 14 bits for the compression pointer - maxDomainNameWireOctets = 255 // See RFC 1035 section 2.3.4 - - // This is the maximum number of compression pointers that should occur in a - // semantically valid message. Each label in a domain name must be at least one - // octet and is separated by a period. The root label won't be represented by a - // compression pointer to a compression pointer, hence the -2 to exclude the - // smallest valid root label. - // - // It is possible to construct a valid message that has more compression pointers - // than this, and still doesn't loop, by pointing to a previous pointer. This is - // not something a well written implementation should ever do, so we leave them - // to trip the maximum compression pointer check. - maxCompressionPointers = (maxDomainNameWireOctets+1)/2 - 2 - - // This is the maximum length of a domain name in presentation format. 
The - // maximum wire length of a domain name is 255 octets (see above), with the - // maximum label length being 63. The wire format requires one extra byte over - // the presentation format, reducing the number of octets by 1. Each label in - // the name will be separated by a single period, with each octet in the label - // expanding to at most 4 bytes (\DDD). If all other labels are of the maximum - // length, then the final label can only be 61 octets long to not exceed the - // maximum allowed wire length. - maxDomainNamePresentationLength = 61*4 + 1 + 63*4 + 1 + 63*4 + 1 + 63*4 + 1 -) - -// Errors defined in this package. -var ( - ErrAlg error = &Error{err: "bad algorithm"} // ErrAlg indicates an error with the (DNSSEC) algorithm. - ErrAuth error = &Error{err: "bad authentication"} // ErrAuth indicates an error in the TSIG authentication. - ErrBuf error = &Error{err: "buffer size too small"} // ErrBuf indicates that the buffer used is too small for the message. - ErrConnEmpty error = &Error{err: "conn has no connection"} // ErrConnEmpty indicates a connection is being used before it is initialized. - ErrExtendedRcode error = &Error{err: "bad extended rcode"} // ErrExtendedRcode ... - ErrFqdn error = &Error{err: "domain must be fully qualified"} // ErrFqdn indicates that a domain name does not have a closing dot. - ErrId error = &Error{err: "id mismatch"} // ErrId indicates there is a mismatch with the message's ID. - ErrKeyAlg error = &Error{err: "bad key algorithm"} // ErrKeyAlg indicates that the algorithm in the key is not valid. 
- ErrKey error = &Error{err: "bad key"} - ErrKeySize error = &Error{err: "bad key size"} - ErrLongDomain error = &Error{err: fmt.Sprintf("domain name exceeded %d wire-format octets", maxDomainNameWireOctets)} - ErrNoSig error = &Error{err: "no signature found"} - ErrPrivKey error = &Error{err: "bad private key"} - ErrRcode error = &Error{err: "bad rcode"} - ErrRdata error = &Error{err: "bad rdata"} - ErrRRset error = &Error{err: "bad rrset"} - ErrSecret error = &Error{err: "no secrets defined"} - ErrShortRead error = &Error{err: "short read"} - ErrSig error = &Error{err: "bad signature"} // ErrSig indicates that a signature can not be cryptographically validated. - ErrSoa error = &Error{err: "no SOA"} // ErrSOA indicates that no SOA RR was seen when doing zone transfers. - ErrTime error = &Error{err: "bad time"} // ErrTime indicates a timing error in TSIG authentication. -) - -// Id by default, returns a 16 bits random number to be used as a -// message id. The random provided should be good enough. This being a -// variable the function can be reassigned to a custom function. -// For instance, to make it return a static value: -// -// dns.Id = func() uint16 { return 3 } -var Id = id - -var ( - idLock sync.Mutex - idRand *rand.Rand -) - -// id returns a 16 bits random number to be used as a -// message id. The random provided should be good enough. -func id() uint16 { - idLock.Lock() - - if idRand == nil { - // This (partially) works around - // https://github.com/golang/go/issues/11833 by only - // seeding idRand upon the first call to id. - - var seed int64 - var buf [8]byte - - if _, err := crand.Read(buf[:]); err == nil { - seed = int64(binary.LittleEndian.Uint64(buf[:])) - } else { - seed = rand.Int63() - } - - idRand = rand.New(rand.NewSource(seed)) - } - - // The call to idRand.Uint32 must be within the - // mutex lock because *rand.Rand is not safe for - // concurrent use. 
- // - // There is no added performance overhead to calling - // idRand.Uint32 inside a mutex lock over just - // calling rand.Uint32 as the global math/rand rng - // is internally protected by a sync.Mutex. - id := uint16(idRand.Uint32()) - - idLock.Unlock() - return id -} - -// MsgHdr is a a manually-unpacked version of (id, bits). -type MsgHdr struct { - Id uint16 - Response bool - Opcode int - Authoritative bool - Truncated bool - RecursionDesired bool - RecursionAvailable bool - Zero bool - AuthenticatedData bool - CheckingDisabled bool - Rcode int -} - -// Msg contains the layout of a DNS message. -type Msg struct { - MsgHdr - Compress bool `json:"-"` // If true, the message will be compressed when converted to wire format. - Question []Question // Holds the RR(s) of the question section. - Answer []RR // Holds the RR(s) of the answer section. - Ns []RR // Holds the RR(s) of the authority section. - Extra []RR // Holds the RR(s) of the additional section. -} - -// ClassToString is a maps Classes to strings for each CLASS wire type. -var ClassToString = map[uint16]string{ - ClassINET: "IN", - ClassCSNET: "CS", - ClassCHAOS: "CH", - ClassHESIOD: "HS", - ClassNONE: "NONE", - ClassANY: "ANY", -} - -// OpcodeToString maps Opcodes to strings. -var OpcodeToString = map[int]string{ - OpcodeQuery: "QUERY", - OpcodeIQuery: "IQUERY", - OpcodeStatus: "STATUS", - OpcodeNotify: "NOTIFY", - OpcodeUpdate: "UPDATE", -} - -// RcodeToString maps Rcodes to strings. 
-var RcodeToString = map[int]string{ - RcodeSuccess: "NOERROR", - RcodeFormatError: "FORMERR", - RcodeServerFailure: "SERVFAIL", - RcodeNameError: "NXDOMAIN", - RcodeNotImplemented: "NOTIMP", - RcodeRefused: "REFUSED", - RcodeYXDomain: "YXDOMAIN", // See RFC 2136 - RcodeYXRrset: "YXRRSET", - RcodeNXRrset: "NXRRSET", - RcodeNotAuth: "NOTAUTH", - RcodeNotZone: "NOTZONE", - RcodeBadSig: "BADSIG", // Also known as RcodeBadVers, see RFC 6891 - // RcodeBadVers: "BADVERS", - RcodeBadKey: "BADKEY", - RcodeBadTime: "BADTIME", - RcodeBadMode: "BADMODE", - RcodeBadName: "BADNAME", - RcodeBadAlg: "BADALG", - RcodeBadTrunc: "BADTRUNC", - RcodeBadCookie: "BADCOOKIE", -} - -// compressionMap is used to allow a more efficient compression map -// to be used for internal packDomainName calls without changing the -// signature or functionality of public API. -// -// In particular, map[string]uint16 uses 25% less per-entry memory -// than does map[string]int. -type compressionMap struct { - ext map[string]int // external callers - int map[string]uint16 // internal callers -} - -func (m compressionMap) valid() bool { - return m.int != nil || m.ext != nil -} - -func (m compressionMap) insert(s string, pos int) { - if m.ext != nil { - m.ext[s] = pos - } else { - m.int[s] = uint16(pos) - } -} - -func (m compressionMap) find(s string) (int, bool) { - if m.ext != nil { - pos, ok := m.ext[s] - return pos, ok - } - - pos, ok := m.int[s] - return int(pos), ok -} - -// Domain names are a sequence of counted strings -// split at the dots. They end with a zero-length string. - -// PackDomainName packs a domain name s into msg[off:]. -// If compression is wanted compress must be true and the compression -// map needs to hold a mapping between domain names and offsets -// pointing into msg. 
-func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { - off1, _, err = packDomainName(s, msg, off, compressionMap{ext: compression}, compress) - return -} - -func packDomainName(s string, msg []byte, off int, compression compressionMap, compress bool) (off1 int, labels int, err error) { - // special case if msg == nil - lenmsg := 256 - if msg != nil { - lenmsg = len(msg) - } - - ls := len(s) - if ls == 0 { // Ok, for instance when dealing with update RR without any rdata. - return off, 0, nil - } - - // If not fully qualified, error out, but only if msg != nil #ugly - if s[ls-1] != '.' { - if msg != nil { - return lenmsg, 0, ErrFqdn - } - s += "." - ls++ - } - - // Each dot ends a segment of the name. - // We trade each dot byte for a length byte. - // Except for escaped dots (\.), which are normal dots. - // There is also a trailing zero. - - // Compression - pointer := -1 - - // Emit sequence of counted strings, chopping at dots. 
- var ( - begin int - compBegin int - compOff int - bs []byte - wasDot bool - ) -loop: - for i := 0; i < ls; i++ { - var c byte - if bs == nil { - c = s[i] - } else { - c = bs[i] - } - - switch c { - case '\\': - if off+1 > lenmsg { - return lenmsg, labels, ErrBuf - } - - if bs == nil { - bs = []byte(s) - } - - // check for \DDD - if i+3 < ls && isDigit(bs[i+1]) && isDigit(bs[i+2]) && isDigit(bs[i+3]) { - bs[i] = dddToByte(bs[i+1:]) - copy(bs[i+1:ls-3], bs[i+4:]) - ls -= 3 - compOff += 3 - } else { - copy(bs[i:ls-1], bs[i+1:]) - ls-- - compOff++ - } - - wasDot = false - case '.': - if wasDot { - // two dots back to back is not legal - return lenmsg, labels, ErrRdata - } - wasDot = true - - labelLen := i - begin - if labelLen >= 1<<6 { // top two bits of length must be clear - return lenmsg, labels, ErrRdata - } - - // off can already (we're in a loop) be bigger than len(msg) - // this happens when a name isn't fully qualified - if off+1+labelLen > lenmsg { - return lenmsg, labels, ErrBuf - } - - // Don't try to compress '.' - // We should only compress when compress is true, but we should also still pick - // up names that can be used for *future* compression(s). - if compression.valid() && !isRootLabel(s, bs, begin, ls) { - if p, ok := compression.find(s[compBegin:]); ok { - // The first hit is the longest matching dname - // keep the pointer offset we get back and store - // the offset of the current name, because that's - // where we need to insert the pointer later - - // If compress is true, we're allowed to compress this dname - if compress { - pointer = p // Where to point to - break loop - } - } else if off < maxCompressionOffset { - // Only offsets smaller than maxCompressionOffset can be used. - compression.insert(s[compBegin:], off) - } - } - - // The following is covered by the length check above. 
- if msg != nil { - msg[off] = byte(labelLen) - - if bs == nil { - copy(msg[off+1:], s[begin:i]) - } else { - copy(msg[off+1:], bs[begin:i]) - } - } - off += 1 + labelLen - - labels++ - begin = i + 1 - compBegin = begin + compOff - default: - wasDot = false - } - } - - // Root label is special - if isRootLabel(s, bs, 0, ls) { - return off, labels, nil - } - - // If we did compression and we find something add the pointer here - if pointer != -1 { - // We have two bytes (14 bits) to put the pointer in - // if msg == nil, we will never do compression - binary.BigEndian.PutUint16(msg[off:], uint16(pointer^0xC000)) - return off + 2, labels, nil - } - - if msg != nil && off < lenmsg { - msg[off] = 0 - } - - return off + 1, labels, nil -} - -// isRootLabel returns whether s or bs, from off to end, is the root -// label ".". -// -// If bs is nil, s will be checked, otherwise bs will be checked. -func isRootLabel(s string, bs []byte, off, end int) bool { - if bs == nil { - return s[off:end] == "." - } - - return end-off == 1 && bs[off] == '.' -} - -// Unpack a domain name. -// In addition to the simple sequences of counted strings above, -// domain names are allowed to refer to strings elsewhere in the -// packet, to avoid repeating common suffixes when returning -// many entries in a single domain. The pointers are marked -// by a length byte with the top two bits set. Ignoring those -// two bits, that byte and the next give a 14 bit offset from msg[0] -// where we should pick up the trail. -// Note that if we jump elsewhere in the packet, -// we return off1 == the offset after the first pointer we found, -// which is where the next record will start. -// In theory, the pointers are only allowed to jump backward. -// We let them jump anywhere and stop jumping after a while. - -// UnpackDomainName unpacks a domain name into a string. It returns -// the name, the new offset into msg and any error that occurred. 
-// -// When an error is encountered, the unpacked name will be discarded -// and len(msg) will be returned as the offset. -func UnpackDomainName(msg []byte, off int) (string, int, error) { - s := make([]byte, 0, maxDomainNamePresentationLength) - off1 := 0 - lenmsg := len(msg) - budget := maxDomainNameWireOctets - ptr := 0 // number of pointers followed -Loop: - for { - if off >= lenmsg { - return "", lenmsg, ErrBuf - } - c := int(msg[off]) - off++ - switch c & 0xC0 { - case 0x00: - if c == 0x00 { - // end of name - break Loop - } - // literal string - if off+c > lenmsg { - return "", lenmsg, ErrBuf - } - budget -= c + 1 // +1 for the label separator - if budget <= 0 { - return "", lenmsg, ErrLongDomain - } - for j := off; j < off+c; j++ { - switch b := msg[j]; b { - case '.', '(', ')', ';', ' ', '@': - fallthrough - case '"', '\\': - s = append(s, '\\', b) - default: - if b < ' ' || b > '~' { // unprintable, use \DDD - s = append(s, escapeByte(b)...) - } else { - s = append(s, b) - } - } - } - s = append(s, '.') - off += c - case 0xC0: - // pointer to somewhere else in msg. - // remember location after first ptr, - // since that's how many bytes we consumed. - // also, don't follow too many pointers -- - // maybe there's a loop. 
- if off >= lenmsg { - return "", lenmsg, ErrBuf - } - c1 := msg[off] - off++ - if ptr == 0 { - off1 = off - } - if ptr++; ptr > maxCompressionPointers { - return "", lenmsg, &Error{err: "too many compression pointers"} - } - // pointer should guarantee that it advances and points forwards at least - // but the condition on previous three lines guarantees that it's - // at least loop-free - off = (c^0xC0)<<8 | int(c1) - default: - // 0x80 and 0x40 are reserved - return "", lenmsg, ErrRdata - } - } - if ptr == 0 { - off1 = off - } - if len(s) == 0 { - return ".", off1, nil - } - return string(s), off1, nil -} - -func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) { - if len(txt) == 0 { - if offset >= len(msg) { - return offset, ErrBuf - } - msg[offset] = 0 - return offset, nil - } - var err error - for i := range txt { - if len(txt[i]) > len(tmp) { - return offset, ErrBuf - } - offset, err = packTxtString(txt[i], msg, offset, tmp) - if err != nil { - return offset, err - } - } - return offset, nil -} - -func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) { - lenByteOffset := offset - if offset >= len(msg) || len(s) > len(tmp) { - return offset, ErrBuf - } - offset++ - bs := tmp[:len(s)] - copy(bs, s) - for i := 0; i < len(bs); i++ { - if len(msg) <= offset { - return offset, ErrBuf - } - if bs[i] == '\\' { - i++ - if i == len(bs) { - break - } - // check for \DDD - if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { - msg[offset] = dddToByte(bs[i:]) - i += 2 - } else { - msg[offset] = bs[i] - } - } else { - msg[offset] = bs[i] - } - offset++ - } - l := offset - lenByteOffset - 1 - if l > 255 { - return offset, &Error{err: "string exceeded 255 bytes in txt"} - } - msg[lenByteOffset] = byte(l) - return offset, nil -} - -func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error) { - if offset >= len(msg) || len(s) > len(tmp) { - return offset, ErrBuf - } - bs := tmp[:len(s)] 
- copy(bs, s) - for i := 0; i < len(bs); i++ { - if len(msg) <= offset { - return offset, ErrBuf - } - if bs[i] == '\\' { - i++ - if i == len(bs) { - break - } - // check for \DDD - if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { - msg[offset] = dddToByte(bs[i:]) - i += 2 - } else { - msg[offset] = bs[i] - } - } else { - msg[offset] = bs[i] - } - offset++ - } - return offset, nil -} - -func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) { - off = off0 - var s string - for off < len(msg) && err == nil { - s, off, err = unpackString(msg, off) - if err == nil { - ss = append(ss, s) - } - } - return -} - -// Helpers for dealing with escaped bytes -func isDigit(b byte) bool { return b >= '0' && b <= '9' } - -func dddToByte(s []byte) byte { - _ = s[2] // bounds check hint to compiler; see golang.org/issue/14808 - return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) -} - -func dddStringToByte(s string) byte { - _ = s[2] // bounds check hint to compiler; see golang.org/issue/14808 - return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) -} - -// Helper function for packing and unpacking -func intToBytes(i *big.Int, length int) []byte { - buf := i.Bytes() - if len(buf) < length { - b := make([]byte, length) - copy(b[length-len(buf):], buf) - return b - } - return buf -} - -// PackRR packs a resource record rr into msg[off:]. -// See PackDomainName for documentation about the compression. -func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { - headerEnd, off1, err := packRR(rr, msg, off, compressionMap{ext: compression}, compress) - if err == nil { - // packRR no longer sets the Rdlength field on the rr, but - // callers might be expecting it so we set it here. 
- rr.Header().Rdlength = uint16(off1 - headerEnd) - } - return off1, err -} - -func packRR(rr RR, msg []byte, off int, compression compressionMap, compress bool) (headerEnd int, off1 int, err error) { - if rr == nil { - return len(msg), len(msg), &Error{err: "nil rr"} - } - - headerEnd, off1, err = rr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, len(msg), err - } - - rdlength := off1 - headerEnd - if int(uint16(rdlength)) != rdlength { // overflow - return headerEnd, len(msg), ErrRdata - } - - // The RDLENGTH field is the last field in the header and we set it here. - binary.BigEndian.PutUint16(msg[headerEnd-2:], uint16(rdlength)) - return headerEnd, off1, nil -} - -// UnpackRR unpacks msg[off:] into an RR. -func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) { - h, off, msg, err := unpackHeader(msg, off) - if err != nil { - return nil, len(msg), err - } - - return UnpackRRWithHeader(h, msg, off) -} - -// UnpackRRWithHeader unpacks the record type specific payload given an existing -// RR_Header. -func UnpackRRWithHeader(h RR_Header, msg []byte, off int) (rr RR, off1 int, err error) { - end := off + int(h.Rdlength) - - if fn, known := typeToUnpack[h.Rrtype]; !known { - rr, off, err = unpackRFC3597(h, msg, off) - } else { - rr, off, err = fn(h, msg, off) - } - if off != end { - return &h, end, &Error{err: "bad rdlength"} - } - return rr, off, err -} - -// unpackRRslice unpacks msg[off:] into an []RR. 
-// If we cannot unpack the whole array, then it will return nil -func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error) { - var r RR - // Don't pre-allocate, l may be under attacker control - var dst []RR - for i := 0; i < l; i++ { - off1 := off - r, off, err = UnpackRR(msg, off) - if err != nil { - off = len(msg) - break - } - // If offset does not increase anymore, l is a lie - if off1 == off { - l = i - break - } - dst = append(dst, r) - } - if err != nil && off == len(msg) { - dst = nil - } - return dst, off, err -} - -// Convert a MsgHdr to a string, with dig-like headers: -// -//;; opcode: QUERY, status: NOERROR, id: 48404 -// -//;; flags: qr aa rd ra; -func (h *MsgHdr) String() string { - if h == nil { - return " MsgHdr" - } - - s := ";; opcode: " + OpcodeToString[h.Opcode] - s += ", status: " + RcodeToString[h.Rcode] - s += ", id: " + strconv.Itoa(int(h.Id)) + "\n" - - s += ";; flags:" - if h.Response { - s += " qr" - } - if h.Authoritative { - s += " aa" - } - if h.Truncated { - s += " tc" - } - if h.RecursionDesired { - s += " rd" - } - if h.RecursionAvailable { - s += " ra" - } - if h.Zero { // Hmm - s += " z" - } - if h.AuthenticatedData { - s += " ad" - } - if h.CheckingDisabled { - s += " cd" - } - - s += ";" - return s -} - -// Pack packs a Msg: it is converted to to wire format. -// If the dns.Compress is true the message will be in compressed wire format. -func (dns *Msg) Pack() (msg []byte, err error) { - return dns.PackBuffer(nil) -} - -// PackBuffer packs a Msg, using the given buffer buf. If buf is too small a new buffer is allocated. -func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) { - // If this message can't be compressed, avoid filling the - // compression map and creating garbage. - if dns.Compress && dns.isCompressible() { - compression := make(map[string]uint16) // Compression pointer mappings. 
- return dns.packBufferWithCompressionMap(buf, compressionMap{int: compression}, true) - } - - return dns.packBufferWithCompressionMap(buf, compressionMap{}, false) -} - -// packBufferWithCompressionMap packs a Msg, using the given buffer buf. -func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression compressionMap, compress bool) (msg []byte, err error) { - if dns.Rcode < 0 || dns.Rcode > 0xFFF { - return nil, ErrRcode - } - - // Set extended rcode unconditionally if we have an opt, this will allow - // reseting the extended rcode bits if they need to. - if opt := dns.IsEdns0(); opt != nil { - opt.SetExtendedRcode(uint16(dns.Rcode)) - } else if dns.Rcode > 0xF { - // If Rcode is an extended one and opt is nil, error out. - return nil, ErrExtendedRcode - } - - // Convert convenient Msg into wire-like Header. - var dh Header - dh.Id = dns.Id - dh.Bits = uint16(dns.Opcode)<<11 | uint16(dns.Rcode&0xF) - if dns.Response { - dh.Bits |= _QR - } - if dns.Authoritative { - dh.Bits |= _AA - } - if dns.Truncated { - dh.Bits |= _TC - } - if dns.RecursionDesired { - dh.Bits |= _RD - } - if dns.RecursionAvailable { - dh.Bits |= _RA - } - if dns.Zero { - dh.Bits |= _Z - } - if dns.AuthenticatedData { - dh.Bits |= _AD - } - if dns.CheckingDisabled { - dh.Bits |= _CD - } - - dh.Qdcount = uint16(len(dns.Question)) - dh.Ancount = uint16(len(dns.Answer)) - dh.Nscount = uint16(len(dns.Ns)) - dh.Arcount = uint16(len(dns.Extra)) - - // We need the uncompressed length here, because we first pack it and then compress it. - msg = buf - uncompressedLen := msgLenWithCompressionMap(dns, nil) - if packLen := uncompressedLen + 1; len(msg) < packLen { - msg = make([]byte, packLen) - } - - // Pack it in: header and then the pieces. 
- off := 0 - off, err = dh.pack(msg, off, compression, compress) - if err != nil { - return nil, err - } - for _, r := range dns.Question { - off, err = r.pack(msg, off, compression, compress) - if err != nil { - return nil, err - } - } - for _, r := range dns.Answer { - _, off, err = packRR(r, msg, off, compression, compress) - if err != nil { - return nil, err - } - } - for _, r := range dns.Ns { - _, off, err = packRR(r, msg, off, compression, compress) - if err != nil { - return nil, err - } - } - for _, r := range dns.Extra { - _, off, err = packRR(r, msg, off, compression, compress) - if err != nil { - return nil, err - } - } - return msg[:off], nil -} - -func (dns *Msg) unpack(dh Header, msg []byte, off int) (err error) { - // If we are at the end of the message we should return *just* the - // header. This can still be useful to the caller. 9.9.9.9 sends these - // when responding with REFUSED for instance. - if off == len(msg) { - // reset sections before returning - dns.Question, dns.Answer, dns.Ns, dns.Extra = nil, nil, nil, nil - return nil - } - - // Qdcount, Ancount, Nscount, Arcount can't be trusted, as they are - // attacker controlled. This means we can't use them to pre-allocate - // slices. - dns.Question = nil - for i := 0; i < int(dh.Qdcount); i++ { - off1 := off - var q Question - q, off, err = unpackQuestion(msg, off) - if err != nil { - return err - } - if off1 == off { // Offset does not increase anymore, dh.Qdcount is a lie! 
- dh.Qdcount = uint16(i) - break - } - dns.Question = append(dns.Question, q) - } - - dns.Answer, off, err = unpackRRslice(int(dh.Ancount), msg, off) - // The header counts might have been wrong so we need to update it - dh.Ancount = uint16(len(dns.Answer)) - if err == nil { - dns.Ns, off, err = unpackRRslice(int(dh.Nscount), msg, off) - } - // The header counts might have been wrong so we need to update it - dh.Nscount = uint16(len(dns.Ns)) - if err == nil { - dns.Extra, off, err = unpackRRslice(int(dh.Arcount), msg, off) - } - // The header counts might have been wrong so we need to update it - dh.Arcount = uint16(len(dns.Extra)) - - // Set extended Rcode - if opt := dns.IsEdns0(); opt != nil { - dns.Rcode |= opt.ExtendedRcode() - } - - if off != len(msg) { - // TODO(miek) make this an error? - // use PackOpt to let people tell how detailed the error reporting should be? - // println("dns: extra bytes in dns packet", off, "<", len(msg)) - } - return err - -} - -// Unpack unpacks a binary message to a Msg structure. -func (dns *Msg) Unpack(msg []byte) (err error) { - dh, off, err := unpackMsgHdr(msg, 0) - if err != nil { - return err - } - - dns.setHdr(dh) - return dns.unpack(dh, msg, off) -} - -// Convert a complete message to a string with dig-like output. 
-func (dns *Msg) String() string { - if dns == nil { - return " MsgHdr" - } - s := dns.MsgHdr.String() + " " - s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", " - s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", " - s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", " - s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n" - if len(dns.Question) > 0 { - s += "\n;; QUESTION SECTION:\n" - for i := 0; i < len(dns.Question); i++ { - s += dns.Question[i].String() + "\n" - } - } - if len(dns.Answer) > 0 { - s += "\n;; ANSWER SECTION:\n" - for i := 0; i < len(dns.Answer); i++ { - if dns.Answer[i] != nil { - s += dns.Answer[i].String() + "\n" - } - } - } - if len(dns.Ns) > 0 { - s += "\n;; AUTHORITY SECTION:\n" - for i := 0; i < len(dns.Ns); i++ { - if dns.Ns[i] != nil { - s += dns.Ns[i].String() + "\n" - } - } - } - if len(dns.Extra) > 0 { - s += "\n;; ADDITIONAL SECTION:\n" - for i := 0; i < len(dns.Extra); i++ { - if dns.Extra[i] != nil { - s += dns.Extra[i].String() + "\n" - } - } - } - return s -} - -// isCompressible returns whether the msg may be compressible. -func (dns *Msg) isCompressible() bool { - // If we only have one question, there is nothing we can ever compress. - return len(dns.Question) > 1 || len(dns.Answer) > 0 || - len(dns.Ns) > 0 || len(dns.Extra) > 0 -} - -// Len returns the message length when in (un)compressed wire format. -// If dns.Compress is true compression it is taken into account. Len() -// is provided to be a faster way to get the size of the resulting packet, -// than packing it, measuring the size and discarding the buffer. -func (dns *Msg) Len() int { - // If this message can't be compressed, avoid filling the - // compression map and creating garbage. 
- if dns.Compress && dns.isCompressible() { - compression := make(map[string]struct{}) - return msgLenWithCompressionMap(dns, compression) - } - - return msgLenWithCompressionMap(dns, nil) -} - -func msgLenWithCompressionMap(dns *Msg, compression map[string]struct{}) int { - l := 12 // Message header is always 12 bytes - - for _, r := range dns.Question { - l += r.len(l, compression) - } - for _, r := range dns.Answer { - if r != nil { - l += r.len(l, compression) - } - } - for _, r := range dns.Ns { - if r != nil { - l += r.len(l, compression) - } - } - for _, r := range dns.Extra { - if r != nil { - l += r.len(l, compression) - } - } - - return l -} - -func domainNameLen(s string, off int, compression map[string]struct{}, compress bool) int { - if s == "" || s == "." { - return 1 - } - - escaped := strings.Contains(s, "\\") - - if compression != nil && (compress || off < maxCompressionOffset) { - // compressionLenSearch will insert the entry into the compression - // map if it doesn't contain it. - if l, ok := compressionLenSearch(compression, s, off); ok && compress { - if escaped { - return escapedNameLen(s[:l]) + 2 - } - - return l + 2 - } - } - - if escaped { - return escapedNameLen(s) + 1 - } - - return len(s) + 1 -} - -func escapedNameLen(s string) int { - nameLen := len(s) - for i := 0; i < len(s); i++ { - if s[i] != '\\' { - continue - } - - if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) { - nameLen -= 3 - i += 3 - } else { - nameLen-- - i++ - } - } - - return nameLen -} - -func compressionLenSearch(c map[string]struct{}, s string, msgOff int) (int, bool) { - for off, end := 0, false; !end; off, end = NextLabel(s, off) { - if _, ok := c[s[off:]]; ok { - return off, true - } - - if msgOff+off < maxCompressionOffset { - c[s[off:]] = struct{}{} - } - } - - return 0, false -} - -// Copy returns a new RR which is a deep-copy of r. 
-func Copy(r RR) RR { r1 := r.copy(); return r1 } - -// Len returns the length (in octets) of the uncompressed RR in wire format. -func Len(r RR) int { return r.len(0, nil) } - -// Copy returns a new *Msg which is a deep-copy of dns. -func (dns *Msg) Copy() *Msg { return dns.CopyTo(new(Msg)) } - -// CopyTo copies the contents to the provided message using a deep-copy and returns the copy. -func (dns *Msg) CopyTo(r1 *Msg) *Msg { - r1.MsgHdr = dns.MsgHdr - r1.Compress = dns.Compress - - if len(dns.Question) > 0 { - r1.Question = make([]Question, len(dns.Question)) - copy(r1.Question, dns.Question) // TODO(miek): Question is an immutable value, ok to do a shallow-copy - } - - rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra)) - var rri int - - if len(dns.Answer) > 0 { - rrbegin := rri - for i := 0; i < len(dns.Answer); i++ { - rrArr[rri] = dns.Answer[i].copy() - rri++ - } - r1.Answer = rrArr[rrbegin:rri:rri] - } - - if len(dns.Ns) > 0 { - rrbegin := rri - for i := 0; i < len(dns.Ns); i++ { - rrArr[rri] = dns.Ns[i].copy() - rri++ - } - r1.Ns = rrArr[rrbegin:rri:rri] - } - - if len(dns.Extra) > 0 { - rrbegin := rri - for i := 0; i < len(dns.Extra); i++ { - rrArr[rri] = dns.Extra[i].copy() - rri++ - } - r1.Extra = rrArr[rrbegin:rri:rri] - } - - return r1 -} - -func (q *Question) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) { - off, _, err := packDomainName(q.Name, msg, off, compression, compress) - if err != nil { - return off, err - } - off, err = packUint16(q.Qtype, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(q.Qclass, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func unpackQuestion(msg []byte, off int) (Question, int, error) { - var ( - q Question - err error - ) - q.Name, off, err = UnpackDomainName(msg, off) - if err != nil { - return q, off, err - } - if off == len(msg) { - return q, off, nil - } - q.Qtype, off, err = unpackUint16(msg, off) - if err != nil { 
- return q, off, err - } - if off == len(msg) { - return q, off, nil - } - q.Qclass, off, err = unpackUint16(msg, off) - if off == len(msg) { - return q, off, nil - } - return q, off, err -} - -func (dh *Header) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) { - off, err := packUint16(dh.Id, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(dh.Bits, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(dh.Qdcount, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(dh.Ancount, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(dh.Nscount, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(dh.Arcount, msg, off) - return off, err -} - -func unpackMsgHdr(msg []byte, off int) (Header, int, error) { - var ( - dh Header - err error - ) - dh.Id, off, err = unpackUint16(msg, off) - if err != nil { - return dh, off, err - } - dh.Bits, off, err = unpackUint16(msg, off) - if err != nil { - return dh, off, err - } - dh.Qdcount, off, err = unpackUint16(msg, off) - if err != nil { - return dh, off, err - } - dh.Ancount, off, err = unpackUint16(msg, off) - if err != nil { - return dh, off, err - } - dh.Nscount, off, err = unpackUint16(msg, off) - if err != nil { - return dh, off, err - } - dh.Arcount, off, err = unpackUint16(msg, off) - return dh, off, err -} - -// setHdr set the header in the dns using the binary data in dh. -func (dns *Msg) setHdr(dh Header) { - dns.Id = dh.Id - dns.Response = dh.Bits&_QR != 0 - dns.Opcode = int(dh.Bits>>11) & 0xF - dns.Authoritative = dh.Bits&_AA != 0 - dns.Truncated = dh.Bits&_TC != 0 - dns.RecursionDesired = dh.Bits&_RD != 0 - dns.RecursionAvailable = dh.Bits&_RA != 0 - dns.Zero = dh.Bits&_Z != 0 // _Z covers the zero bit, which should be zero; not sure why we set it to the opposite. 
- dns.AuthenticatedData = dh.Bits&_AD != 0 - dns.CheckingDisabled = dh.Bits&_CD != 0 - dns.Rcode = int(dh.Bits & 0xF) -} diff --git a/vendor/github.com/miekg/dns/msg_generate.go b/vendor/github.com/miekg/dns/msg_generate.go deleted file mode 100644 index 86ed04fcb..000000000 --- a/vendor/github.com/miekg/dns/msg_generate.go +++ /dev/null @@ -1,345 +0,0 @@ -//+build ignore - -// msg_generate.go is meant to run with go generate. It will use -// go/{importer,types} to track down all the RR struct types. Then for each type -// it will generate pack/unpack methods based on the struct tags. The generated source is -// written to zmsg.go, and is meant to be checked into git. -package main - -import ( - "bytes" - "fmt" - "go/format" - "go/importer" - "go/types" - "log" - "os" - "strings" -) - -var packageHdr = ` -// Code generated by "go run msg_generate.go"; DO NOT EDIT. - -package dns - -` - -// getTypeStruct will take a type and the package scope, and return the -// (innermost) struct if the type is considered a RR type (currently defined as -// those structs beginning with a RR_Header, could be redefined as implementing -// the RR interface). The bool return value indicates if embedded structs were -// resolved. 
-func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { - st, ok := t.Underlying().(*types.Struct) - if !ok { - return nil, false - } - if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { - return st, false - } - if st.Field(0).Anonymous() { - st, _ := getTypeStruct(st.Field(0).Type(), scope) - return st, true - } - return nil, false -} - -func main() { - // Import and type-check the package - pkg, err := importer.Default().Import("github.com/miekg/dns") - fatalIfErr(err) - scope := pkg.Scope() - - // Collect actual types (*X) - var namedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - if st, _ := getTypeStruct(o.Type(), scope); st == nil { - continue - } - if name == "PrivateRR" { - continue - } - - // Check if corresponding TypeX exists - if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { - log.Fatalf("Constant Type%s does not exist.", o.Name()) - } - - namedTypes = append(namedTypes, o.Name()) - } - - b := &bytes.Buffer{} - b.WriteString(packageHdr) - - fmt.Fprint(b, "// pack*() functions\n\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, _ := getTypeStruct(o.Type(), scope) - - fmt.Fprintf(b, "func (rr *%s) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) {\n", name) - fmt.Fprint(b, `headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) -if err != nil { - return headerEnd, off, err -} -`) - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { - fmt.Fprintf(b, s, st.Field(i).Name()) - fmt.Fprint(b, `if err != nil { -return headerEnd, off, err -} -`) - } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"txt"`: - o("off, err = packStringTxt(rr.%s, msg, off)\n") - case `dns:"opt"`: - o("off, err = packDataOpt(rr.%s, msg, off)\n") - case `dns:"nsec"`: - o("off, err = packDataNsec(rr.%s, msg, 
off)\n") - case `dns:"domain-name"`: - o("off, err = packDataDomainNames(rr.%s, msg, off, compression, false)\n") - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch { - case st.Tag(i) == `dns:"-"`: // ignored - case st.Tag(i) == `dns:"cdomain-name"`: - o("off, _, err = packDomainName(rr.%s, msg, off, compression, compress)\n") - case st.Tag(i) == `dns:"domain-name"`: - o("off, _, err = packDomainName(rr.%s, msg, off, compression, false)\n") - case st.Tag(i) == `dns:"a"`: - o("off, err = packDataA(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"aaaa"`: - o("off, err = packDataAAAA(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"uint48"`: - o("off, err = packUint48(rr.%s, msg, off)\n") - case st.Tag(i) == `dns:"txt"`: - o("off, err = packString(rr.%s, msg, off)\n") - - case strings.HasPrefix(st.Tag(i), `dns:"size-base32`): // size-base32 can be packed just like base32 - fallthrough - case st.Tag(i) == `dns:"base32"`: - o("off, err = packStringBase32(rr.%s, msg, off)\n") - - case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): // size-base64 can be packed just like base64 - fallthrough - case st.Tag(i) == `dns:"base64"`: - o("off, err = packStringBase64(rr.%s, msg, off)\n") - - case strings.HasPrefix(st.Tag(i), `dns:"size-hex:SaltLength`): - // directly write instead of using o() so we get the error check in the correct place - field := st.Field(i).Name() - fmt.Fprintf(b, `// Only pack salt if value is not "-", i.e. 
empty -if rr.%s != "-" { - off, err = packStringHex(rr.%s, msg, off) - if err != nil { - return headerEnd, off, err - } -} -`, field, field) - continue - case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): // size-hex can be packed just like hex - fallthrough - case st.Tag(i) == `dns:"hex"`: - o("off, err = packStringHex(rr.%s, msg, off)\n") - - case st.Tag(i) == `dns:"octet"`: - o("off, err = packStringOctet(rr.%s, msg, off)\n") - case st.Tag(i) == "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("off, err = packUint8(rr.%s, msg, off)\n") - case types.Uint16: - o("off, err = packUint16(rr.%s, msg, off)\n") - case types.Uint32: - o("off, err = packUint32(rr.%s, msg, off)\n") - case types.Uint64: - o("off, err = packUint64(rr.%s, msg, off)\n") - case types.String: - o("off, err = packString(rr.%s, msg, off)\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - } - fmt.Fprintln(b, "return headerEnd, off, nil }\n") - } - - fmt.Fprint(b, "// unpack*() functions\n\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, _ := getTypeStruct(o.Type(), scope) - - fmt.Fprintf(b, "func unpack%s(h RR_Header, msg []byte, off int) (RR, int, error) {\n", name) - fmt.Fprintf(b, "rr := new(%s)\n", name) - fmt.Fprint(b, "rr.Hdr = h\n") - fmt.Fprint(b, `if noRdata(h) { -return rr, off, nil - } -var err error -rdStart := off -_ = rdStart - -`) - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { - fmt.Fprintf(b, s, st.Field(i).Name()) - fmt.Fprint(b, `if err != nil { -return rr, off, err -} -`) - } - - // size-* are special, because they reference a struct member we should use for the length. 
- if strings.HasPrefix(st.Tag(i), `dns:"size-`) { - structMember := structMember(st.Tag(i)) - structTag := structTag(st.Tag(i)) - switch structTag { - case "hex": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringHex(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - case "base32": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase32(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - case "base64": - fmt.Fprintf(b, "rr.%s, off, err = unpackStringBase64(msg, off, off + int(rr.%s))\n", st.Field(i).Name(), structMember) - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - fmt.Fprint(b, `if err != nil { -return rr, off, err -} -`) - continue - } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"txt"`: - o("rr.%s, off, err = unpackStringTxt(msg, off)\n") - case `dns:"opt"`: - o("rr.%s, off, err = unpackDataOpt(msg, off)\n") - case `dns:"nsec"`: - o("rr.%s, off, err = unpackDataNsec(msg, off)\n") - case `dns:"domain-name"`: - o("rr.%s, off, err = unpackDataDomainNames(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch st.Tag(i) { - case `dns:"-"`: // ignored - case `dns:"cdomain-name"`: - fallthrough - case `dns:"domain-name"`: - o("rr.%s, off, err = UnpackDomainName(msg, off)\n") - case `dns:"a"`: - o("rr.%s, off, err = unpackDataA(msg, off)\n") - case `dns:"aaaa"`: - o("rr.%s, off, err = unpackDataAAAA(msg, off)\n") - case `dns:"uint48"`: - o("rr.%s, off, err = unpackUint48(msg, off)\n") - case `dns:"txt"`: - o("rr.%s, off, err = unpackString(msg, off)\n") - case `dns:"base32"`: - o("rr.%s, off, err = unpackStringBase32(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"base64"`: - o("rr.%s, off, err = unpackStringBase64(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") - case `dns:"hex"`: - o("rr.%s, off, err = unpackStringHex(msg, off, rdStart + 
int(rr.Hdr.Rdlength))\n") - case `dns:"octet"`: - o("rr.%s, off, err = unpackStringOctet(msg, off)\n") - case "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("rr.%s, off, err = unpackUint8(msg, off)\n") - case types.Uint16: - o("rr.%s, off, err = unpackUint16(msg, off)\n") - case types.Uint32: - o("rr.%s, off, err = unpackUint32(msg, off)\n") - case types.Uint64: - o("rr.%s, off, err = unpackUint64(msg, off)\n") - case types.String: - o("rr.%s, off, err = unpackString(msg, off)\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - // If we've hit len(msg) we return without error. - if i < st.NumFields()-1 { - fmt.Fprintf(b, `if off == len(msg) { -return rr, off, nil - } -`) - } - } - fmt.Fprintf(b, "return rr, off, err }\n\n") - } - // Generate typeToUnpack map - fmt.Fprintln(b, "var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){") - for _, name := range namedTypes { - if name == "RFC3597" { - continue - } - fmt.Fprintf(b, "Type%s: unpack%s,\n", name, name) - } - fmt.Fprintln(b, "}\n") - - // gofmt - res, err := format.Source(b.Bytes()) - if err != nil { - b.WriteTo(os.Stderr) - log.Fatal(err) - } - - // write result - f, err := os.Create("zmsg.go") - fatalIfErr(err) - defer f.Close() - f.Write(res) -} - -// structMember will take a tag like dns:"size-base32:SaltLength" and return the last part of this string. -func structMember(s string) string { - fields := strings.Split(s, ":") - if len(fields) == 0 { - return "" - } - f := fields[len(fields)-1] - // f should have a closing " - if len(f) > 1 { - return f[:len(f)-1] - } - return f -} - -// structTag will take a tag like dns:"size-base32:SaltLength" and return base32. 
-func structTag(s string) string { - fields := strings.Split(s, ":") - if len(fields) < 2 { - return "" - } - return fields[1][len("\"size-"):] -} - -func fatalIfErr(err error) { - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go deleted file mode 100644 index 36345e162..000000000 --- a/vendor/github.com/miekg/dns/msg_helpers.go +++ /dev/null @@ -1,633 +0,0 @@ -package dns - -import ( - "encoding/base32" - "encoding/base64" - "encoding/binary" - "encoding/hex" - "net" - "strings" -) - -// helper functions called from the generated zmsg.go - -// These function are named after the tag to help pack/unpack, if there is no tag it is the name -// of the type they pack/unpack (string, int, etc). We prefix all with unpackData or packData, so packDataA or -// packDataDomainName. - -func unpackDataA(msg []byte, off int) (net.IP, int, error) { - if off+net.IPv4len > len(msg) { - return nil, len(msg), &Error{err: "overflow unpacking a"} - } - a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...) - off += net.IPv4len - return a, off, nil -} - -func packDataA(a net.IP, msg []byte, off int) (int, error) { - // It must be a slice of 4, even if it is 16, we encode only the first 4 - if off+net.IPv4len > len(msg) { - return len(msg), &Error{err: "overflow packing a"} - } - switch len(a) { - case net.IPv4len, net.IPv6len: - copy(msg[off:], a.To4()) - off += net.IPv4len - case 0: - // Allowed, for dynamic updates. - default: - return len(msg), &Error{err: "overflow packing a"} - } - return off, nil -} - -func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) { - if off+net.IPv6len > len(msg) { - return nil, len(msg), &Error{err: "overflow unpacking aaaa"} - } - aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...) 
- off += net.IPv6len - return aaaa, off, nil -} - -func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) { - if off+net.IPv6len > len(msg) { - return len(msg), &Error{err: "overflow packing aaaa"} - } - - switch len(aaaa) { - case net.IPv6len: - copy(msg[off:], aaaa) - off += net.IPv6len - case 0: - // Allowed, dynamic updates. - default: - return len(msg), &Error{err: "overflow packing aaaa"} - } - return off, nil -} - -// unpackHeader unpacks an RR header, returning the offset to the end of the header and a -// re-sliced msg according to the expected length of the RR. -func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte, err error) { - hdr := RR_Header{} - if off == len(msg) { - return hdr, off, msg, nil - } - - hdr.Name, off, err = UnpackDomainName(msg, off) - if err != nil { - return hdr, len(msg), msg, err - } - hdr.Rrtype, off, err = unpackUint16(msg, off) - if err != nil { - return hdr, len(msg), msg, err - } - hdr.Class, off, err = unpackUint16(msg, off) - if err != nil { - return hdr, len(msg), msg, err - } - hdr.Ttl, off, err = unpackUint32(msg, off) - if err != nil { - return hdr, len(msg), msg, err - } - hdr.Rdlength, off, err = unpackUint16(msg, off) - if err != nil { - return hdr, len(msg), msg, err - } - msg, err = truncateMsgFromRdlength(msg, off, hdr.Rdlength) - return hdr, off, msg, err -} - -// pack packs an RR header, returning the offset to the end of the header. -// See PackDomainName for documentation about the compression. 
-func (hdr RR_Header) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - if off == len(msg) { - return off, off, nil - } - - off, _, err := packDomainName(hdr.Name, msg, off, compression, compress) - if err != nil { - return off, len(msg), err - } - off, err = packUint16(hdr.Rrtype, msg, off) - if err != nil { - return off, len(msg), err - } - off, err = packUint16(hdr.Class, msg, off) - if err != nil { - return off, len(msg), err - } - off, err = packUint32(hdr.Ttl, msg, off) - if err != nil { - return off, len(msg), err - } - off, err = packUint16(0, msg, off) // The RDLENGTH field will be set later in packRR. - if err != nil { - return off, len(msg), err - } - return off, off, nil -} - -// helper helper functions. - -// truncateMsgFromRdLength truncates msg to match the expected length of the RR. -// Returns an error if msg is smaller than the expected size. -func truncateMsgFromRdlength(msg []byte, off int, rdlength uint16) (truncmsg []byte, err error) { - lenrd := off + int(rdlength) - if lenrd > len(msg) { - return msg, &Error{err: "overflowing header size"} - } - return msg[:lenrd], nil -} - -var base32HexNoPadEncoding = base32.HexEncoding.WithPadding(base32.NoPadding) - -func fromBase32(s []byte) (buf []byte, err error) { - for i, b := range s { - if b >= 'a' && b <= 'z' { - s[i] = b - 32 - } - } - buflen := base32HexNoPadEncoding.DecodedLen(len(s)) - buf = make([]byte, buflen) - n, err := base32HexNoPadEncoding.Decode(buf, s) - buf = buf[:n] - return -} - -func toBase32(b []byte) string { - return base32HexNoPadEncoding.EncodeToString(b) -} - -func fromBase64(s []byte) (buf []byte, err error) { - buflen := base64.StdEncoding.DecodedLen(len(s)) - buf = make([]byte, buflen) - n, err := base64.StdEncoding.Decode(buf, s) - buf = buf[:n] - return -} - -func toBase64(b []byte) string { return base64.StdEncoding.EncodeToString(b) } - -// dynamicUpdate returns true if the Rdlength is zero. 
-func noRdata(h RR_Header) bool { return h.Rdlength == 0 } - -func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) { - if off+1 > len(msg) { - return 0, len(msg), &Error{err: "overflow unpacking uint8"} - } - return uint8(msg[off]), off + 1, nil -} - -func packUint8(i uint8, msg []byte, off int) (off1 int, err error) { - if off+1 > len(msg) { - return len(msg), &Error{err: "overflow packing uint8"} - } - msg[off] = byte(i) - return off + 1, nil -} - -func unpackUint16(msg []byte, off int) (i uint16, off1 int, err error) { - if off+2 > len(msg) { - return 0, len(msg), &Error{err: "overflow unpacking uint16"} - } - return binary.BigEndian.Uint16(msg[off:]), off + 2, nil -} - -func packUint16(i uint16, msg []byte, off int) (off1 int, err error) { - if off+2 > len(msg) { - return len(msg), &Error{err: "overflow packing uint16"} - } - binary.BigEndian.PutUint16(msg[off:], i) - return off + 2, nil -} - -func unpackUint32(msg []byte, off int) (i uint32, off1 int, err error) { - if off+4 > len(msg) { - return 0, len(msg), &Error{err: "overflow unpacking uint32"} - } - return binary.BigEndian.Uint32(msg[off:]), off + 4, nil -} - -func packUint32(i uint32, msg []byte, off int) (off1 int, err error) { - if off+4 > len(msg) { - return len(msg), &Error{err: "overflow packing uint32"} - } - binary.BigEndian.PutUint32(msg[off:], i) - return off + 4, nil -} - -func unpackUint48(msg []byte, off int) (i uint64, off1 int, err error) { - if off+6 > len(msg) { - return 0, len(msg), &Error{err: "overflow unpacking uint64 as uint48"} - } - // Used in TSIG where the last 48 bits are occupied, so for now, assume a uint48 (6 bytes) - i = uint64(msg[off])<<40 | uint64(msg[off+1])<<32 | uint64(msg[off+2])<<24 | uint64(msg[off+3])<<16 | - uint64(msg[off+4])<<8 | uint64(msg[off+5]) - off += 6 - return i, off, nil -} - -func packUint48(i uint64, msg []byte, off int) (off1 int, err error) { - if off+6 > len(msg) { - return len(msg), &Error{err: "overflow packing uint64 as uint48"} 
- } - msg[off] = byte(i >> 40) - msg[off+1] = byte(i >> 32) - msg[off+2] = byte(i >> 24) - msg[off+3] = byte(i >> 16) - msg[off+4] = byte(i >> 8) - msg[off+5] = byte(i) - off += 6 - return off, nil -} - -func unpackUint64(msg []byte, off int) (i uint64, off1 int, err error) { - if off+8 > len(msg) { - return 0, len(msg), &Error{err: "overflow unpacking uint64"} - } - return binary.BigEndian.Uint64(msg[off:]), off + 8, nil -} - -func packUint64(i uint64, msg []byte, off int) (off1 int, err error) { - if off+8 > len(msg) { - return len(msg), &Error{err: "overflow packing uint64"} - } - binary.BigEndian.PutUint64(msg[off:], i) - off += 8 - return off, nil -} - -func unpackString(msg []byte, off int) (string, int, error) { - if off+1 > len(msg) { - return "", off, &Error{err: "overflow unpacking txt"} - } - l := int(msg[off]) - if off+l+1 > len(msg) { - return "", off, &Error{err: "overflow unpacking txt"} - } - var s strings.Builder - s.Grow(l) - for _, b := range msg[off+1 : off+1+l] { - switch { - case b == '"' || b == '\\': - s.WriteByte('\\') - s.WriteByte(b) - case b < ' ' || b > '~': // unprintable - s.WriteString(escapeByte(b)) - default: - s.WriteByte(b) - } - } - off += 1 + l - return s.String(), off, nil -} - -func packString(s string, msg []byte, off int) (int, error) { - txtTmp := make([]byte, 256*4+1) - off, err := packTxtString(s, msg, off, txtTmp) - if err != nil { - return len(msg), err - } - return off, nil -} - -func unpackStringBase32(msg []byte, off, end int) (string, int, error) { - if end > len(msg) { - return "", len(msg), &Error{err: "overflow unpacking base32"} - } - s := toBase32(msg[off:end]) - return s, end, nil -} - -func packStringBase32(s string, msg []byte, off int) (int, error) { - b32, err := fromBase32([]byte(s)) - if err != nil { - return len(msg), err - } - if off+len(b32) > len(msg) { - return len(msg), &Error{err: "overflow packing base32"} - } - copy(msg[off:off+len(b32)], b32) - off += len(b32) - return off, nil -} - -func 
unpackStringBase64(msg []byte, off, end int) (string, int, error) { - // Rest of the RR is base64 encoded value, so we don't need an explicit length - // to be set. Thus far all RR's that have base64 encoded fields have those as their - // last one. What we do need is the end of the RR! - if end > len(msg) { - return "", len(msg), &Error{err: "overflow unpacking base64"} - } - s := toBase64(msg[off:end]) - return s, end, nil -} - -func packStringBase64(s string, msg []byte, off int) (int, error) { - b64, err := fromBase64([]byte(s)) - if err != nil { - return len(msg), err - } - if off+len(b64) > len(msg) { - return len(msg), &Error{err: "overflow packing base64"} - } - copy(msg[off:off+len(b64)], b64) - off += len(b64) - return off, nil -} - -func unpackStringHex(msg []byte, off, end int) (string, int, error) { - // Rest of the RR is hex encoded value, so we don't need an explicit length - // to be set. NSEC and TSIG have hex fields with a length field. - // What we do need is the end of the RR! - if end > len(msg) { - return "", len(msg), &Error{err: "overflow unpacking hex"} - } - - s := hex.EncodeToString(msg[off:end]) - return s, end, nil -} - -func packStringHex(s string, msg []byte, off int) (int, error) { - h, err := hex.DecodeString(s) - if err != nil { - return len(msg), err - } - if off+len(h) > len(msg) { - return len(msg), &Error{err: "overflow packing hex"} - } - copy(msg[off:off+len(h)], h) - off += len(h) - return off, nil -} - -func unpackStringTxt(msg []byte, off int) ([]string, int, error) { - txt, off, err := unpackTxt(msg, off) - if err != nil { - return nil, len(msg), err - } - return txt, off, nil -} - -func packStringTxt(s []string, msg []byte, off int) (int, error) { - txtTmp := make([]byte, 256*4+1) // If the whole string consists out of \DDD we need this many. 
- off, err := packTxt(s, msg, off, txtTmp) - if err != nil { - return len(msg), err - } - return off, nil -} - -func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) { - var edns []EDNS0 -Option: - code := uint16(0) - if off+4 > len(msg) { - return nil, len(msg), &Error{err: "overflow unpacking opt"} - } - code = binary.BigEndian.Uint16(msg[off:]) - off += 2 - optlen := binary.BigEndian.Uint16(msg[off:]) - off += 2 - if off+int(optlen) > len(msg) { - return nil, len(msg), &Error{err: "overflow unpacking opt"} - } - switch code { - case EDNS0NSID: - e := new(EDNS0_NSID) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case EDNS0SUBNET: - e := new(EDNS0_SUBNET) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case EDNS0COOKIE: - e := new(EDNS0_COOKIE) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case EDNS0UL: - e := new(EDNS0_UL) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case EDNS0LLQ: - e := new(EDNS0_LLQ) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case EDNS0DAU: - e := new(EDNS0_DAU) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case EDNS0DHU: - e := new(EDNS0_DHU) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - case EDNS0N3U: - e := new(EDNS0_N3U) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - 
case EDNS0PADDING: - e := new(EDNS0_PADDING) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - default: - e := new(EDNS0_LOCAL) - e.Code = code - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - } - - if off < len(msg) { - goto Option - } - - return edns, off, nil -} - -func packDataOpt(options []EDNS0, msg []byte, off int) (int, error) { - for _, el := range options { - b, err := el.pack() - if err != nil || off+3 > len(msg) { - return len(msg), &Error{err: "overflow packing opt"} - } - binary.BigEndian.PutUint16(msg[off:], el.Option()) // Option code - binary.BigEndian.PutUint16(msg[off+2:], uint16(len(b))) // Length - off += 4 - if off+len(b) > len(msg) { - copy(msg[off:], b) - off = len(msg) - continue - } - // Actual data - copy(msg[off:off+len(b)], b) - off += len(b) - } - return off, nil -} - -func unpackStringOctet(msg []byte, off int) (string, int, error) { - s := string(msg[off:]) - return s, len(msg), nil -} - -func packStringOctet(s string, msg []byte, off int) (int, error) { - txtTmp := make([]byte, 256*4+1) - off, err := packOctetString(s, msg, off, txtTmp) - if err != nil { - return len(msg), err - } - return off, nil -} - -func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) { - var nsec []uint16 - length, window, lastwindow := 0, 0, -1 - for off < len(msg) { - if off+2 > len(msg) { - return nsec, len(msg), &Error{err: "overflow unpacking nsecx"} - } - window = int(msg[off]) - length = int(msg[off+1]) - off += 2 - if window <= lastwindow { - // RFC 4034: Blocks are present in the NSEC RR RDATA in - // increasing numerical order. - return nsec, len(msg), &Error{err: "out of order NSEC block"} - } - if length == 0 { - // RFC 4034: Blocks with no types present MUST NOT be included. 
- return nsec, len(msg), &Error{err: "empty NSEC block"} - } - if length > 32 { - return nsec, len(msg), &Error{err: "NSEC block too long"} - } - if off+length > len(msg) { - return nsec, len(msg), &Error{err: "overflowing NSEC block"} - } - - // Walk the bytes in the window and extract the type bits - for j := 0; j < length; j++ { - b := msg[off+j] - // Check the bits one by one, and set the type - if b&0x80 == 0x80 { - nsec = append(nsec, uint16(window*256+j*8+0)) - } - if b&0x40 == 0x40 { - nsec = append(nsec, uint16(window*256+j*8+1)) - } - if b&0x20 == 0x20 { - nsec = append(nsec, uint16(window*256+j*8+2)) - } - if b&0x10 == 0x10 { - nsec = append(nsec, uint16(window*256+j*8+3)) - } - if b&0x8 == 0x8 { - nsec = append(nsec, uint16(window*256+j*8+4)) - } - if b&0x4 == 0x4 { - nsec = append(nsec, uint16(window*256+j*8+5)) - } - if b&0x2 == 0x2 { - nsec = append(nsec, uint16(window*256+j*8+6)) - } - if b&0x1 == 0x1 { - nsec = append(nsec, uint16(window*256+j*8+7)) - } - } - off += length - lastwindow = window - } - return nsec, off, nil -} - -func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) { - if len(bitmap) == 0 { - return off, nil - } - var lastwindow, lastlength uint16 - for j := 0; j < len(bitmap); j++ { - t := bitmap[j] - window := t / 256 - length := (t-window*256)/8 + 1 - if window > lastwindow && lastlength != 0 { // New window, jump to the new offset - off += int(lastlength) + 2 - lastlength = 0 - } - if window < lastwindow || length < lastlength { - return len(msg), &Error{err: "nsec bits out of order"} - } - if off+2+int(length) > len(msg) { - return len(msg), &Error{err: "overflow packing nsec"} - } - // Setting the window # - msg[off] = byte(window) - // Setting the octets length - msg[off+1] = byte(length) - // Setting the bit value for the type in the right octet - msg[off+1+int(length)] |= byte(1 << (7 - t%8)) - lastwindow, lastlength = window, length - } - off += int(lastlength) + 2 - return off, nil -} - -func 
unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) { - var ( - servers []string - s string - err error - ) - if end > len(msg) { - return nil, len(msg), &Error{err: "overflow unpacking domain names"} - } - for off < end { - s, off, err = UnpackDomainName(msg, off) - if err != nil { - return servers, len(msg), err - } - servers = append(servers, s) - } - return servers, off, nil -} - -func packDataDomainNames(names []string, msg []byte, off int, compression compressionMap, compress bool) (int, error) { - var err error - for j := 0; j < len(names); j++ { - off, _, err = packDomainName(names[j], msg, off, compression, compress) - if err != nil { - return len(msg), err - } - } - return off, nil -} diff --git a/vendor/github.com/miekg/dns/nsecx.go b/vendor/github.com/miekg/dns/nsecx.go deleted file mode 100644 index 8f071a473..000000000 --- a/vendor/github.com/miekg/dns/nsecx.go +++ /dev/null @@ -1,95 +0,0 @@ -package dns - -import ( - "crypto/sha1" - "encoding/hex" - "strings" -) - -// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase. 
-func HashName(label string, ha uint8, iter uint16, salt string) string { - if ha != SHA1 { - return "" - } - - wireSalt := make([]byte, hex.DecodedLen(len(salt))) - n, err := packStringHex(salt, wireSalt, 0) - if err != nil { - return "" - } - wireSalt = wireSalt[:n] - - name := make([]byte, 255) - off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false) - if err != nil { - return "" - } - name = name[:off] - - s := sha1.New() - // k = 0 - s.Write(name) - s.Write(wireSalt) - nsec3 := s.Sum(nil) - - // k > 0 - for k := uint16(0); k < iter; k++ { - s.Reset() - s.Write(nsec3) - s.Write(wireSalt) - nsec3 = s.Sum(nsec3[:0]) - } - - return toBase32(nsec3) -} - -// Cover returns true if a name is covered by the NSEC3 record -func (rr *NSEC3) Cover(name string) bool { - nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt) - owner := strings.ToUpper(rr.Hdr.Name) - labelIndices := Split(owner) - if len(labelIndices) < 2 { - return false - } - ownerHash := owner[:labelIndices[1]-1] - ownerZone := owner[labelIndices[1]:] - if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone - return false - } - - nextHash := rr.NextDomain - - // if empty interval found, try cover wildcard hashes so nameHash shouldn't match with ownerHash - if ownerHash == nextHash && nameHash != ownerHash { // empty interval - return true - } - if ownerHash > nextHash { // end of zone - if nameHash > ownerHash { // covered since there is nothing after ownerHash - return true - } - return nameHash < nextHash // if nameHash is before beginning of zone it is covered - } - if nameHash < ownerHash { // nameHash is before ownerHash, not covered - return false - } - return nameHash < nextHash // if nameHash is before nextHash is it covered (between ownerHash and nextHash) -} - -// Match returns true if a name matches the NSEC3 record -func (rr *NSEC3) Match(name string) bool { - nameHash := HashName(name, rr.Hash, rr.Iterations, rr.Salt) - owner := 
strings.ToUpper(rr.Hdr.Name) - labelIndices := Split(owner) - if len(labelIndices) < 2 { - return false - } - ownerHash := owner[:labelIndices[1]-1] - ownerZone := owner[labelIndices[1]:] - if !IsSubDomain(ownerZone, strings.ToUpper(name)) { // name is outside owner zone - return false - } - if ownerHash == nameHash { - return true - } - return false -} diff --git a/vendor/github.com/miekg/dns/privaterr.go b/vendor/github.com/miekg/dns/privaterr.go deleted file mode 100644 index 18355f9be..000000000 --- a/vendor/github.com/miekg/dns/privaterr.go +++ /dev/null @@ -1,151 +0,0 @@ -package dns - -import ( - "fmt" - "strings" -) - -// PrivateRdata is an interface used for implementing "Private Use" RR types, see -// RFC 6895. This allows one to experiment with new RR types, without requesting an -// official type code. Also see dns.PrivateHandle and dns.PrivateHandleRemove. -type PrivateRdata interface { - // String returns the text presentaton of the Rdata of the Private RR. - String() string - // Parse parses the Rdata of the private RR. - Parse([]string) error - // Pack is used when packing a private RR into a buffer. - Pack([]byte) (int, error) - // Unpack is used when unpacking a private RR from a buffer. - // TODO(miek): diff. signature than Pack, see edns0.go for instance. - Unpack([]byte) (int, error) - // Copy copies the Rdata. - Copy(PrivateRdata) error - // Len returns the length in octets of the Rdata. - Len() int -} - -// PrivateRR represents an RR that uses a PrivateRdata user-defined type. -// It mocks normal RRs and implements dns.RR interface. -type PrivateRR struct { - Hdr RR_Header - Data PrivateRdata -} - -func mkPrivateRR(rrtype uint16) *PrivateRR { - // Panics if RR is not an instance of PrivateRR. 
- rrfunc, ok := TypeToRR[rrtype] - if !ok { - panic(fmt.Sprintf("dns: invalid operation with Private RR type %d", rrtype)) - } - - anyrr := rrfunc() - switch rr := anyrr.(type) { - case *PrivateRR: - return rr - } - panic(fmt.Sprintf("dns: RR is not a PrivateRR, TypeToRR[%d] generator returned %T", rrtype, anyrr)) -} - -// Header return the RR header of r. -func (r *PrivateRR) Header() *RR_Header { return &r.Hdr } - -func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() } - -// Private len and copy parts to satisfy RR interface. -func (r *PrivateRR) len(off int, compression map[string]struct{}) int { - l := r.Hdr.len(off, compression) - l += r.Data.Len() - return l -} - -func (r *PrivateRR) copy() RR { - // make new RR like this: - rr := mkPrivateRR(r.Hdr.Rrtype) - rr.Hdr = r.Hdr - - err := r.Data.Copy(rr.Data) - if err != nil { - panic("dns: got value that could not be used to copy Private rdata") - } - return rr -} - -func (r *PrivateRR) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := r.Hdr.pack(msg, off, compression, compress) - if err != nil { - return off, off, err - } - n, err := r.Data.Pack(msg[off:]) - if err != nil { - return headerEnd, len(msg), err - } - off += n - return headerEnd, off, nil -} - -// PrivateHandle registers a private resource record type. It requires -// string and numeric representation of private RR type and generator function as argument. 
-func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) { - rtypestr = strings.ToUpper(rtypestr) - - TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} } - TypeToString[rtype] = rtypestr - StringToType[rtypestr] = rtype - - typeToUnpack[rtype] = func(h RR_Header, msg []byte, off int) (RR, int, error) { - if noRdata(h) { - return &h, off, nil - } - var err error - - rr := mkPrivateRR(h.Rrtype) - rr.Hdr = h - - off1, err := rr.Data.Unpack(msg[off:]) - off += off1 - if err != nil { - return rr, off, err - } - return rr, off, err - } - - setPrivateRR := func(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := mkPrivateRR(h.Rrtype) - rr.Hdr = h - - var l lex - text := make([]string, 0, 2) // could be 0..N elements, median is probably 1 - Fetch: - for { - // TODO(miek): we could also be returning _QUOTE, this might or might not - // be an issue (basically parsing TXT becomes hard) - switch l, _ = c.Next(); l.value { - case zNewline, zEOF: - break Fetch - case zString: - text = append(text, l.token) - } - } - - err := rr.Data.Parse(text) - if err != nil { - return nil, &ParseError{f, err.Error(), l}, "" - } - - return rr, nil, "" - } - - typeToparserFunc[rtype] = parserFunc{setPrivateRR, true} -} - -// PrivateHandleRemove removes definitions required to support private RR type. -func PrivateHandleRemove(rtype uint16) { - rtypestr, ok := TypeToString[rtype] - if ok { - delete(TypeToRR, rtype) - delete(TypeToString, rtype) - delete(typeToparserFunc, rtype) - delete(StringToType, rtypestr) - delete(typeToUnpack, rtype) - } -} diff --git a/vendor/github.com/miekg/dns/reverse.go b/vendor/github.com/miekg/dns/reverse.go deleted file mode 100644 index 1f0e2b2a4..000000000 --- a/vendor/github.com/miekg/dns/reverse.go +++ /dev/null @@ -1,43 +0,0 @@ -package dns - -// StringToType is the reverse of TypeToString, needed for string parsing. 
-var StringToType = reverseInt16(TypeToString) - -// StringToClass is the reverse of ClassToString, needed for string parsing. -var StringToClass = reverseInt16(ClassToString) - -// StringToOpcode is a map of opcodes to strings. -var StringToOpcode = reverseInt(OpcodeToString) - -// StringToRcode is a map of rcodes to strings. -var StringToRcode = reverseInt(RcodeToString) - -func init() { - // Preserve previous NOTIMP typo, see github.com/miekg/dns/issues/733. - StringToRcode["NOTIMPL"] = RcodeNotImplemented -} - -// Reverse a map -func reverseInt8(m map[uint8]string) map[string]uint8 { - n := make(map[string]uint8, len(m)) - for u, s := range m { - n[s] = u - } - return n -} - -func reverseInt16(m map[uint16]string) map[string]uint16 { - n := make(map[string]uint16, len(m)) - for u, s := range m { - n[s] = u - } - return n -} - -func reverseInt(m map[int]string) map[string]int { - n := make(map[string]int, len(m)) - for u, s := range m { - n[s] = u - } - return n -} diff --git a/vendor/github.com/miekg/dns/sanitize.go b/vendor/github.com/miekg/dns/sanitize.go deleted file mode 100644 index cac15787a..000000000 --- a/vendor/github.com/miekg/dns/sanitize.go +++ /dev/null @@ -1,85 +0,0 @@ -package dns - -// Dedup removes identical RRs from rrs. It preserves the original ordering. -// The lowest TTL of any duplicates is used in the remaining one. Dedup modifies -// rrs. -// m is used to store the RRs temporary. If it is nil a new map will be allocated. -func Dedup(rrs []RR, m map[string]RR) []RR { - - if m == nil { - m = make(map[string]RR) - } - // Save the keys, so we don't have to call normalizedString twice. - keys := make([]*string, 0, len(rrs)) - - for _, r := range rrs { - key := normalizedString(r) - keys = append(keys, &key) - if _, ok := m[key]; ok { - // Shortest TTL wins. 
- if m[key].Header().Ttl > r.Header().Ttl { - m[key].Header().Ttl = r.Header().Ttl - } - continue - } - - m[key] = r - } - // If the length of the result map equals the amount of RRs we got, - // it means they were all different. We can then just return the original rrset. - if len(m) == len(rrs) { - return rrs - } - - j := 0 - for i, r := range rrs { - // If keys[i] lives in the map, we should copy and remove it. - if _, ok := m[*keys[i]]; ok { - delete(m, *keys[i]) - rrs[j] = r - j++ - } - - if len(m) == 0 { - break - } - } - - return rrs[:j] -} - -// normalizedString returns a normalized string from r. The TTL -// is removed and the domain name is lowercased. We go from this: -// DomainNameTTLCLASSTYPERDATA to: -// lowercasenameCLASSTYPE... -func normalizedString(r RR) string { - // A string Go DNS makes has: domainnameTTL... - b := []byte(r.String()) - - // find the first non-escaped tab, then another, so we capture where the TTL lives. - esc := false - ttlStart, ttlEnd := 0, 0 - for i := 0; i < len(b) && ttlEnd == 0; i++ { - switch { - case b[i] == '\\': - esc = !esc - case b[i] == '\t' && !esc: - if ttlStart == 0 { - ttlStart = i - continue - } - if ttlEnd == 0 { - ttlEnd = i - } - case b[i] >= 'A' && b[i] <= 'Z' && !esc: - b[i] += 32 - default: - esc = false - } - } - - // remove TTL. - copy(b[ttlStart:], b[ttlEnd:]) - cut := ttlEnd - ttlStart - return string(b[:len(b)-cut]) -} diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go deleted file mode 100644 index 61ace121e..000000000 --- a/vendor/github.com/miekg/dns/scan.go +++ /dev/null @@ -1,1331 +0,0 @@ -package dns - -import ( - "bufio" - "fmt" - "io" - "os" - "path/filepath" - "strconv" - "strings" -) - -const maxTok = 2048 // Largest token we can return. - -// The maximum depth of $INCLUDE directives supported by the -// ZoneParser API. -const maxIncludeDepth = 7 - -// Tokinize a RFC 1035 zone file. 
The tokenizer will normalize it: -// * Add ownernames if they are left blank; -// * Suppress sequences of spaces; -// * Make each RR fit on one line (_NEWLINE is send as last) -// * Handle comments: ; -// * Handle braces - anywhere. -const ( - // Zonefile - zEOF = iota - zString - zBlank - zQuote - zNewline - zRrtpe - zOwner - zClass - zDirOrigin // $ORIGIN - zDirTTL // $TTL - zDirInclude // $INCLUDE - zDirGenerate // $GENERATE - - // Privatekey file - zValue - zKey - - zExpectOwnerDir // Ownername - zExpectOwnerBl // Whitespace after the ownername - zExpectAny // Expect rrtype, ttl or class - zExpectAnyNoClass // Expect rrtype or ttl - zExpectAnyNoClassBl // The whitespace after _EXPECT_ANY_NOCLASS - zExpectAnyNoTTL // Expect rrtype or class - zExpectAnyNoTTLBl // Whitespace after _EXPECT_ANY_NOTTL - zExpectRrtype // Expect rrtype - zExpectRrtypeBl // Whitespace BEFORE rrtype - zExpectRdata // The first element of the rdata - zExpectDirTTLBl // Space after directive $TTL - zExpectDirTTL // Directive $TTL - zExpectDirOriginBl // Space after directive $ORIGIN - zExpectDirOrigin // Directive $ORIGIN - zExpectDirIncludeBl // Space after directive $INCLUDE - zExpectDirInclude // Directive $INCLUDE - zExpectDirGenerate // Directive $GENERATE - zExpectDirGenerateBl // Space after directive $GENERATE -) - -// ParseError is a parsing error. It contains the parse error and the location in the io.Reader -// where the error occurred. -type ParseError struct { - file string - err string - lex lex -} - -func (e *ParseError) Error() (s string) { - if e.file != "" { - s = e.file + ": " - } - s += "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " + - strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column) - return -} - -type lex struct { - token string // text of the token - err bool // when true, token text has lexer error - value uint8 // value: zString, _BLANK, etc. 
- torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar - line int // line in the file - column int // column in the file - comment string // any comment text seen -} - -// Token holds the token that are returned when a zone file is parsed. -type Token struct { - // The scanned resource record when error is not nil. - RR - // When an error occurred, this has the error specifics. - Error *ParseError - // A potential comment positioned after the RR and on the same line. - Comment string -} - -// ttlState describes the state necessary to fill in an omitted RR TTL -type ttlState struct { - ttl uint32 // ttl is the current default TTL - isByDirective bool // isByDirective indicates whether ttl was set by a $TTL directive -} - -// NewRR reads the RR contained in the string s. Only the first RR is -// returned. If s contains no records, NewRR will return nil with no -// error. -// -// The class defaults to IN and TTL defaults to 3600. The full zone -// file syntax like $TTL, $ORIGIN, etc. is supported. -// -// All fields of the returned RR are set, except RR.Header().Rdlength -// which is set to 0. -func NewRR(s string) (RR, error) { - if len(s) > 0 && s[len(s)-1] != '\n' { // We need a closing newline - return ReadRR(strings.NewReader(s+"\n"), "") - } - return ReadRR(strings.NewReader(s), "") -} - -// ReadRR reads the RR contained in r. -// -// The string file is used in error reporting and to resolve relative -// $INCLUDE directives. -// -// See NewRR for more documentation. -func ReadRR(r io.Reader, file string) (RR, error) { - zp := NewZoneParser(r, ".", file) - zp.SetDefaultTTL(defaultTtl) - zp.SetIncludeAllowed(true) - rr, _ := zp.Next() - return rr, zp.Err() -} - -// ParseZone reads a RFC 1035 style zonefile from r. It returns -// *Tokens on the returned channel, each consisting of either a -// parsed RR and optional comment or a nil RR and an error. The -// channel is closed by ParseZone when the end of r is reached. 
-// -// The string file is used in error reporting and to resolve relative -// $INCLUDE directives. The string origin is used as the initial -// origin, as if the file would start with an $ORIGIN directive. -// -// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are all -// supported. -// -// Basic usage pattern when reading from a string (z) containing the -// zone data: -// -// for x := range dns.ParseZone(strings.NewReader(z), "", "") { -// if x.Error != nil { -// // log.Println(x.Error) -// } else { -// // Do something with x.RR -// } -// } -// -// Comments specified after an RR (and on the same line!) are -// returned too: -// -// foo. IN A 10.0.0.1 ; this is a comment -// -// The text "; this is comment" is returned in Token.Comment. -// Comments inside the RR are returned concatenated along with the -// RR. Comments on a line by themselves are discarded. -// -// To prevent memory leaks it is important to always fully drain the -// returned channel. If an error occurs, it will always be the last -// Token sent on the channel. -// -// Deprecated: New users should prefer the ZoneParser API. -func ParseZone(r io.Reader, origin, file string) chan *Token { - t := make(chan *Token, 10000) - go parseZone(r, origin, file, t) - return t -} - -func parseZone(r io.Reader, origin, file string, t chan *Token) { - defer close(t) - - zp := NewZoneParser(r, origin, file) - zp.SetIncludeAllowed(true) - - for rr, ok := zp.Next(); ok; rr, ok = zp.Next() { - t <- &Token{RR: rr, Comment: zp.Comment()} - } - - if err := zp.Err(); err != nil { - pe, ok := err.(*ParseError) - if !ok { - pe = &ParseError{file: file, err: err.Error()} - } - - t <- &Token{Error: pe} - } -} - -// ZoneParser is a parser for an RFC 1035 style zonefile. -// -// Each parsed RR in the zone is returned sequentially from Next. An -// optional comment can be retrieved with Comment. -// -// The directives $INCLUDE, $ORIGIN, $TTL and $GENERATE are all -// supported. Although $INCLUDE is disabled by default. 
-// -// Basic usage pattern when reading from a string (z) containing the -// zone data: -// -// zp := NewZoneParser(strings.NewReader(z), "", "") -// -// for rr, ok := zp.Next(); ok; rr, ok = zp.Next() { -// // Do something with rr -// } -// -// if err := zp.Err(); err != nil { -// // log.Println(err) -// } -// -// Comments specified after an RR (and on the same line!) are -// returned too: -// -// foo. IN A 10.0.0.1 ; this is a comment -// -// The text "; this is comment" is returned from Comment. Comments inside -// the RR are returned concatenated along with the RR. Comments on a line -// by themselves are discarded. -type ZoneParser struct { - c *zlexer - - parseErr *ParseError - - origin string - file string - - defttl *ttlState - - h RR_Header - - // sub is used to parse $INCLUDE files and $GENERATE directives. - // Next, by calling subNext, forwards the resulting RRs from this - // sub parser to the calling code. - sub *ZoneParser - osFile *os.File - - com string - - includeDepth uint8 - - includeAllowed bool -} - -// NewZoneParser returns an RFC 1035 style zonefile parser that reads -// from r. -// -// The string file is used in error reporting and to resolve relative -// $INCLUDE directives. The string origin is used as the initial -// origin, as if the file would start with an $ORIGIN directive. -func NewZoneParser(r io.Reader, origin, file string) *ZoneParser { - var pe *ParseError - if origin != "" { - origin = Fqdn(origin) - if _, ok := IsDomainName(origin); !ok { - pe = &ParseError{file, "bad initial origin name", lex{}} - } - } - - return &ZoneParser{ - c: newZLexer(r), - - parseErr: pe, - - origin: origin, - file: file, - } -} - -// SetDefaultTTL sets the parsers default TTL to ttl. -func (zp *ZoneParser) SetDefaultTTL(ttl uint32) { - zp.defttl = &ttlState{ttl, false} -} - -// SetIncludeAllowed controls whether $INCLUDE directives are -// allowed. $INCLUDE directives are not supported by default. 
-// -// The $INCLUDE directive will open and read from a user controlled -// file on the system. Even if the file is not a valid zonefile, the -// contents of the file may be revealed in error messages, such as: -// -// /etc/passwd: dns: not a TTL: "root:x:0:0:root:/root:/bin/bash" at line: 1:31 -// /etc/shadow: dns: not a TTL: "root:$6$::0:99999:7:::" at line: 1:125 -func (zp *ZoneParser) SetIncludeAllowed(v bool) { - zp.includeAllowed = v -} - -// Err returns the first non-EOF error that was encountered by the -// ZoneParser. -func (zp *ZoneParser) Err() error { - if zp.parseErr != nil { - return zp.parseErr - } - - if zp.sub != nil { - if err := zp.sub.Err(); err != nil { - return err - } - } - - return zp.c.Err() -} - -func (zp *ZoneParser) setParseError(err string, l lex) (RR, bool) { - zp.parseErr = &ParseError{zp.file, err, l} - return nil, false -} - -// Comment returns an optional text comment that occurred alongside -// the RR. -func (zp *ZoneParser) Comment() string { - return zp.com -} - -func (zp *ZoneParser) subNext() (RR, bool) { - if rr, ok := zp.sub.Next(); ok { - zp.com = zp.sub.com - return rr, true - } - - if zp.sub.osFile != nil { - zp.sub.osFile.Close() - zp.sub.osFile = nil - } - - if zp.sub.Err() != nil { - // We have errors to surface. - return nil, false - } - - zp.sub = nil - return zp.Next() -} - -// Next advances the parser to the next RR in the zonefile and -// returns the (RR, true). It will return (nil, false) when the -// parsing stops, either by reaching the end of the input or an -// error. After Next returns (nil, false), the Err method will return -// any error that occurred during parsing. -func (zp *ZoneParser) Next() (RR, bool) { - zp.com = "" - - if zp.parseErr != nil { - return nil, false - } - if zp.sub != nil { - return zp.subNext() - } - - // 6 possible beginnings of a line (_ is a space): - // - // 0. zRRTYPE -> all omitted until the rrtype - // 1. zOwner _ zRrtype -> class/ttl omitted - // 2. 
zOwner _ zString _ zRrtype -> class omitted - // 3. zOwner _ zString _ zClass _ zRrtype -> ttl/class - // 4. zOwner _ zClass _ zRrtype -> ttl omitted - // 5. zOwner _ zClass _ zString _ zRrtype -> class/ttl (reversed) - // - // After detecting these, we know the zRrtype so we can jump to functions - // handling the rdata for each of these types. - - st := zExpectOwnerDir // initial state - h := &zp.h - - for l, ok := zp.c.Next(); ok; l, ok = zp.c.Next() { - // zlexer spotted an error already - if l.err { - return zp.setParseError(l.token, l) - } - - switch st { - case zExpectOwnerDir: - // We can also expect a directive, like $TTL or $ORIGIN - if zp.defttl != nil { - h.Ttl = zp.defttl.ttl - } - - h.Class = ClassINET - - switch l.value { - case zNewline: - st = zExpectOwnerDir - case zOwner: - name, ok := toAbsoluteName(l.token, zp.origin) - if !ok { - return zp.setParseError("bad owner name", l) - } - - h.Name = name - - st = zExpectOwnerBl - case zDirTTL: - st = zExpectDirTTLBl - case zDirOrigin: - st = zExpectDirOriginBl - case zDirInclude: - st = zExpectDirIncludeBl - case zDirGenerate: - st = zExpectDirGenerateBl - case zRrtpe: - h.Rrtype = l.torc - - st = zExpectRdata - case zClass: - h.Class = l.torc - - st = zExpectAnyNoClassBl - case zBlank: - // Discard, can happen when there is nothing on the - // line except the RR type - case zString: - ttl, ok := stringToTTL(l.token) - if !ok { - return zp.setParseError("not a TTL", l) - } - - h.Ttl = ttl - - if zp.defttl == nil || !zp.defttl.isByDirective { - zp.defttl = &ttlState{ttl, false} - } - - st = zExpectAnyNoTTLBl - default: - return zp.setParseError("syntax error at beginning", l) - } - case zExpectDirIncludeBl: - if l.value != zBlank { - return zp.setParseError("no blank after $INCLUDE-directive", l) - } - - st = zExpectDirInclude - case zExpectDirInclude: - if l.value != zString { - return zp.setParseError("expecting $INCLUDE value, not this...", l) - } - - neworigin := zp.origin // There may be optionally 
a new origin set after the filename, if not use current one - switch l, _ := zp.c.Next(); l.value { - case zBlank: - l, _ := zp.c.Next() - if l.value == zString { - name, ok := toAbsoluteName(l.token, zp.origin) - if !ok { - return zp.setParseError("bad origin name", l) - } - - neworigin = name - } - case zNewline, zEOF: - // Ok - default: - return zp.setParseError("garbage after $INCLUDE", l) - } - - if !zp.includeAllowed { - return zp.setParseError("$INCLUDE directive not allowed", l) - } - if zp.includeDepth >= maxIncludeDepth { - return zp.setParseError("too deeply nested $INCLUDE", l) - } - - // Start with the new file - includePath := l.token - if !filepath.IsAbs(includePath) { - includePath = filepath.Join(filepath.Dir(zp.file), includePath) - } - - r1, e1 := os.Open(includePath) - if e1 != nil { - var as string - if !filepath.IsAbs(l.token) { - as = fmt.Sprintf(" as `%s'", includePath) - } - - msg := fmt.Sprintf("failed to open `%s'%s: %v", l.token, as, e1) - return zp.setParseError(msg, l) - } - - zp.sub = NewZoneParser(r1, neworigin, includePath) - zp.sub.defttl, zp.sub.includeDepth, zp.sub.osFile = zp.defttl, zp.includeDepth+1, r1 - zp.sub.SetIncludeAllowed(true) - return zp.subNext() - case zExpectDirTTLBl: - if l.value != zBlank { - return zp.setParseError("no blank after $TTL-directive", l) - } - - st = zExpectDirTTL - case zExpectDirTTL: - if l.value != zString { - return zp.setParseError("expecting $TTL value, not this...", l) - } - - if e, _ := slurpRemainder(zp.c, zp.file); e != nil { - zp.parseErr = e - return nil, false - } - - ttl, ok := stringToTTL(l.token) - if !ok { - return zp.setParseError("expecting $TTL value, not this...", l) - } - - zp.defttl = &ttlState{ttl, true} - - st = zExpectOwnerDir - case zExpectDirOriginBl: - if l.value != zBlank { - return zp.setParseError("no blank after $ORIGIN-directive", l) - } - - st = zExpectDirOrigin - case zExpectDirOrigin: - if l.value != zString { - return zp.setParseError("expecting $ORIGIN value, 
not this...", l) - } - - if e, _ := slurpRemainder(zp.c, zp.file); e != nil { - zp.parseErr = e - return nil, false - } - - name, ok := toAbsoluteName(l.token, zp.origin) - if !ok { - return zp.setParseError("bad origin name", l) - } - - zp.origin = name - - st = zExpectOwnerDir - case zExpectDirGenerateBl: - if l.value != zBlank { - return zp.setParseError("no blank after $GENERATE-directive", l) - } - - st = zExpectDirGenerate - case zExpectDirGenerate: - if l.value != zString { - return zp.setParseError("expecting $GENERATE value, not this...", l) - } - - return zp.generate(l) - case zExpectOwnerBl: - if l.value != zBlank { - return zp.setParseError("no blank after owner", l) - } - - st = zExpectAny - case zExpectAny: - switch l.value { - case zRrtpe: - if zp.defttl == nil { - return zp.setParseError("missing TTL with no previous value", l) - } - - h.Rrtype = l.torc - - st = zExpectRdata - case zClass: - h.Class = l.torc - - st = zExpectAnyNoClassBl - case zString: - ttl, ok := stringToTTL(l.token) - if !ok { - return zp.setParseError("not a TTL", l) - } - - h.Ttl = ttl - - if zp.defttl == nil || !zp.defttl.isByDirective { - zp.defttl = &ttlState{ttl, false} - } - - st = zExpectAnyNoTTLBl - default: - return zp.setParseError("expecting RR type, TTL or class, not this...", l) - } - case zExpectAnyNoClassBl: - if l.value != zBlank { - return zp.setParseError("no blank before class", l) - } - - st = zExpectAnyNoClass - case zExpectAnyNoTTLBl: - if l.value != zBlank { - return zp.setParseError("no blank before TTL", l) - } - - st = zExpectAnyNoTTL - case zExpectAnyNoTTL: - switch l.value { - case zClass: - h.Class = l.torc - - st = zExpectRrtypeBl - case zRrtpe: - h.Rrtype = l.torc - - st = zExpectRdata - default: - return zp.setParseError("expecting RR type or class, not this...", l) - } - case zExpectAnyNoClass: - switch l.value { - case zString: - ttl, ok := stringToTTL(l.token) - if !ok { - return zp.setParseError("not a TTL", l) - } - - h.Ttl = ttl - - if 
zp.defttl == nil || !zp.defttl.isByDirective { - zp.defttl = &ttlState{ttl, false} - } - - st = zExpectRrtypeBl - case zRrtpe: - h.Rrtype = l.torc - - st = zExpectRdata - default: - return zp.setParseError("expecting RR type or TTL, not this...", l) - } - case zExpectRrtypeBl: - if l.value != zBlank { - return zp.setParseError("no blank before RR type", l) - } - - st = zExpectRrtype - case zExpectRrtype: - if l.value != zRrtpe { - return zp.setParseError("unknown RR type", l) - } - - h.Rrtype = l.torc - - st = zExpectRdata - case zExpectRdata: - r, e, c1 := setRR(*h, zp.c, zp.origin, zp.file) - if e != nil { - // If e.lex is nil than we have encounter a unknown RR type - // in that case we substitute our current lex token - if e.lex.token == "" && e.lex.value == 0 { - e.lex = l // Uh, dirty - } - - zp.parseErr = e - return nil, false - } - - zp.com = c1 - return r, true - } - } - - // If we get here, we and the h.Rrtype is still zero, we haven't parsed anything, this - // is not an error, because an empty zone file is still a zone file. - return nil, false -} - -type zlexer struct { - br io.ByteReader - - readErr error - - line int - column int - - com string - - l lex - - brace int - quote bool - space bool - commt bool - rrtype bool - owner bool - - nextL bool - - eol bool // end-of-line -} - -func newZLexer(r io.Reader) *zlexer { - br, ok := r.(io.ByteReader) - if !ok { - br = bufio.NewReaderSize(r, 1024) - } - - return &zlexer{ - br: br, - - line: 1, - - owner: true, - } -} - -func (zl *zlexer) Err() error { - if zl.readErr == io.EOF { - return nil - } - - return zl.readErr -} - -// readByte returns the next byte from the input -func (zl *zlexer) readByte() (byte, bool) { - if zl.readErr != nil { - return 0, false - } - - c, err := zl.br.ReadByte() - if err != nil { - zl.readErr = err - return 0, false - } - - // delay the newline handling until the next token is delivered, - // fixes off-by-one errors when reporting a parse error. 
- if zl.eol { - zl.line++ - zl.column = 0 - zl.eol = false - } - - if c == '\n' { - zl.eol = true - } else { - zl.column++ - } - - return c, true -} - -func (zl *zlexer) Next() (lex, bool) { - l := &zl.l - if zl.nextL { - zl.nextL = false - return *l, true - } - if l.err { - // Parsing errors should be sticky. - return lex{value: zEOF}, false - } - - var ( - str [maxTok]byte // Hold string text - com [maxTok]byte // Hold comment text - - stri int // Offset in str (0 means empty) - comi int // Offset in com (0 means empty) - - escape bool - ) - - if zl.com != "" { - comi = copy(com[:], zl.com) - zl.com = "" - } - - for x, ok := zl.readByte(); ok; x, ok = zl.readByte() { - l.line, l.column = zl.line, zl.column - l.comment = "" - - if stri >= len(str) { - l.token = "token length insufficient for parsing" - l.err = true - return *l, true - } - if comi >= len(com) { - l.token = "comment length insufficient for parsing" - l.err = true - return *l, true - } - - switch x { - case ' ', '\t': - if escape || zl.quote { - // Inside quotes or escaped this is legal. - str[stri] = x - stri++ - - escape = false - break - } - - if zl.commt { - com[comi] = x - comi++ - break - } - - var retL lex - if stri == 0 { - // Space directly in the beginning, handled in the grammar - } else if zl.owner { - // If we have a string and its the first, make it an owner - l.value = zOwner - l.token = string(str[:stri]) - - // escape $... 
start with a \ not a $, so this will work - switch strings.ToUpper(l.token) { - case "$TTL": - l.value = zDirTTL - case "$ORIGIN": - l.value = zDirOrigin - case "$INCLUDE": - l.value = zDirInclude - case "$GENERATE": - l.value = zDirGenerate - } - - retL = *l - } else { - l.value = zString - l.token = string(str[:stri]) - - if !zl.rrtype { - tokenUpper := strings.ToUpper(l.token) - if t, ok := StringToType[tokenUpper]; ok { - l.value = zRrtpe - l.torc = t - - zl.rrtype = true - } else if strings.HasPrefix(tokenUpper, "TYPE") { - t, ok := typeToInt(l.token) - if !ok { - l.token = "unknown RR type" - l.err = true - return *l, true - } - - l.value = zRrtpe - l.torc = t - - zl.rrtype = true - } - - if t, ok := StringToClass[tokenUpper]; ok { - l.value = zClass - l.torc = t - } else if strings.HasPrefix(tokenUpper, "CLASS") { - t, ok := classToInt(l.token) - if !ok { - l.token = "unknown class" - l.err = true - return *l, true - } - - l.value = zClass - l.torc = t - } - } - - retL = *l - } - - zl.owner = false - - if !zl.space { - zl.space = true - - l.value = zBlank - l.token = " " - - if retL == (lex{}) { - return *l, true - } - - zl.nextL = true - } - - if retL != (lex{}) { - return retL, true - } - case ';': - if escape || zl.quote { - // Inside quotes or escaped this is legal. - str[stri] = x - stri++ - - escape = false - break - } - - zl.commt = true - zl.com = "" - - if comi > 1 { - // A newline was previously seen inside a comment that - // was inside braces and we delayed adding it until now. 
- com[comi] = ' ' // convert newline to space - comi++ - } - - com[comi] = ';' - comi++ - - if stri > 0 { - zl.com = string(com[:comi]) - - l.value = zString - l.token = string(str[:stri]) - return *l, true - } - case '\r': - escape = false - - if zl.quote { - str[stri] = x - stri++ - } - - // discard if outside of quotes - case '\n': - escape = false - - // Escaped newline - if zl.quote { - str[stri] = x - stri++ - break - } - - if zl.commt { - // Reset a comment - zl.commt = false - zl.rrtype = false - - // If not in a brace this ends the comment AND the RR - if zl.brace == 0 { - zl.owner = true - - l.value = zNewline - l.token = "\n" - l.comment = string(com[:comi]) - return *l, true - } - - zl.com = string(com[:comi]) - break - } - - if zl.brace == 0 { - // If there is previous text, we should output it here - var retL lex - if stri != 0 { - l.value = zString - l.token = string(str[:stri]) - - if !zl.rrtype { - tokenUpper := strings.ToUpper(l.token) - if t, ok := StringToType[tokenUpper]; ok { - zl.rrtype = true - - l.value = zRrtpe - l.torc = t - } - } - - retL = *l - } - - l.value = zNewline - l.token = "\n" - l.comment = zl.com - - zl.com = "" - zl.rrtype = false - zl.owner = true - - if retL != (lex{}) { - zl.nextL = true - return retL, true - } - - return *l, true - } - case '\\': - // comments do not get escaped chars, everything is copied - if zl.commt { - com[comi] = x - comi++ - break - } - - // something already escaped must be in string - if escape { - str[stri] = x - stri++ - - escape = false - break - } - - // something escaped outside of string gets added to string - str[stri] = x - stri++ - - escape = true - case '"': - if zl.commt { - com[comi] = x - comi++ - break - } - - if escape { - str[stri] = x - stri++ - - escape = false - break - } - - zl.space = false - - // send previous gathered text and the quote - var retL lex - if stri != 0 { - l.value = zString - l.token = string(str[:stri]) - - retL = *l - } - - // send quote itself as separate 
token - l.value = zQuote - l.token = "\"" - - zl.quote = !zl.quote - - if retL != (lex{}) { - zl.nextL = true - return retL, true - } - - return *l, true - case '(', ')': - if zl.commt { - com[comi] = x - comi++ - break - } - - if escape || zl.quote { - // Inside quotes or escaped this is legal. - str[stri] = x - stri++ - - escape = false - break - } - - switch x { - case ')': - zl.brace-- - - if zl.brace < 0 { - l.token = "extra closing brace" - l.err = true - return *l, true - } - case '(': - zl.brace++ - } - default: - escape = false - - if zl.commt { - com[comi] = x - comi++ - break - } - - str[stri] = x - stri++ - - zl.space = false - } - } - - if zl.readErr != nil && zl.readErr != io.EOF { - // Don't return any tokens after a read error occurs. - return lex{value: zEOF}, false - } - - var retL lex - if stri > 0 { - // Send remainder of str - l.value = zString - l.token = string(str[:stri]) - retL = *l - - if comi <= 0 { - return retL, true - } - } - - if comi > 0 { - // Send remainder of com - l.value = zNewline - l.token = "\n" - l.comment = string(com[:comi]) - - if retL != (lex{}) { - zl.nextL = true - return retL, true - } - - return *l, true - } - - if zl.brace != 0 { - l.comment = "" // in case there was left over string and comment - l.token = "unbalanced brace" - l.err = true - return *l, true - } - - return lex{value: zEOF}, false -} - -// Extract the class number from CLASSxx -func classToInt(token string) (uint16, bool) { - offset := 5 - if len(token) < offset+1 { - return 0, false - } - class, err := strconv.ParseUint(token[offset:], 10, 16) - if err != nil { - return 0, false - } - return uint16(class), true -} - -// Extract the rr number from TYPExxx -func typeToInt(token string) (uint16, bool) { - offset := 4 - if len(token) < offset+1 { - return 0, false - } - typ, err := strconv.ParseUint(token[offset:], 10, 16) - if err != nil { - return 0, false - } - return uint16(typ), true -} - -// stringToTTL parses things like 2w, 2m, etc, and returns 
the time in seconds. -func stringToTTL(token string) (uint32, bool) { - s := uint32(0) - i := uint32(0) - for _, c := range token { - switch c { - case 's', 'S': - s += i - i = 0 - case 'm', 'M': - s += i * 60 - i = 0 - case 'h', 'H': - s += i * 60 * 60 - i = 0 - case 'd', 'D': - s += i * 60 * 60 * 24 - i = 0 - case 'w', 'W': - s += i * 60 * 60 * 24 * 7 - i = 0 - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - i *= 10 - i += uint32(c) - '0' - default: - return 0, false - } - } - return s + i, true -} - -// Parse LOC records' [.][mM] into a -// mantissa exponent format. Token should contain the entire -// string (i.e. no spaces allowed) -func stringToCm(token string) (e, m uint8, ok bool) { - if token[len(token)-1] == 'M' || token[len(token)-1] == 'm' { - token = token[0 : len(token)-1] - } - s := strings.SplitN(token, ".", 2) - var meters, cmeters, val int - var err error - switch len(s) { - case 2: - if cmeters, err = strconv.Atoi(s[1]); err != nil { - return - } - fallthrough - case 1: - if meters, err = strconv.Atoi(s[0]); err != nil { - return - } - case 0: - // huh? - return 0, 0, false - } - ok = true - if meters > 0 { - e = 2 - val = meters - } else { - e = 0 - val = cmeters - } - for val > 10 { - e++ - val /= 10 - } - if e > 9 { - ok = false - } - m = uint8(val) - return -} - -func toAbsoluteName(name, origin string) (absolute string, ok bool) { - // check for an explicit origin reference - if name == "@" { - // require a nonempty origin - if origin == "" { - return "", false - } - return origin, true - } - - // require a valid domain name - _, ok = IsDomainName(name) - if !ok || name == "" { - return "", false - } - - // check if name is already absolute - if name[len(name)-1] == '.' { - return name, true - } - - // require a nonempty origin - if origin == "" { - return "", false - } - return appendOrigin(name, origin), true -} - -func appendOrigin(name, origin string) string { - if origin == "." { - return name + origin - } - return name + "." 
+ origin -} - -// LOC record helper function -func locCheckNorth(token string, latitude uint32) (uint32, bool) { - switch token { - case "n", "N": - return LOC_EQUATOR + latitude, true - case "s", "S": - return LOC_EQUATOR - latitude, true - } - return latitude, false -} - -// LOC record helper function -func locCheckEast(token string, longitude uint32) (uint32, bool) { - switch token { - case "e", "E": - return LOC_EQUATOR + longitude, true - case "w", "W": - return LOC_EQUATOR - longitude, true - } - return longitude, false -} - -// "Eat" the rest of the "line". Return potential comments -func slurpRemainder(c *zlexer, f string) (*ParseError, string) { - l, _ := c.Next() - com := "" - switch l.value { - case zBlank: - l, _ = c.Next() - com = l.comment - if l.value != zNewline && l.value != zEOF { - return &ParseError{f, "garbage after rdata", l}, "" - } - case zNewline: - com = l.comment - case zEOF: - default: - return &ParseError{f, "garbage after rdata", l}, "" - } - return nil, com -} - -// Parse a 64 bit-like ipv6 address: "0014:4fff:ff20:ee64" -// Used for NID and L64 record. 
-func stringToNodeID(l lex) (uint64, *ParseError) { - if len(l.token) < 19 { - return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} - } - // There must be three colons at fixes postitions, if not its a parse error - if l.token[4] != ':' && l.token[9] != ':' && l.token[14] != ':' { - return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} - } - s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19] - u, err := strconv.ParseUint(s, 16, 64) - if err != nil { - return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} - } - return u, nil -} diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go deleted file mode 100644 index 935d22c3f..000000000 --- a/vendor/github.com/miekg/dns/scan_rr.go +++ /dev/null @@ -1,2209 +0,0 @@ -package dns - -import ( - "encoding/base64" - "net" - "strconv" - "strings" -) - -type parserFunc struct { - // Func defines the function that parses the tokens and returns the RR - // or an error. The last string contains any comments in the line as - // they returned by the lexer as well. - Func func(h RR_Header, c *zlexer, origin string, file string) (RR, *ParseError, string) - // Signals if the RR ending is of variable length, like TXT or records - // that have Hexadecimal or Base64 as their last element in the Rdata. Records - // that have a fixed ending or for instance A, AAAA, SOA and etc. - Variable bool -} - -// Parse the rdata of each rrtype. -// All data from the channel c is either zString or zBlank. -// After the rdata there may come a zBlank and then a zNewline -// or immediately a zNewline. If this is not the case we flag -// an *ParseError: garbage after rdata. 
-func setRR(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - parserfunc, ok := typeToparserFunc[h.Rrtype] - if ok { - r, e, cm := parserfunc.Func(h, c, o, f) - if parserfunc.Variable { - return r, e, cm - } - if e != nil { - return nil, e, "" - } - e, cm = slurpRemainder(c, f) - if e != nil { - return nil, e, "" - } - return r, nil, cm - } - // RFC3957 RR (Unknown RR handling) - return setRFC3597(h, c, o, f) -} - -// A remainder of the rdata with embedded spaces, return the parsed string (sans the spaces) -// or an error -func endingToString(c *zlexer, errstr, f string) (string, *ParseError, string) { - s := "" - l, _ := c.Next() // zString - for l.value != zNewline && l.value != zEOF { - if l.err { - return s, &ParseError{f, errstr, l}, "" - } - switch l.value { - case zString: - s += l.token - case zBlank: // Ok - default: - return "", &ParseError{f, errstr, l}, "" - } - l, _ = c.Next() - } - return s, nil, l.comment -} - -// A remainder of the rdata with embedded spaces, split on unquoted whitespace -// and return the parsed string slice or an error -func endingToTxtSlice(c *zlexer, errstr, f string) ([]string, *ParseError, string) { - // Get the remaining data until we see a zNewline - l, _ := c.Next() - if l.err { - return nil, &ParseError{f, errstr, l}, "" - } - - // Build the slice - s := make([]string, 0) - quote := false - empty := false - for l.value != zNewline && l.value != zEOF { - if l.err { - return nil, &ParseError{f, errstr, l}, "" - } - switch l.value { - case zString: - empty = false - if len(l.token) > 255 { - // split up tokens that are larger than 255 into 255-chunks - sx := []string{} - p, i := 0, 255 - for { - if i <= len(l.token) { - sx = append(sx, l.token[p:i]) - } else { - sx = append(sx, l.token[p:]) - break - - } - p, i = p+255, i+255 - } - s = append(s, sx...) - break - } - - s = append(s, l.token) - case zBlank: - if quote { - // zBlank can only be seen in between txt parts. 
- return nil, &ParseError{f, errstr, l}, "" - } - case zQuote: - if empty && quote { - s = append(s, "") - } - quote = !quote - empty = true - default: - return nil, &ParseError{f, errstr, l}, "" - } - l, _ = c.Next() - } - if quote { - return nil, &ParseError{f, errstr, l}, "" - } - return s, nil, l.comment -} - -func setA(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(A) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" - } - - rr.A = net.ParseIP(l.token) - if rr.A == nil || l.err { - return nil, &ParseError{f, "bad A A", l}, "" - } - return rr, nil, "" -} - -func setAAAA(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(AAAA) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" - } - - rr.AAAA = net.ParseIP(l.token) - if rr.AAAA == nil || l.err { - return nil, &ParseError{f, "bad AAAA AAAA", l}, "" - } - return rr, nil, "" -} - -func setNS(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(NS) - rr.Hdr = h - - l, _ := c.Next() - rr.Ns = l.token - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" - } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return nil, &ParseError{f, "bad NS Ns", l}, "" - } - rr.Ns = name - return rr, nil, "" -} - -func setPTR(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(PTR) - rr.Hdr = h - - l, _ := c.Next() - rr.Ptr = l.token - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" - } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return nil, &ParseError{f, "bad PTR Ptr", l}, "" - } - rr.Ptr = name - return rr, nil, "" -} - -func setNSAPPTR(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(NSAPPTR) - rr.Hdr = h - - l, _ := c.Next() - rr.Ptr = l.token - if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, "" - } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return nil, &ParseError{f, "bad NSAP-PTR Ptr", l}, "" - } - rr.Ptr = name - return rr, nil, "" -} - -func setRP(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(RP) - rr.Hdr = h - - l, _ := c.Next() - rr.Mbox = l.token - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" - } - - mbox, mboxOk := toAbsoluteName(l.token, o) - if l.err || !mboxOk { - return nil, &ParseError{f, "bad RP Mbox", l}, "" - } - rr.Mbox = mbox - - c.Next() // zBlank - l, _ = c.Next() - rr.Txt = l.token - - txt, txtOk := toAbsoluteName(l.token, o) - if l.err || !txtOk { - return nil, &ParseError{f, "bad RP Txt", l}, "" - } - rr.Txt = txt - - return rr, nil, "" -} - -func setMR(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(MR) - rr.Hdr = h - - l, _ := c.Next() - rr.Mr = l.token - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" - } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return nil, &ParseError{f, "bad MR Mr", l}, "" - } - rr.Mr = name - return rr, nil, "" -} - -func setMB(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(MB) - rr.Hdr = h - - l, _ := c.Next() - rr.Mb = l.token - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" - } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return nil, &ParseError{f, "bad MB Mb", l}, "" - } - rr.Mb = name - return rr, nil, "" -} - -func setMG(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(MG) - rr.Hdr = h - - l, _ := c.Next() - rr.Mg = l.token - if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, "" - } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return nil, &ParseError{f, "bad MG Mg", l}, "" - } - rr.Mg = name - return rr, nil, "" -} - -func setHINFO(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(HINFO) - rr.Hdr = h - - chunks, e, c1 := endingToTxtSlice(c, "bad HINFO Fields", f) - if e != nil { - return nil, e, c1 - } - - if ln := len(chunks); ln == 0 { - return rr, nil, "" - } else if ln == 1 { - // Can we split it? - if out := strings.Fields(chunks[0]); len(out) > 1 { - chunks = out - } else { - chunks = append(chunks, "") - } - } - - rr.Cpu = chunks[0] - rr.Os = strings.Join(chunks[1:], " ") - - return rr, nil, "" -} - -func setMINFO(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(MINFO) - rr.Hdr = h - - l, _ := c.Next() - rr.Rmail = l.token - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" - } - - rmail, rmailOk := toAbsoluteName(l.token, o) - if l.err || !rmailOk { - return nil, &ParseError{f, "bad MINFO Rmail", l}, "" - } - rr.Rmail = rmail - - c.Next() // zBlank - l, _ = c.Next() - rr.Email = l.token - - email, emailOk := toAbsoluteName(l.token, o) - if l.err || !emailOk { - return nil, &ParseError{f, "bad MINFO Email", l}, "" - } - rr.Email = email - - return rr, nil, "" -} - -func setMF(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(MF) - rr.Hdr = h - - l, _ := c.Next() - rr.Mf = l.token - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" - } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return nil, &ParseError{f, "bad MF Mf", l}, "" - } - rr.Mf = name - return rr, nil, "" -} - -func setMD(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(MD) - rr.Hdr = h - - l, _ := c.Next() - rr.Md = l.token - if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, "" - } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return nil, &ParseError{f, "bad MD Md", l}, "" - } - rr.Md = name - return rr, nil, "" -} - -func setMX(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(MX) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad MX Pref", l}, "" - } - rr.Preference = uint16(i) - - c.Next() // zBlank - l, _ = c.Next() // zString - rr.Mx = l.token - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return nil, &ParseError{f, "bad MX Mx", l}, "" - } - rr.Mx = name - - return rr, nil, "" -} - -func setRT(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(RT) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil { - return nil, &ParseError{f, "bad RT Preference", l}, "" - } - rr.Preference = uint16(i) - - c.Next() // zBlank - l, _ = c.Next() // zString - rr.Host = l.token - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return nil, &ParseError{f, "bad RT Host", l}, "" - } - rr.Host = name - - return rr, nil, "" -} - -func setAFSDB(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(AFSDB) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, "" - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad AFSDB Subtype", l}, "" - } - rr.Subtype = uint16(i) - - c.Next() // zBlank - l, _ = c.Next() // zString - rr.Hostname = l.token - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return nil, &ParseError{f, "bad AFSDB Hostname", l}, "" - } - rr.Hostname = name - return rr, nil, "" -} - -func setX25(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(X25) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" - } - - if l.err { - return nil, &ParseError{f, "bad X25 PSDNAddress", l}, "" - } - rr.PSDNAddress = l.token - return rr, nil, "" -} - -func setKX(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(KX) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad KX Pref", l}, "" - } - rr.Preference = uint16(i) - - c.Next() // zBlank - l, _ = c.Next() // zString - rr.Exchanger = l.token - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return nil, &ParseError{f, "bad KX Exchanger", l}, "" - } - rr.Exchanger = name - return rr, nil, "" -} - -func setCNAME(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(CNAME) - rr.Hdr = h - - l, _ := c.Next() - rr.Target = l.token - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" - } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return nil, &ParseError{f, "bad CNAME Target", l}, "" - } - rr.Target = name - return rr, nil, "" -} - -func setDNAME(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(DNAME) - rr.Hdr = h - - l, _ := c.Next() - rr.Target = l.token - if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, "" - } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return nil, &ParseError{f, "bad DNAME Target", l}, "" - } - rr.Target = name - return rr, nil, "" -} - -func setSOA(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(SOA) - rr.Hdr = h - - l, _ := c.Next() - rr.Ns = l.token - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" - } - - ns, nsOk := toAbsoluteName(l.token, o) - if l.err || !nsOk { - return nil, &ParseError{f, "bad SOA Ns", l}, "" - } - rr.Ns = ns - - c.Next() // zBlank - l, _ = c.Next() - rr.Mbox = l.token - - mbox, mboxOk := toAbsoluteName(l.token, o) - if l.err || !mboxOk { - return nil, &ParseError{f, "bad SOA Mbox", l}, "" - } - rr.Mbox = mbox - - c.Next() // zBlank - - var ( - v uint32 - ok bool - ) - for i := 0; i < 5; i++ { - l, _ = c.Next() - if l.err { - return nil, &ParseError{f, "bad SOA zone parameter", l}, "" - } - if j, e := strconv.ParseUint(l.token, 10, 32); e != nil { - if i == 0 { - // Serial must be a number - return nil, &ParseError{f, "bad SOA zone parameter", l}, "" - } - // We allow other fields to be unitful duration strings - if v, ok = stringToTTL(l.token); !ok { - return nil, &ParseError{f, "bad SOA zone parameter", l}, "" - - } - } else { - v = uint32(j) - } - switch i { - case 0: - rr.Serial = v - c.Next() // zBlank - case 1: - rr.Refresh = v - c.Next() // zBlank - case 2: - rr.Retry = v - c.Next() // zBlank - case 3: - rr.Expire = v - c.Next() // zBlank - case 4: - rr.Minttl = v - } - } - return rr, nil, "" -} - -func setSRV(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(SRV) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, "" - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad SRV Priority", l}, "" - } - rr.Priority = uint16(i) - - c.Next() // zBlank - l, _ = c.Next() // zString - i, e = strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad SRV Weight", l}, "" - } - rr.Weight = uint16(i) - - c.Next() // zBlank - l, _ = c.Next() // zString - i, e = strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad SRV Port", l}, "" - } - rr.Port = uint16(i) - - c.Next() // zBlank - l, _ = c.Next() // zString - rr.Target = l.token - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return nil, &ParseError{f, "bad SRV Target", l}, "" - } - rr.Target = name - return rr, nil, "" -} - -func setNAPTR(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(NAPTR) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, "" - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad NAPTR Order", l}, "" - } - rr.Order = uint16(i) - - c.Next() // zBlank - l, _ = c.Next() // zString - i, e = strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad NAPTR Preference", l}, "" - } - rr.Preference = uint16(i) - - // Flags - c.Next() // zBlank - l, _ = c.Next() // _QUOTE - if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Flags", l}, "" - } - l, _ = c.Next() // Either String or Quote - if l.value == zString { - rr.Flags = l.token - l, _ = c.Next() // _QUOTE - if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Flags", l}, "" - } - } else if l.value == zQuote { - rr.Flags = "" - } else { - return nil, &ParseError{f, "bad NAPTR Flags", l}, "" - } - - // Service - c.Next() // zBlank - l, _ = c.Next() // _QUOTE - if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Service", l}, "" - } - l, _ = c.Next() // Either String or Quote - if l.value == zString { - rr.Service = l.token - l, _ = c.Next() // _QUOTE - if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Service", l}, "" - } - } else if l.value == zQuote { - rr.Service = "" - } else { - return nil, &ParseError{f, "bad NAPTR Service", l}, "" - } - - // Regexp - c.Next() // zBlank - l, _ = c.Next() // _QUOTE - if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" - } - l, _ = c.Next() // Either String or Quote - if l.value == zString { - rr.Regexp = l.token - l, _ = c.Next() // _QUOTE - if l.value != zQuote { - return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" - } - } else if l.value == zQuote { - rr.Regexp = "" - } else { - return nil, &ParseError{f, "bad NAPTR Regexp", l}, "" - } - - // After quote no space?? 
- c.Next() // zBlank - l, _ = c.Next() // zString - rr.Replacement = l.token - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return nil, &ParseError{f, "bad NAPTR Replacement", l}, "" - } - rr.Replacement = name - return rr, nil, "" -} - -func setTALINK(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(TALINK) - rr.Hdr = h - - l, _ := c.Next() - rr.PreviousName = l.token - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" - } - - previousName, previousNameOk := toAbsoluteName(l.token, o) - if l.err || !previousNameOk { - return nil, &ParseError{f, "bad TALINK PreviousName", l}, "" - } - rr.PreviousName = previousName - - c.Next() // zBlank - l, _ = c.Next() - rr.NextName = l.token - - nextName, nextNameOk := toAbsoluteName(l.token, o) - if l.err || !nextNameOk { - return nil, &ParseError{f, "bad TALINK NextName", l}, "" - } - rr.NextName = nextName - - return rr, nil, "" -} - -func setLOC(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(LOC) - rr.Hdr = h - // Non zero defaults for LOC record, see RFC 1876, Section 3. - rr.HorizPre = 165 // 10000 - rr.VertPre = 162 // 10 - rr.Size = 18 // 1 - ok := false - - // North - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, "" - } - i, e := strconv.ParseUint(l.token, 10, 32) - if e != nil || l.err { - return nil, &ParseError{f, "bad LOC Latitude", l}, "" - } - rr.Latitude = 1000 * 60 * 60 * uint32(i) - - c.Next() // zBlank - // Either number, 'N' or 'S' - l, _ = c.Next() - if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { - goto East - } - i, e = strconv.ParseUint(l.token, 10, 32) - if e != nil || l.err { - return nil, &ParseError{f, "bad LOC Latitude minutes", l}, "" - } - rr.Latitude += 1000 * 60 * uint32(i) - - c.Next() // zBlank - l, _ = c.Next() - if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { - return nil, &ParseError{f, "bad LOC Latitude seconds", l}, "" - } else { - rr.Latitude += uint32(1000 * i) - } - c.Next() // zBlank - // Either number, 'N' or 'S' - l, _ = c.Next() - if rr.Latitude, ok = locCheckNorth(l.token, rr.Latitude); ok { - goto East - } - // If still alive, flag an error - return nil, &ParseError{f, "bad LOC Latitude North/South", l}, "" - -East: - // East - c.Next() // zBlank - l, _ = c.Next() - if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err { - return nil, &ParseError{f, "bad LOC Longitude", l}, "" - } else { - rr.Longitude = 1000 * 60 * 60 * uint32(i) - } - c.Next() // zBlank - // Either number, 'E' or 'W' - l, _ = c.Next() - if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { - goto Altitude - } - if i, e := strconv.ParseUint(l.token, 10, 32); e != nil || l.err { - return nil, &ParseError{f, "bad LOC Longitude minutes", l}, "" - } else { - rr.Longitude += 1000 * 60 * uint32(i) - } - c.Next() // zBlank - l, _ = c.Next() - if i, e := strconv.ParseFloat(l.token, 32); e != nil || l.err { - return nil, &ParseError{f, "bad LOC Longitude seconds", l}, "" - } else { - rr.Longitude += uint32(1000 * i) - } - c.Next() // zBlank - // Either number, 'E' or 'W' - l, _ = c.Next() - if rr.Longitude, ok = locCheckEast(l.token, rr.Longitude); ok { - goto Altitude - } - // If still alive, flag an error 
- return nil, &ParseError{f, "bad LOC Longitude East/West", l}, "" - -Altitude: - c.Next() // zBlank - l, _ = c.Next() - if len(l.token) == 0 || l.err { - return nil, &ParseError{f, "bad LOC Altitude", l}, "" - } - if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' { - l.token = l.token[0 : len(l.token)-1] - } - if i, e := strconv.ParseFloat(l.token, 32); e != nil { - return nil, &ParseError{f, "bad LOC Altitude", l}, "" - } else { - rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5) - } - - // And now optionally the other values - l, _ = c.Next() - count := 0 - for l.value != zNewline && l.value != zEOF { - switch l.value { - case zString: - switch count { - case 0: // Size - e, m, ok := stringToCm(l.token) - if !ok { - return nil, &ParseError{f, "bad LOC Size", l}, "" - } - rr.Size = e&0x0f | m<<4&0xf0 - case 1: // HorizPre - e, m, ok := stringToCm(l.token) - if !ok { - return nil, &ParseError{f, "bad LOC HorizPre", l}, "" - } - rr.HorizPre = e&0x0f | m<<4&0xf0 - case 2: // VertPre - e, m, ok := stringToCm(l.token) - if !ok { - return nil, &ParseError{f, "bad LOC VertPre", l}, "" - } - rr.VertPre = e&0x0f | m<<4&0xf0 - } - count++ - case zBlank: - // Ok - default: - return nil, &ParseError{f, "bad LOC Size, HorizPre or VertPre", l}, "" - } - l, _ = c.Next() - } - return rr, nil, "" -} - -func setHIP(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(HIP) - rr.Hdr = h - - // HitLength is not represented - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, l.comment - } - - i, e := strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad HIP PublicKeyAlgorithm", l}, "" - } - rr.PublicKeyAlgorithm = uint8(i) - - c.Next() // zBlank - l, _ = c.Next() // zString - if len(l.token) == 0 || l.err { - return nil, &ParseError{f, "bad HIP Hit", l}, "" - } - rr.Hit = l.token // This can not contain spaces, see RFC 5205 Section 6. 
- rr.HitLength = uint8(len(rr.Hit)) / 2 - - c.Next() // zBlank - l, _ = c.Next() // zString - if len(l.token) == 0 || l.err { - return nil, &ParseError{f, "bad HIP PublicKey", l}, "" - } - rr.PublicKey = l.token // This cannot contain spaces - rr.PublicKeyLength = uint16(base64.StdEncoding.DecodedLen(len(rr.PublicKey))) - - // RendezvousServers (if any) - l, _ = c.Next() - var xs []string - for l.value != zNewline && l.value != zEOF { - switch l.value { - case zString: - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return nil, &ParseError{f, "bad HIP RendezvousServers", l}, "" - } - xs = append(xs, name) - case zBlank: - // Ok - default: - return nil, &ParseError{f, "bad HIP RendezvousServers", l}, "" - } - l, _ = c.Next() - } - rr.RendezvousServers = xs - return rr, nil, l.comment -} - -func setCERT(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(CERT) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, l.comment - } - - if v, ok := StringToCertType[l.token]; ok { - rr.Type = v - } else if i, e := strconv.ParseUint(l.token, 10, 16); e != nil { - return nil, &ParseError{f, "bad CERT Type", l}, "" - } else { - rr.Type = uint16(i) - } - c.Next() // zBlank - l, _ = c.Next() // zString - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad CERT KeyTag", l}, "" - } - rr.KeyTag = uint16(i) - c.Next() // zBlank - l, _ = c.Next() // zString - if v, ok := StringToAlgorithm[l.token]; ok { - rr.Algorithm = v - } else if i, e := strconv.ParseUint(l.token, 10, 8); e != nil { - return nil, &ParseError{f, "bad CERT Algorithm", l}, "" - } else { - rr.Algorithm = uint8(i) - } - s, e1, c1 := endingToString(c, "bad CERT Certificate", f) - if e1 != nil { - return nil, e1, c1 - } - rr.Certificate = s - return rr, nil, c1 -} - -func setOPENPGPKEY(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(OPENPGPKEY) - 
rr.Hdr = h - - s, e, c1 := endingToString(c, "bad OPENPGPKEY PublicKey", f) - if e != nil { - return nil, e, c1 - } - rr.PublicKey = s - return rr, nil, c1 -} - -func setCSYNC(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(CSYNC) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, l.comment - } - j, e := strconv.ParseUint(l.token, 10, 32) - if e != nil { - // Serial must be a number - return nil, &ParseError{f, "bad CSYNC serial", l}, "" - } - rr.Serial = uint32(j) - - c.Next() // zBlank - - l, _ = c.Next() - j, e = strconv.ParseUint(l.token, 10, 16) - if e != nil { - // Serial must be a number - return nil, &ParseError{f, "bad CSYNC flags", l}, "" - } - rr.Flags = uint16(j) - - rr.TypeBitMap = make([]uint16, 0) - var ( - k uint16 - ok bool - ) - l, _ = c.Next() - for l.value != zNewline && l.value != zEOF { - switch l.value { - case zBlank: - // Ok - case zString: - tokenUpper := strings.ToUpper(l.token) - if k, ok = StringToType[tokenUpper]; !ok { - if k, ok = typeToInt(l.token); !ok { - return nil, &ParseError{f, "bad CSYNC TypeBitMap", l}, "" - } - } - rr.TypeBitMap = append(rr.TypeBitMap, k) - default: - return nil, &ParseError{f, "bad CSYNC TypeBitMap", l}, "" - } - l, _ = c.Next() - } - return rr, nil, l.comment -} - -func setSIG(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - r, e, s := setRRSIG(h, c, o, f) - if r != nil { - return &SIG{*r.(*RRSIG)}, e, s - } - return nil, e, s -} - -func setRRSIG(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(RRSIG) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, l.comment - } - - tokenUpper := strings.ToUpper(l.token) - if t, ok := StringToType[tokenUpper]; !ok { - if strings.HasPrefix(tokenUpper, "TYPE") { - t, ok = typeToInt(l.token) - if !ok { - return nil, &ParseError{f, "bad RRSIG Typecovered", l}, "" - } - rr.TypeCovered = t - } else { - return nil, &ParseError{f, "bad RRSIG Typecovered", l}, "" - } - } else { - rr.TypeCovered = t - } - - c.Next() // zBlank - l, _ = c.Next() - i, err := strconv.ParseUint(l.token, 10, 8) - if err != nil || l.err { - return nil, &ParseError{f, "bad RRSIG Algorithm", l}, "" - } - rr.Algorithm = uint8(i) - - c.Next() // zBlank - l, _ = c.Next() - i, err = strconv.ParseUint(l.token, 10, 8) - if err != nil || l.err { - return nil, &ParseError{f, "bad RRSIG Labels", l}, "" - } - rr.Labels = uint8(i) - - c.Next() // zBlank - l, _ = c.Next() - i, err = strconv.ParseUint(l.token, 10, 32) - if err != nil || l.err { - return nil, &ParseError{f, "bad RRSIG OrigTtl", l}, "" - } - rr.OrigTtl = uint32(i) - - c.Next() // zBlank - l, _ = c.Next() - if i, err := StringToTime(l.token); err != nil { - // Try to see if all numeric and use it as epoch - if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { - // TODO(miek): error out on > MAX_UINT32, same below - rr.Expiration = uint32(i) - } else { - return nil, &ParseError{f, "bad RRSIG Expiration", l}, "" - } - } else { - rr.Expiration = i - } - - c.Next() // zBlank - l, _ = c.Next() - if i, err := StringToTime(l.token); err != nil { - if i, err := strconv.ParseInt(l.token, 10, 64); err == nil { - rr.Inception = uint32(i) - } else { - return nil, &ParseError{f, "bad RRSIG Inception", l}, "" - } - } else { - rr.Inception = i - } - - c.Next() // zBlank - l, _ = c.Next() - i, err = strconv.ParseUint(l.token, 10, 16) - if err != nil || l.err { - return nil, &ParseError{f, "bad RRSIG KeyTag", l}, "" - } - rr.KeyTag = uint16(i) - - c.Next() // zBlank - l, _ = c.Next() - rr.SignerName = l.token - name, nameOk := toAbsoluteName(l.token, 
o) - if l.err || !nameOk { - return nil, &ParseError{f, "bad RRSIG SignerName", l}, "" - } - rr.SignerName = name - - s, e, c1 := endingToString(c, "bad RRSIG Signature", f) - if e != nil { - return nil, e, c1 - } - rr.Signature = s - - return rr, nil, c1 -} - -func setNSEC(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(NSEC) - rr.Hdr = h - - l, _ := c.Next() - rr.NextDomain = l.token - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, l.comment - } - - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return nil, &ParseError{f, "bad NSEC NextDomain", l}, "" - } - rr.NextDomain = name - - rr.TypeBitMap = make([]uint16, 0) - var ( - k uint16 - ok bool - ) - l, _ = c.Next() - for l.value != zNewline && l.value != zEOF { - switch l.value { - case zBlank: - // Ok - case zString: - tokenUpper := strings.ToUpper(l.token) - if k, ok = StringToType[tokenUpper]; !ok { - if k, ok = typeToInt(l.token); !ok { - return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, "" - } - } - rr.TypeBitMap = append(rr.TypeBitMap, k) - default: - return nil, &ParseError{f, "bad NSEC TypeBitMap", l}, "" - } - l, _ = c.Next() - } - return rr, nil, l.comment -} - -func setNSEC3(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(NSEC3) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, l.comment - } - - i, e := strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad NSEC3 Hash", l}, "" - } - rr.Hash = uint8(i) - c.Next() // zBlank - l, _ = c.Next() - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad NSEC3 Flags", l}, "" - } - rr.Flags = uint8(i) - c.Next() // zBlank - l, _ = c.Next() - i, e = strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad NSEC3 Iterations", l}, "" - } - rr.Iterations = uint16(i) - c.Next() - l, _ = c.Next() - if len(l.token) == 0 || l.err { - return nil, &ParseError{f, "bad NSEC3 Salt", l}, "" - } - if l.token != "-" { - rr.SaltLength = uint8(len(l.token)) / 2 - rr.Salt = l.token - } - - c.Next() - l, _ = c.Next() - if len(l.token) == 0 || l.err { - return nil, &ParseError{f, "bad NSEC3 NextDomain", l}, "" - } - rr.HashLength = 20 // Fix for NSEC3 (sha1 160 bits) - rr.NextDomain = l.token - - rr.TypeBitMap = make([]uint16, 0) - var ( - k uint16 - ok bool - ) - l, _ = c.Next() - for l.value != zNewline && l.value != zEOF { - switch l.value { - case zBlank: - // Ok - case zString: - tokenUpper := strings.ToUpper(l.token) - if k, ok = StringToType[tokenUpper]; !ok { - if k, ok = typeToInt(l.token); !ok { - return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, "" - } - } - rr.TypeBitMap = append(rr.TypeBitMap, k) - default: - return nil, &ParseError{f, "bad NSEC3 TypeBitMap", l}, "" - } - l, _ = c.Next() - } - return rr, nil, l.comment -} - -func setNSEC3PARAM(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(NSEC3PARAM) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, "" - } - - i, e := strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad NSEC3PARAM Hash", l}, "" - } - rr.Hash = uint8(i) - c.Next() // zBlank - l, _ = c.Next() - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad NSEC3PARAM Flags", l}, "" - } - rr.Flags = uint8(i) - c.Next() // zBlank - l, _ = c.Next() - i, e = strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad NSEC3PARAM Iterations", l}, "" - } - rr.Iterations = uint16(i) - c.Next() - l, _ = c.Next() - if l.token != "-" { - rr.SaltLength = uint8(len(l.token)) - rr.Salt = l.token - } - return rr, nil, "" -} - -func setEUI48(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(EUI48) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" - } - - if len(l.token) != 17 || l.err { - return nil, &ParseError{f, "bad EUI48 Address", l}, "" - } - addr := make([]byte, 12) - dash := 0 - for i := 0; i < 10; i += 2 { - addr[i] = l.token[i+dash] - addr[i+1] = l.token[i+1+dash] - dash++ - if l.token[i+1+dash] != '-' { - return nil, &ParseError{f, "bad EUI48 Address", l}, "" - } - } - addr[10] = l.token[15] - addr[11] = l.token[16] - - i, e := strconv.ParseUint(string(addr), 16, 48) - if e != nil { - return nil, &ParseError{f, "bad EUI48 Address", l}, "" - } - rr.Address = i - return rr, nil, "" -} - -func setEUI64(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(EUI64) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, "" - } - - if len(l.token) != 23 || l.err { - return nil, &ParseError{f, "bad EUI64 Address", l}, "" - } - addr := make([]byte, 16) - dash := 0 - for i := 0; i < 14; i += 2 { - addr[i] = l.token[i+dash] - addr[i+1] = l.token[i+1+dash] - dash++ - if l.token[i+1+dash] != '-' { - return nil, &ParseError{f, "bad EUI64 Address", l}, "" - } - } - addr[14] = l.token[21] - addr[15] = l.token[22] - - i, e := strconv.ParseUint(string(addr), 16, 64) - if e != nil { - return nil, &ParseError{f, "bad EUI68 Address", l}, "" - } - rr.Address = uint64(i) - return rr, nil, "" -} - -func setSSHFP(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(SSHFP) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" - } - - i, e := strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad SSHFP Algorithm", l}, "" - } - rr.Algorithm = uint8(i) - c.Next() // zBlank - l, _ = c.Next() - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad SSHFP Type", l}, "" - } - rr.Type = uint8(i) - c.Next() // zBlank - s, e1, c1 := endingToString(c, "bad SSHFP Fingerprint", f) - if e1 != nil { - return nil, e1, c1 - } - rr.FingerPrint = s - return rr, nil, "" -} - -func setDNSKEYs(h RR_Header, c *zlexer, o, f, typ string) (RR, *ParseError, string) { - rr := new(DNSKEY) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, l.comment - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad " + typ + " Flags", l}, "" - } - rr.Flags = uint16(i) - c.Next() // zBlank - l, _ = c.Next() // zString - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad " + typ + " Protocol", l}, "" - } - rr.Protocol = uint8(i) - c.Next() // zBlank - l, _ = c.Next() // zString - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" - } - rr.Algorithm = uint8(i) - s, e1, c1 := endingToString(c, "bad "+typ+" PublicKey", f) - if e1 != nil { - return nil, e1, c1 - } - rr.PublicKey = s - return rr, nil, c1 -} - -func setKEY(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - r, e, s := setDNSKEYs(h, c, o, f, "KEY") - if r != nil { - return &KEY{*r.(*DNSKEY)}, e, s - } - return nil, e, s -} - -func setDNSKEY(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - r, e, s := setDNSKEYs(h, c, o, f, "DNSKEY") - return r, e, s -} - -func setCDNSKEY(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - r, e, s := setDNSKEYs(h, c, o, f, "CDNSKEY") - if r != nil { - return &CDNSKEY{*r.(*DNSKEY)}, e, s - } - return nil, e, s -} - -func setRKEY(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(RKEY) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, l.comment - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad RKEY Flags", l}, "" - } - rr.Flags = uint16(i) - c.Next() // zBlank - l, _ = c.Next() // zString - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad RKEY Protocol", l}, "" - } - rr.Protocol = uint8(i) - c.Next() // zBlank - l, _ = c.Next() // zString - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad RKEY Algorithm", l}, "" - } - rr.Algorithm = uint8(i) - s, e1, c1 := endingToString(c, "bad RKEY PublicKey", f) - if e1 != nil { - return nil, e1, c1 - } - rr.PublicKey = s - return rr, nil, c1 -} - -func setEID(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(EID) - rr.Hdr = h - s, e, c1 := endingToString(c, "bad EID Endpoint", f) - if e != nil { - return nil, e, c1 - } - rr.Endpoint = s - return rr, nil, c1 -} - -func setNIMLOC(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(NIMLOC) - rr.Hdr = h - s, e, c1 := endingToString(c, "bad NIMLOC Locator", f) - if e != nil { - return nil, e, c1 - } - rr.Locator = s - return rr, nil, c1 -} - -func setGPOS(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(GPOS) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, "" - } - - _, e := strconv.ParseFloat(l.token, 64) - if e != nil || l.err { - return nil, &ParseError{f, "bad GPOS Longitude", l}, "" - } - rr.Longitude = l.token - c.Next() // zBlank - l, _ = c.Next() - _, e = strconv.ParseFloat(l.token, 64) - if e != nil || l.err { - return nil, &ParseError{f, "bad GPOS Latitude", l}, "" - } - rr.Latitude = l.token - c.Next() // zBlank - l, _ = c.Next() - _, e = strconv.ParseFloat(l.token, 64) - if e != nil || l.err { - return nil, &ParseError{f, "bad GPOS Altitude", l}, "" - } - rr.Altitude = l.token - return rr, nil, "" -} - -func setDSs(h RR_Header, c *zlexer, o, f, typ string) (RR, *ParseError, string) { - rr := new(DS) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, l.comment - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad " + typ + " KeyTag", l}, "" - } - rr.KeyTag = uint16(i) - c.Next() // zBlank - l, _ = c.Next() - if i, e = strconv.ParseUint(l.token, 10, 8); e != nil { - tokenUpper := strings.ToUpper(l.token) - i, ok := StringToAlgorithm[tokenUpper] - if !ok || l.err { - return nil, &ParseError{f, "bad " + typ + " Algorithm", l}, "" - } - rr.Algorithm = i - } else { - rr.Algorithm = uint8(i) - } - c.Next() // zBlank - l, _ = c.Next() - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad " + typ + " DigestType", l}, "" - } - rr.DigestType = uint8(i) - s, e1, c1 := endingToString(c, "bad "+typ+" Digest", f) - if e1 != nil { - return nil, e1, c1 - } - rr.Digest = s - return rr, nil, c1 -} - -func setDS(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - r, e, s := setDSs(h, c, o, f, "DS") - return r, e, s -} - -func setDLV(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - r, e, s := setDSs(h, c, o, f, "DLV") - if r != nil { - return &DLV{*r.(*DS)}, e, s - } - return nil, e, s -} - -func setCDS(h RR_Header, c *zlexer, 
o, f string) (RR, *ParseError, string) { - r, e, s := setDSs(h, c, o, f, "CDS") - if r != nil { - return &CDS{*r.(*DS)}, e, s - } - return nil, e, s -} - -func setTA(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(TA) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, l.comment - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad TA KeyTag", l}, "" - } - rr.KeyTag = uint16(i) - c.Next() // zBlank - l, _ = c.Next() - if i, e := strconv.ParseUint(l.token, 10, 8); e != nil { - tokenUpper := strings.ToUpper(l.token) - i, ok := StringToAlgorithm[tokenUpper] - if !ok || l.err { - return nil, &ParseError{f, "bad TA Algorithm", l}, "" - } - rr.Algorithm = i - } else { - rr.Algorithm = uint8(i) - } - c.Next() // zBlank - l, _ = c.Next() - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad TA DigestType", l}, "" - } - rr.DigestType = uint8(i) - s, err, c1 := endingToString(c, "bad TA Digest", f) - if err != nil { - return nil, err, c1 - } - rr.Digest = s - return rr, nil, c1 -} - -func setTLSA(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(TLSA) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, l.comment - } - - i, e := strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad TLSA Usage", l}, "" - } - rr.Usage = uint8(i) - c.Next() // zBlank - l, _ = c.Next() - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad TLSA Selector", l}, "" - } - rr.Selector = uint8(i) - c.Next() // zBlank - l, _ = c.Next() - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad TLSA MatchingType", l}, "" - } - rr.MatchingType = uint8(i) - // So this needs be e2 (i.e. 
different than e), because...??t - s, e2, c1 := endingToString(c, "bad TLSA Certificate", f) - if e2 != nil { - return nil, e2, c1 - } - rr.Certificate = s - return rr, nil, c1 -} - -func setSMIMEA(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(SMIMEA) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, l.comment - } - - i, e := strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad SMIMEA Usage", l}, "" - } - rr.Usage = uint8(i) - c.Next() // zBlank - l, _ = c.Next() - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad SMIMEA Selector", l}, "" - } - rr.Selector = uint8(i) - c.Next() // zBlank - l, _ = c.Next() - i, e = strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return nil, &ParseError{f, "bad SMIMEA MatchingType", l}, "" - } - rr.MatchingType = uint8(i) - // So this needs be e2 (i.e. different than e), because...??t - s, e2, c1 := endingToString(c, "bad SMIMEA Certificate", f) - if e2 != nil { - return nil, e2, c1 - } - rr.Certificate = s - return rr, nil, c1 -} - -func setRFC3597(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(RFC3597) - rr.Hdr = h - - l, _ := c.Next() - if l.token != "\\#" { - return nil, &ParseError{f, "bad RFC3597 Rdata", l}, "" - } - - c.Next() // zBlank - l, _ = c.Next() - rdlength, e := strconv.Atoi(l.token) - if e != nil || l.err { - return nil, &ParseError{f, "bad RFC3597 Rdata ", l}, "" - } - - s, e1, c1 := endingToString(c, "bad RFC3597 Rdata", f) - if e1 != nil { - return nil, e1, c1 - } - if rdlength*2 != len(s) { - return nil, &ParseError{f, "bad RFC3597 Rdata", l}, "" - } - rr.Rdata = s - return rr, nil, c1 -} - -func setSPF(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(SPF) - rr.Hdr = h - - s, e, c1 := endingToTxtSlice(c, "bad SPF Txt", f) - if e != nil { - return nil, e, "" - } - rr.Txt = s - 
return rr, nil, c1 -} - -func setAVC(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(AVC) - rr.Hdr = h - - s, e, c1 := endingToTxtSlice(c, "bad AVC Txt", f) - if e != nil { - return nil, e, "" - } - rr.Txt = s - return rr, nil, c1 -} - -func setTXT(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(TXT) - rr.Hdr = h - - // no zBlank reading here, because all this rdata is TXT - s, e, c1 := endingToTxtSlice(c, "bad TXT Txt", f) - if e != nil { - return nil, e, "" - } - rr.Txt = s - return rr, nil, c1 -} - -// identical to setTXT -func setNINFO(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(NINFO) - rr.Hdr = h - - s, e, c1 := endingToTxtSlice(c, "bad NINFO ZSData", f) - if e != nil { - return nil, e, "" - } - rr.ZSData = s - return rr, nil, c1 -} - -func setURI(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(URI) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad URI Priority", l}, "" - } - rr.Priority = uint16(i) - c.Next() // zBlank - l, _ = c.Next() - i, e = strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad URI Weight", l}, "" - } - rr.Weight = uint16(i) - - c.Next() // zBlank - s, err, c1 := endingToTxtSlice(c, "bad URI Target", f) - if err != nil { - return nil, err, "" - } - if len(s) != 1 { - return nil, &ParseError{f, "bad URI Target", l}, "" - } - rr.Target = s[0] - return rr, nil, c1 -} - -func setDHCID(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - // awesome record to parse! 
- rr := new(DHCID) - rr.Hdr = h - - s, e, c1 := endingToString(c, "bad DHCID Digest", f) - if e != nil { - return nil, e, c1 - } - rr.Digest = s - return rr, nil, c1 -} - -func setNID(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(NID) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad NID Preference", l}, "" - } - rr.Preference = uint16(i) - c.Next() // zBlank - l, _ = c.Next() // zString - u, err := stringToNodeID(l) - if err != nil || l.err { - return nil, err, "" - } - rr.NodeID = u - return rr, nil, "" -} - -func setL32(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(L32) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad L32 Preference", l}, "" - } - rr.Preference = uint16(i) - c.Next() // zBlank - l, _ = c.Next() // zString - rr.Locator32 = net.ParseIP(l.token) - if rr.Locator32 == nil || l.err { - return nil, &ParseError{f, "bad L32 Locator", l}, "" - } - return rr, nil, "" -} - -func setLP(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(LP) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, "" - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad LP Preference", l}, "" - } - rr.Preference = uint16(i) - - c.Next() // zBlank - l, _ = c.Next() // zString - rr.Fqdn = l.token - name, nameOk := toAbsoluteName(l.token, o) - if l.err || !nameOk { - return nil, &ParseError{f, "bad LP Fqdn", l}, "" - } - rr.Fqdn = name - - return rr, nil, "" -} - -func setL64(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(L64) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad L64 Preference", l}, "" - } - rr.Preference = uint16(i) - c.Next() // zBlank - l, _ = c.Next() // zString - u, err := stringToNodeID(l) - if err != nil || l.err { - return nil, err, "" - } - rr.Locator64 = u - return rr, nil, "" -} - -func setUID(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(UID) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" - } - - i, e := strconv.ParseUint(l.token, 10, 32) - if e != nil || l.err { - return nil, &ParseError{f, "bad UID Uid", l}, "" - } - rr.Uid = uint32(i) - return rr, nil, "" -} - -func setGID(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(GID) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, "" - } - - i, e := strconv.ParseUint(l.token, 10, 32) - if e != nil || l.err { - return nil, &ParseError{f, "bad GID Gid", l}, "" - } - rr.Gid = uint32(i) - return rr, nil, "" -} - -func setUINFO(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(UINFO) - rr.Hdr = h - - s, e, c1 := endingToTxtSlice(c, "bad UINFO Uinfo", f) - if e != nil { - return nil, e, c1 - } - if ln := len(s); ln == 0 { - return rr, nil, c1 - } - rr.Uinfo = s[0] // silently discard anything after the first character-string - return rr, nil, c1 -} - -func setPX(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(PX) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. - return rr, nil, "" - } - - i, e := strconv.ParseUint(l.token, 10, 16) - if e != nil || l.err { - return nil, &ParseError{f, "bad PX Preference", l}, "" - } - rr.Preference = uint16(i) - - c.Next() // zBlank - l, _ = c.Next() // zString - rr.Map822 = l.token - map822, map822Ok := toAbsoluteName(l.token, o) - if l.err || !map822Ok { - return nil, &ParseError{f, "bad PX Map822", l}, "" - } - rr.Map822 = map822 - - c.Next() // zBlank - l, _ = c.Next() // zString - rr.Mapx400 = l.token - mapx400, mapx400Ok := toAbsoluteName(l.token, o) - if l.err || !mapx400Ok { - return nil, &ParseError{f, "bad PX Mapx400", l}, "" - } - rr.Mapx400 = mapx400 - - return rr, nil, "" -} - -func setCAA(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(CAA) - rr.Hdr = h - - l, _ := c.Next() - if len(l.token) == 0 { // dynamic update rr. 
- return rr, nil, l.comment - } - - i, err := strconv.ParseUint(l.token, 10, 8) - if err != nil || l.err { - return nil, &ParseError{f, "bad CAA Flag", l}, "" - } - rr.Flag = uint8(i) - - c.Next() // zBlank - l, _ = c.Next() // zString - if l.value != zString { - return nil, &ParseError{f, "bad CAA Tag", l}, "" - } - rr.Tag = l.token - - c.Next() // zBlank - s, e, c1 := endingToTxtSlice(c, "bad CAA Value", f) - if e != nil { - return nil, e, "" - } - if len(s) != 1 { - return nil, &ParseError{f, "bad CAA Value", l}, "" - } - rr.Value = s[0] - return rr, nil, c1 -} - -func setTKEY(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) { - rr := new(TKEY) - rr.Hdr = h - - l, _ := c.Next() - - // Algorithm - if l.value != zString { - return nil, &ParseError{f, "bad TKEY algorithm", l}, "" - } - rr.Algorithm = l.token - c.Next() // zBlank - - // Get the key length and key values - l, _ = c.Next() - i, err := strconv.ParseUint(l.token, 10, 8) - if err != nil || l.err { - return nil, &ParseError{f, "bad TKEY key length", l}, "" - } - rr.KeySize = uint16(i) - c.Next() // zBlank - l, _ = c.Next() - if l.value != zString { - return nil, &ParseError{f, "bad TKEY key", l}, "" - } - rr.Key = l.token - c.Next() // zBlank - - // Get the otherdata length and string data - l, _ = c.Next() - i, err = strconv.ParseUint(l.token, 10, 8) - if err != nil || l.err { - return nil, &ParseError{f, "bad TKEY otherdata length", l}, "" - } - rr.OtherLen = uint16(i) - c.Next() // zBlank - l, _ = c.Next() - if l.value != zString { - return nil, &ParseError{f, "bad TKEY otherday", l}, "" - } - rr.OtherData = l.token - - return rr, nil, "" -} - -var typeToparserFunc = map[uint16]parserFunc{ - TypeAAAA: {setAAAA, false}, - TypeAFSDB: {setAFSDB, false}, - TypeA: {setA, false}, - TypeCAA: {setCAA, true}, - TypeCDS: {setCDS, true}, - TypeCDNSKEY: {setCDNSKEY, true}, - TypeCERT: {setCERT, true}, - TypeCNAME: {setCNAME, false}, - TypeCSYNC: {setCSYNC, true}, - TypeDHCID: {setDHCID, true}, - 
TypeDLV: {setDLV, true}, - TypeDNAME: {setDNAME, false}, - TypeKEY: {setKEY, true}, - TypeDNSKEY: {setDNSKEY, true}, - TypeDS: {setDS, true}, - TypeEID: {setEID, true}, - TypeEUI48: {setEUI48, false}, - TypeEUI64: {setEUI64, false}, - TypeGID: {setGID, false}, - TypeGPOS: {setGPOS, false}, - TypeHINFO: {setHINFO, true}, - TypeHIP: {setHIP, true}, - TypeKX: {setKX, false}, - TypeL32: {setL32, false}, - TypeL64: {setL64, false}, - TypeLOC: {setLOC, true}, - TypeLP: {setLP, false}, - TypeMB: {setMB, false}, - TypeMD: {setMD, false}, - TypeMF: {setMF, false}, - TypeMG: {setMG, false}, - TypeMINFO: {setMINFO, false}, - TypeMR: {setMR, false}, - TypeMX: {setMX, false}, - TypeNAPTR: {setNAPTR, false}, - TypeNID: {setNID, false}, - TypeNIMLOC: {setNIMLOC, true}, - TypeNINFO: {setNINFO, true}, - TypeNSAPPTR: {setNSAPPTR, false}, - TypeNSEC3PARAM: {setNSEC3PARAM, false}, - TypeNSEC3: {setNSEC3, true}, - TypeNSEC: {setNSEC, true}, - TypeNS: {setNS, false}, - TypeOPENPGPKEY: {setOPENPGPKEY, true}, - TypePTR: {setPTR, false}, - TypePX: {setPX, false}, - TypeSIG: {setSIG, true}, - TypeRKEY: {setRKEY, true}, - TypeRP: {setRP, false}, - TypeRRSIG: {setRRSIG, true}, - TypeRT: {setRT, false}, - TypeSMIMEA: {setSMIMEA, true}, - TypeSOA: {setSOA, false}, - TypeSPF: {setSPF, true}, - TypeAVC: {setAVC, true}, - TypeSRV: {setSRV, false}, - TypeSSHFP: {setSSHFP, true}, - TypeTALINK: {setTALINK, false}, - TypeTA: {setTA, true}, - TypeTLSA: {setTLSA, true}, - TypeTXT: {setTXT, true}, - TypeUID: {setUID, false}, - TypeUINFO: {setUINFO, true}, - TypeURI: {setURI, true}, - TypeX25: {setX25, false}, - TypeTKEY: {setTKEY, true}, -} diff --git a/vendor/github.com/miekg/dns/serve_mux.go b/vendor/github.com/miekg/dns/serve_mux.go deleted file mode 100644 index ae304db53..000000000 --- a/vendor/github.com/miekg/dns/serve_mux.go +++ /dev/null @@ -1,147 +0,0 @@ -package dns - -import ( - "strings" - "sync" -) - -// ServeMux is an DNS request multiplexer. 
It matches the zone name of -// each incoming request against a list of registered patterns add calls -// the handler for the pattern that most closely matches the zone name. -// -// ServeMux is DNSSEC aware, meaning that queries for the DS record are -// redirected to the parent zone (if that is also registered), otherwise -// the child gets the query. -// -// ServeMux is also safe for concurrent access from multiple goroutines. -// -// The zero ServeMux is empty and ready for use. -type ServeMux struct { - z map[string]Handler - m sync.RWMutex -} - -// NewServeMux allocates and returns a new ServeMux. -func NewServeMux() *ServeMux { - return new(ServeMux) -} - -// DefaultServeMux is the default ServeMux used by Serve. -var DefaultServeMux = NewServeMux() - -func (mux *ServeMux) match(q string, t uint16) Handler { - mux.m.RLock() - defer mux.m.RUnlock() - if mux.z == nil { - return nil - } - - var handler Handler - - // TODO(tmthrgd): Once https://go-review.googlesource.com/c/go/+/137575 - // lands in a go release, replace the following with strings.ToLower. - var sb strings.Builder - for i := 0; i < len(q); i++ { - c := q[i] - if !(c >= 'A' && c <= 'Z') { - continue - } - - sb.Grow(len(q)) - sb.WriteString(q[:i]) - - for ; i < len(q); i++ { - c := q[i] - if c >= 'A' && c <= 'Z' { - c += 'a' - 'A' - } - - sb.WriteByte(c) - } - - q = sb.String() - break - } - - for off, end := 0, false; !end; off, end = NextLabel(q, off) { - if h, ok := mux.z[q[off:]]; ok { - if t != TypeDS { - return h - } - // Continue for DS to see if we have a parent too, if so delegate to the parent - handler = h - } - } - - // Wildcard match, if we have found nothing try the root zone as a last resort. - if h, ok := mux.z["."]; ok { - return h - } - - return handler -} - -// Handle adds a handler to the ServeMux for pattern. 
-func (mux *ServeMux) Handle(pattern string, handler Handler) { - if pattern == "" { - panic("dns: invalid pattern " + pattern) - } - mux.m.Lock() - if mux.z == nil { - mux.z = make(map[string]Handler) - } - mux.z[Fqdn(pattern)] = handler - mux.m.Unlock() -} - -// HandleFunc adds a handler function to the ServeMux for pattern. -func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) { - mux.Handle(pattern, HandlerFunc(handler)) -} - -// HandleRemove deregisters the handler specific for pattern from the ServeMux. -func (mux *ServeMux) HandleRemove(pattern string) { - if pattern == "" { - panic("dns: invalid pattern " + pattern) - } - mux.m.Lock() - delete(mux.z, Fqdn(pattern)) - mux.m.Unlock() -} - -// ServeDNS dispatches the request to the handler whose pattern most -// closely matches the request message. -// -// ServeDNS is DNSSEC aware, meaning that queries for the DS record -// are redirected to the parent zone (if that is also registered), -// otherwise the child gets the query. -// -// If no handler is found, or there is no question, a standard SERVFAIL -// message is returned -func (mux *ServeMux) ServeDNS(w ResponseWriter, req *Msg) { - var h Handler - if len(req.Question) >= 1 { // allow more than one question - h = mux.match(req.Question[0].Name, req.Question[0].Qtype) - } - - if h != nil { - h.ServeDNS(w, req) - } else { - HandleFailed(w, req) - } -} - -// Handle registers the handler with the given pattern -// in the DefaultServeMux. The documentation for -// ServeMux explains how patterns are matched. -func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } - -// HandleRemove deregisters the handle with the given pattern -// in the DefaultServeMux. -func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) } - -// HandleFunc registers the handler function with the given pattern -// in the DefaultServeMux. 
-func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) { - DefaultServeMux.HandleFunc(pattern, handler) -} diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go deleted file mode 100644 index 6abbed512..000000000 --- a/vendor/github.com/miekg/dns/server.go +++ /dev/null @@ -1,868 +0,0 @@ -// DNS server implementation. - -package dns - -import ( - "bytes" - "context" - "crypto/tls" - "encoding/binary" - "errors" - "io" - "net" - "strings" - "sync" - "sync/atomic" - "time" -) - -// Default maximum number of TCP queries before we close the socket. -const maxTCPQueries = 128 - -// The maximum number of idle workers. -// -// This controls the maximum number of workers that are allowed to stay -// idle waiting for incoming requests before being torn down. -// -// If this limit is reached, the server will just keep spawning new -// workers (goroutines) for each incoming request. In this case, each -// worker will only be used for a single request. -const maxIdleWorkersCount = 10000 - -// The maximum length of time a worker may idle for before being destroyed. -const idleWorkerTimeout = 10 * time.Second - -// aLongTimeAgo is a non-zero time, far in the past, used for -// immediate cancelation of network operations. -var aLongTimeAgo = time.Unix(1, 0) - -// Handler is implemented by any value that implements ServeDNS. -type Handler interface { - ServeDNS(w ResponseWriter, r *Msg) -} - -// The HandlerFunc type is an adapter to allow the use of -// ordinary functions as DNS handlers. If f is a function -// with the appropriate signature, HandlerFunc(f) is a -// Handler object that calls f. -type HandlerFunc func(ResponseWriter, *Msg) - -// ServeDNS calls f(w, r). -func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) { - f(w, r) -} - -// A ResponseWriter interface is used by an DNS handler to -// construct an DNS response. 
-type ResponseWriter interface { - // LocalAddr returns the net.Addr of the server - LocalAddr() net.Addr - // RemoteAddr returns the net.Addr of the client that sent the current request. - RemoteAddr() net.Addr - // WriteMsg writes a reply back to the client. - WriteMsg(*Msg) error - // Write writes a raw buffer back to the client. - Write([]byte) (int, error) - // Close closes the connection. - Close() error - // TsigStatus returns the status of the Tsig. - TsigStatus() error - // TsigTimersOnly sets the tsig timers only boolean. - TsigTimersOnly(bool) - // Hijack lets the caller take over the connection. - // After a call to Hijack(), the DNS package will not do anything with the connection. - Hijack() -} - -// A ConnectionStater interface is used by a DNS Handler to access TLS connection state -// when available. -type ConnectionStater interface { - ConnectionState() *tls.ConnectionState -} - -type response struct { - msg []byte - closed bool // connection has been closed - hijacked bool // connection has been hijacked by handler - tsigTimersOnly bool - tsigStatus error - tsigRequestMAC string - tsigSecret map[string]string // the tsig secrets - udp *net.UDPConn // i/o connection if UDP was used - tcp net.Conn // i/o connection if TCP was used - udpSession *SessionUDP // oob data to get egress interface right - writer Writer // writer to output the raw DNS bits - wg *sync.WaitGroup // for gracefull shutdown -} - -// HandleFailed returns a HandlerFunc that returns SERVFAIL for every request it gets. -func HandleFailed(w ResponseWriter, r *Msg) { - m := new(Msg) - m.SetRcode(r, RcodeServerFailure) - // does not matter if this write fails - w.WriteMsg(m) -} - -// ListenAndServe Starts a server on address and network specified Invoke handler -// for incoming queries. 
-func ListenAndServe(addr string, network string, handler Handler) error { - server := &Server{Addr: addr, Net: network, Handler: handler} - return server.ListenAndServe() -} - -// ListenAndServeTLS acts like http.ListenAndServeTLS, more information in -// http://golang.org/pkg/net/http/#ListenAndServeTLS -func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error { - cert, err := tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return err - } - - config := tls.Config{ - Certificates: []tls.Certificate{cert}, - } - - server := &Server{ - Addr: addr, - Net: "tcp-tls", - TLSConfig: &config, - Handler: handler, - } - - return server.ListenAndServe() -} - -// ActivateAndServe activates a server with a listener from systemd, -// l and p should not both be non-nil. -// If both l and p are not nil only p will be used. -// Invoke handler for incoming queries. -func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error { - server := &Server{Listener: l, PacketConn: p, Handler: handler} - return server.ActivateAndServe() -} - -// Writer writes raw DNS messages; each call to Write should send an entire message. -type Writer interface { - io.Writer -} - -// Reader reads raw DNS messages; each call to ReadTCP or ReadUDP should return an entire message. -type Reader interface { - // ReadTCP reads a raw message from a TCP connection. Implementations may alter - // connection properties, for example the read-deadline. - ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) - // ReadUDP reads a raw message from a UDP connection. Implementations may alter - // connection properties, for example the read-deadline. - ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) -} - -// defaultReader is an adapter for the Server struct that implements the Reader interface -// using the readTCP and readUDP func of the embedded Server. 
-type defaultReader struct { - *Server -} - -func (dr *defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { - return dr.readTCP(conn, timeout) -} - -func (dr *defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { - return dr.readUDP(conn, timeout) -} - -// DecorateReader is a decorator hook for extending or supplanting the functionality of a Reader. -// Implementations should never return a nil Reader. -type DecorateReader func(Reader) Reader - -// DecorateWriter is a decorator hook for extending or supplanting the functionality of a Writer. -// Implementations should never return a nil Writer. -type DecorateWriter func(Writer) Writer - -// A Server defines parameters for running an DNS server. -type Server struct { - // Address to listen on, ":dns" if empty. - Addr string - // if "tcp" or "tcp-tls" (DNS over TLS) it will invoke a TCP listener, otherwise an UDP one - Net string - // TCP Listener to use, this is to aid in systemd's socket activation. - Listener net.Listener - // TLS connection configuration - TLSConfig *tls.Config - // UDP "Listener" to use, this is to aid in systemd's socket activation. - PacketConn net.PacketConn - // Handler to invoke, dns.DefaultServeMux if nil. - Handler Handler - // Default buffer size to use to read incoming UDP messages. If not set - // it defaults to MinMsgSize (512 B). - UDPSize int - // The net.Conn.SetReadTimeout value for new connections, defaults to 2 * time.Second. - ReadTimeout time.Duration - // The net.Conn.SetWriteTimeout value for new connections, defaults to 2 * time.Second. - WriteTimeout time.Duration - // TCP idle timeout for multiple queries, if nil, defaults to 8 * time.Second (RFC 5966). - IdleTimeout func() time.Duration - // Secret(s) for Tsig map[]. The zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2). 
- TsigSecret map[string]string - // If NotifyStartedFunc is set it is called once the server has started listening. - NotifyStartedFunc func() - // DecorateReader is optional, allows customization of the process that reads raw DNS messages. - DecorateReader DecorateReader - // DecorateWriter is optional, allows customization of the process that writes raw DNS messages. - DecorateWriter DecorateWriter - // Maximum number of TCP queries before we close the socket. Default is maxTCPQueries (unlimited if -1). - MaxTCPQueries int - // Whether to set the SO_REUSEPORT socket option, allowing multiple listeners to be bound to a single address. - // It is only supported on go1.11+ and when using ListenAndServe. - ReusePort bool - // AcceptMsgFunc will check the incoming message and will reject it early in the process. - // By default DefaultMsgAcceptFunc will be used. - MsgAcceptFunc MsgAcceptFunc - - // UDP packet or TCP connection queue - queue chan *response - // Workers count - workersCount int32 - - // Shutdown handling - lock sync.RWMutex - started bool - shutdown chan struct{} - conns map[net.Conn]struct{} - - // A pool for UDP message buffers. 
- udpPool sync.Pool -} - -func (srv *Server) isStarted() bool { - srv.lock.RLock() - started := srv.started - srv.lock.RUnlock() - return started -} - -func (srv *Server) worker(w *response) { - srv.serve(w) - - for { - count := atomic.LoadInt32(&srv.workersCount) - if count > maxIdleWorkersCount { - return - } - if atomic.CompareAndSwapInt32(&srv.workersCount, count, count+1) { - break - } - } - - defer atomic.AddInt32(&srv.workersCount, -1) - - inUse := false - timeout := time.NewTimer(idleWorkerTimeout) - defer timeout.Stop() -LOOP: - for { - select { - case w, ok := <-srv.queue: - if !ok { - break LOOP - } - inUse = true - srv.serve(w) - case <-timeout.C: - if !inUse { - break LOOP - } - inUse = false - timeout.Reset(idleWorkerTimeout) - } - } -} - -func (srv *Server) spawnWorker(w *response) { - select { - case srv.queue <- w: - default: - go srv.worker(w) - } -} - -func makeUDPBuffer(size int) func() interface{} { - return func() interface{} { - return make([]byte, size) - } -} - -func (srv *Server) init() { - srv.queue = make(chan *response) - - srv.shutdown = make(chan struct{}) - srv.conns = make(map[net.Conn]struct{}) - - if srv.UDPSize == 0 { - srv.UDPSize = MinMsgSize - } - if srv.MsgAcceptFunc == nil { - srv.MsgAcceptFunc = defaultMsgAcceptFunc - } - - srv.udpPool.New = makeUDPBuffer(srv.UDPSize) -} - -func unlockOnce(l sync.Locker) func() { - var once sync.Once - return func() { once.Do(l.Unlock) } -} - -// ListenAndServe starts a nameserver on the configured address in *Server. 
-func (srv *Server) ListenAndServe() error { - unlock := unlockOnce(&srv.lock) - srv.lock.Lock() - defer unlock() - - if srv.started { - return &Error{err: "server already started"} - } - - addr := srv.Addr - if addr == "" { - addr = ":domain" - } - - srv.init() - defer close(srv.queue) - - switch srv.Net { - case "tcp", "tcp4", "tcp6": - l, err := listenTCP(srv.Net, addr, srv.ReusePort) - if err != nil { - return err - } - srv.Listener = l - srv.started = true - unlock() - return srv.serveTCP(l) - case "tcp-tls", "tcp4-tls", "tcp6-tls": - if srv.TLSConfig == nil || (len(srv.TLSConfig.Certificates) == 0 && srv.TLSConfig.GetCertificate == nil) { - return errors.New("dns: neither Certificates nor GetCertificate set in Config") - } - network := strings.TrimSuffix(srv.Net, "-tls") - l, err := listenTCP(network, addr, srv.ReusePort) - if err != nil { - return err - } - l = tls.NewListener(l, srv.TLSConfig) - srv.Listener = l - srv.started = true - unlock() - return srv.serveTCP(l) - case "udp", "udp4", "udp6": - l, err := listenUDP(srv.Net, addr, srv.ReusePort) - if err != nil { - return err - } - u := l.(*net.UDPConn) - if e := setUDPSocketOptions(u); e != nil { - return e - } - srv.PacketConn = l - srv.started = true - unlock() - return srv.serveUDP(u) - } - return &Error{err: "bad network"} -} - -// ActivateAndServe starts a nameserver with the PacketConn or Listener -// configured in *Server. Its main use is to start a server from systemd. 
-func (srv *Server) ActivateAndServe() error { - unlock := unlockOnce(&srv.lock) - srv.lock.Lock() - defer unlock() - - if srv.started { - return &Error{err: "server already started"} - } - - srv.init() - defer close(srv.queue) - - pConn := srv.PacketConn - l := srv.Listener - if pConn != nil { - // Check PacketConn interface's type is valid and value - // is not nil - if t, ok := pConn.(*net.UDPConn); ok && t != nil { - if e := setUDPSocketOptions(t); e != nil { - return e - } - srv.started = true - unlock() - return srv.serveUDP(t) - } - } - if l != nil { - srv.started = true - unlock() - return srv.serveTCP(l) - } - return &Error{err: "bad listeners"} -} - -// Shutdown shuts down a server. After a call to Shutdown, ListenAndServe and -// ActivateAndServe will return. -func (srv *Server) Shutdown() error { - return srv.ShutdownContext(context.Background()) -} - -// ShutdownContext shuts down a server. After a call to ShutdownContext, -// ListenAndServe and ActivateAndServe will return. -// -// A context.Context may be passed to limit how long to wait for connections -// to terminate. -func (srv *Server) ShutdownContext(ctx context.Context) error { - srv.lock.Lock() - if !srv.started { - srv.lock.Unlock() - return &Error{err: "server not started"} - } - - srv.started = false - - if srv.PacketConn != nil { - srv.PacketConn.SetReadDeadline(aLongTimeAgo) // Unblock reads - } - - if srv.Listener != nil { - srv.Listener.Close() - } - - for rw := range srv.conns { - rw.SetReadDeadline(aLongTimeAgo) // Unblock reads - } - - srv.lock.Unlock() - - if testShutdownNotify != nil { - testShutdownNotify.Broadcast() - } - - var ctxErr error - select { - case <-srv.shutdown: - case <-ctx.Done(): - ctxErr = ctx.Err() - } - - if srv.PacketConn != nil { - srv.PacketConn.Close() - } - - return ctxErr -} - -var testShutdownNotify *sync.Cond - -// getReadTimeout is a helper func to use system timeout if server did not intend to change it. 
-func (srv *Server) getReadTimeout() time.Duration { - rtimeout := dnsTimeout - if srv.ReadTimeout != 0 { - rtimeout = srv.ReadTimeout - } - return rtimeout -} - -// serveTCP starts a TCP listener for the server. -func (srv *Server) serveTCP(l net.Listener) error { - defer l.Close() - - if srv.NotifyStartedFunc != nil { - srv.NotifyStartedFunc() - } - - var wg sync.WaitGroup - defer func() { - wg.Wait() - close(srv.shutdown) - }() - - for srv.isStarted() { - rw, err := l.Accept() - if err != nil { - if !srv.isStarted() { - return nil - } - if neterr, ok := err.(net.Error); ok && neterr.Temporary() { - continue - } - return err - } - srv.lock.Lock() - // Track the connection to allow unblocking reads on shutdown. - srv.conns[rw] = struct{}{} - srv.lock.Unlock() - wg.Add(1) - srv.spawnWorker(&response{ - tsigSecret: srv.TsigSecret, - tcp: rw, - wg: &wg, - }) - } - - return nil -} - -// serveUDP starts a UDP listener for the server. -func (srv *Server) serveUDP(l *net.UDPConn) error { - defer l.Close() - - if srv.NotifyStartedFunc != nil { - srv.NotifyStartedFunc() - } - - reader := Reader(&defaultReader{srv}) - if srv.DecorateReader != nil { - reader = srv.DecorateReader(reader) - } - - var wg sync.WaitGroup - defer func() { - wg.Wait() - close(srv.shutdown) - }() - - rtimeout := srv.getReadTimeout() - // deadline is not used here - for srv.isStarted() { - m, s, err := reader.ReadUDP(l, rtimeout) - if err != nil { - if !srv.isStarted() { - return nil - } - if netErr, ok := err.(net.Error); ok && netErr.Temporary() { - continue - } - return err - } - if len(m) < headerSize { - if cap(m) == srv.UDPSize { - srv.udpPool.Put(m[:srv.UDPSize]) - } - continue - } - wg.Add(1) - srv.spawnWorker(&response{ - msg: m, - tsigSecret: srv.TsigSecret, - udp: l, - udpSession: s, - wg: &wg, - }) - } - - return nil -} - -func (srv *Server) serve(w *response) { - if srv.DecorateWriter != nil { - w.writer = srv.DecorateWriter(w) - } else { - w.writer = w - } - - if w.udp != nil { - // 
serve UDP - srv.serveDNS(w) - - w.wg.Done() - return - } - - defer func() { - if !w.hijacked { - w.Close() - } - - srv.lock.Lock() - delete(srv.conns, w.tcp) - srv.lock.Unlock() - - w.wg.Done() - }() - - reader := Reader(&defaultReader{srv}) - if srv.DecorateReader != nil { - reader = srv.DecorateReader(reader) - } - - idleTimeout := tcpIdleTimeout - if srv.IdleTimeout != nil { - idleTimeout = srv.IdleTimeout() - } - - timeout := srv.getReadTimeout() - - limit := srv.MaxTCPQueries - if limit == 0 { - limit = maxTCPQueries - } - - for q := 0; (q < limit || limit == -1) && srv.isStarted(); q++ { - var err error - w.msg, err = reader.ReadTCP(w.tcp, timeout) - if err != nil { - // TODO(tmthrgd): handle error - break - } - srv.serveDNS(w) - if w.tcp == nil { - break // Close() was called - } - if w.hijacked { - break // client will call Close() themselves - } - // The first read uses the read timeout, the rest use the - // idle timeout. - timeout = idleTimeout - } -} - -func (srv *Server) disposeBuffer(w *response) { - if w.udp != nil && cap(w.msg) == srv.UDPSize { - srv.udpPool.Put(w.msg[:srv.UDPSize]) - } - w.msg = nil -} - -func (srv *Server) serveDNS(w *response) { - dh, off, err := unpackMsgHdr(w.msg, 0) - if err != nil { - // Let client hang, they are sending crap; any reply can be used to amplify. - return - } - - req := new(Msg) - req.setHdr(dh) - - switch srv.MsgAcceptFunc(dh) { - case MsgAccept: - case MsgIgnore: - return - case MsgReject: - req.SetRcodeFormatError(req) - // Are we allowed to delete any OPT records here? 
- req.Ns, req.Answer, req.Extra = nil, nil, nil - - w.WriteMsg(req) - srv.disposeBuffer(w) - return - } - - if err := req.unpack(dh, w.msg, off); err != nil { - req.SetRcodeFormatError(req) - req.Ns, req.Answer, req.Extra = nil, nil, nil - - w.WriteMsg(req) - srv.disposeBuffer(w) - return - } - - w.tsigStatus = nil - if w.tsigSecret != nil { - if t := req.IsTsig(); t != nil { - if secret, ok := w.tsigSecret[t.Hdr.Name]; ok { - w.tsigStatus = TsigVerify(w.msg, secret, "", false) - } else { - w.tsigStatus = ErrSecret - } - w.tsigTimersOnly = false - w.tsigRequestMAC = req.Extra[len(req.Extra)-1].(*TSIG).MAC - } - } - - srv.disposeBuffer(w) - - handler := srv.Handler - if handler == nil { - handler = DefaultServeMux - } - - handler.ServeDNS(w, req) // Writes back to the client -} - -func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { - // If we race with ShutdownContext, the read deadline may - // have been set in the distant past to unblock the read - // below. We must not override it, otherwise we may block - // ShutdownContext. - srv.lock.RLock() - if srv.started { - conn.SetReadDeadline(time.Now().Add(timeout)) - } - srv.lock.RUnlock() - - l := make([]byte, 2) - n, err := conn.Read(l) - if err != nil || n != 2 { - if err != nil { - return nil, err - } - return nil, ErrShortRead - } - length := binary.BigEndian.Uint16(l) - if length == 0 { - return nil, ErrShortRead - } - m := make([]byte, int(length)) - n, err = conn.Read(m[:int(length)]) - if err != nil || n == 0 { - if err != nil { - return nil, err - } - return nil, ErrShortRead - } - i := n - for i < int(length) { - j, err := conn.Read(m[i:int(length)]) - if err != nil { - return nil, err - } - i += j - } - n = i - m = m[:n] - return m, nil -} - -func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { - srv.lock.RLock() - if srv.started { - // See the comment in readTCP above. 
- conn.SetReadDeadline(time.Now().Add(timeout)) - } - srv.lock.RUnlock() - - m := srv.udpPool.Get().([]byte) - n, s, err := ReadFromSessionUDP(conn, m) - if err != nil { - srv.udpPool.Put(m) - return nil, nil, err - } - m = m[:n] - return m, s, nil -} - -// WriteMsg implements the ResponseWriter.WriteMsg method. -func (w *response) WriteMsg(m *Msg) (err error) { - if w.closed { - return &Error{err: "WriteMsg called after Close"} - } - - var data []byte - if w.tsigSecret != nil { // if no secrets, dont check for the tsig (which is a longer check) - if t := m.IsTsig(); t != nil { - data, w.tsigRequestMAC, err = TsigGenerate(m, w.tsigSecret[t.Hdr.Name], w.tsigRequestMAC, w.tsigTimersOnly) - if err != nil { - return err - } - _, err = w.writer.Write(data) - return err - } - } - data, err = m.Pack() - if err != nil { - return err - } - _, err = w.writer.Write(data) - return err -} - -// Write implements the ResponseWriter.Write method. -func (w *response) Write(m []byte) (int, error) { - if w.closed { - return 0, &Error{err: "Write called after Close"} - } - - switch { - case w.udp != nil: - n, err := WriteToSessionUDP(w.udp, m, w.udpSession) - return n, err - case w.tcp != nil: - lm := len(m) - if lm < 2 { - return 0, io.ErrShortBuffer - } - if lm > MaxMsgSize { - return 0, &Error{err: "message too large"} - } - l := make([]byte, 2, 2+lm) - binary.BigEndian.PutUint16(l, uint16(lm)) - m = append(l, m...) - - n, err := io.Copy(w.tcp, bytes.NewReader(m)) - return int(n), err - default: - panic("dns: internal error: udp and tcp both nil") - } -} - -// LocalAddr implements the ResponseWriter.LocalAddr method. -func (w *response) LocalAddr() net.Addr { - switch { - case w.udp != nil: - return w.udp.LocalAddr() - case w.tcp != nil: - return w.tcp.LocalAddr() - default: - panic("dns: internal error: udp and tcp both nil") - } -} - -// RemoteAddr implements the ResponseWriter.RemoteAddr method. 
-func (w *response) RemoteAddr() net.Addr { - switch { - case w.udpSession != nil: - return w.udpSession.RemoteAddr() - case w.tcp != nil: - return w.tcp.RemoteAddr() - default: - panic("dns: internal error: udpSession and tcp both nil") - } -} - -// TsigStatus implements the ResponseWriter.TsigStatus method. -func (w *response) TsigStatus() error { return w.tsigStatus } - -// TsigTimersOnly implements the ResponseWriter.TsigTimersOnly method. -func (w *response) TsigTimersOnly(b bool) { w.tsigTimersOnly = b } - -// Hijack implements the ResponseWriter.Hijack method. -func (w *response) Hijack() { w.hijacked = true } - -// Close implements the ResponseWriter.Close method -func (w *response) Close() error { - if w.closed { - return &Error{err: "connection already closed"} - } - w.closed = true - - switch { - case w.udp != nil: - // Can't close the udp conn, as that is actually the listener. - return nil - case w.tcp != nil: - return w.tcp.Close() - default: - panic("dns: internal error: udp and tcp both nil") - } -} - -// ConnectionState() implements the ConnectionStater.ConnectionState() interface. -func (w *response) ConnectionState() *tls.ConnectionState { - type tlsConnectionStater interface { - ConnectionState() tls.ConnectionState - } - if v, ok := w.tcp.(tlsConnectionStater); ok { - t := v.ConnectionState() - return &t - } - return nil -} diff --git a/vendor/github.com/miekg/dns/sig0.go b/vendor/github.com/miekg/dns/sig0.go deleted file mode 100644 index e97f63968..000000000 --- a/vendor/github.com/miekg/dns/sig0.go +++ /dev/null @@ -1,217 +0,0 @@ -package dns - -import ( - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/rsa" - "encoding/binary" - "math/big" - "strings" - "time" -) - -// Sign signs a dns.Msg. It fills the signature with the appropriate data. -// The SIG record should have the SignerName, KeyTag, Algorithm, Inception -// and Expiration set. 
-func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) { - if k == nil { - return nil, ErrPrivKey - } - if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { - return nil, ErrKey - } - rr.Header().Rrtype = TypeSIG - rr.Header().Class = ClassANY - rr.Header().Ttl = 0 - rr.Header().Name = "." - rr.OrigTtl = 0 - rr.TypeCovered = 0 - rr.Labels = 0 - - buf := make([]byte, m.Len()+Len(rr)) - mbuf, err := m.PackBuffer(buf) - if err != nil { - return nil, err - } - if &buf[0] != &mbuf[0] { - return nil, ErrBuf - } - off, err := PackRR(rr, buf, len(mbuf), nil, false) - if err != nil { - return nil, err - } - buf = buf[:off:cap(buf)] - - hash, ok := AlgorithmToHash[rr.Algorithm] - if !ok { - return nil, ErrAlg - } - - hasher := hash.New() - // Write SIG rdata - hasher.Write(buf[len(mbuf)+1+2+2+4+2:]) - // Write message - hasher.Write(buf[:len(mbuf)]) - - signature, err := sign(k, hasher.Sum(nil), hash, rr.Algorithm) - if err != nil { - return nil, err - } - - rr.Signature = toBase64(signature) - - buf = append(buf, signature...) - if len(buf) > int(^uint16(0)) { - return nil, ErrBuf - } - // Adjust sig data length - rdoff := len(mbuf) + 1 + 2 + 2 + 4 - rdlen := binary.BigEndian.Uint16(buf[rdoff:]) - rdlen += uint16(len(signature)) - binary.BigEndian.PutUint16(buf[rdoff:], rdlen) - // Adjust additional count - adc := binary.BigEndian.Uint16(buf[10:]) - adc++ - binary.BigEndian.PutUint16(buf[10:], adc) - return buf, nil -} - -// Verify validates the message buf using the key k. -// It's assumed that buf is a valid message from which rr was unpacked. 
-func (rr *SIG) Verify(k *KEY, buf []byte) error { - if k == nil { - return ErrKey - } - if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { - return ErrKey - } - - var hash crypto.Hash - switch rr.Algorithm { - case DSA, RSASHA1: - hash = crypto.SHA1 - case RSASHA256, ECDSAP256SHA256: - hash = crypto.SHA256 - case ECDSAP384SHA384: - hash = crypto.SHA384 - case RSASHA512: - hash = crypto.SHA512 - default: - return ErrAlg - } - hasher := hash.New() - - buflen := len(buf) - qdc := binary.BigEndian.Uint16(buf[4:]) - anc := binary.BigEndian.Uint16(buf[6:]) - auc := binary.BigEndian.Uint16(buf[8:]) - adc := binary.BigEndian.Uint16(buf[10:]) - offset := 12 - var err error - for i := uint16(0); i < qdc && offset < buflen; i++ { - _, offset, err = UnpackDomainName(buf, offset) - if err != nil { - return err - } - // Skip past Type and Class - offset += 2 + 2 - } - for i := uint16(1); i < anc+auc+adc && offset < buflen; i++ { - _, offset, err = UnpackDomainName(buf, offset) - if err != nil { - return err - } - // Skip past Type, Class and TTL - offset += 2 + 2 + 4 - if offset+1 >= buflen { - continue - } - rdlen := binary.BigEndian.Uint16(buf[offset:]) - offset += 2 - offset += int(rdlen) - } - if offset >= buflen { - return &Error{err: "overflowing unpacking signed message"} - } - - // offset should be just prior to SIG - bodyend := offset - // owner name SHOULD be root - _, offset, err = UnpackDomainName(buf, offset) - if err != nil { - return err - } - // Skip Type, Class, TTL, RDLen - offset += 2 + 2 + 4 + 2 - sigstart := offset - // Skip Type Covered, Algorithm, Labels, Original TTL - offset += 2 + 1 + 1 + 4 - if offset+4+4 >= buflen { - return &Error{err: "overflow unpacking signed message"} - } - expire := binary.BigEndian.Uint32(buf[offset:]) - offset += 4 - incept := binary.BigEndian.Uint32(buf[offset:]) - offset += 4 - now := uint32(time.Now().Unix()) - if now < incept || now > expire { - return ErrTime - } - // Skip key tag - offset += 2 - var 
signername string - signername, offset, err = UnpackDomainName(buf, offset) - if err != nil { - return err - } - // If key has come from the DNS name compression might - // have mangled the case of the name - if !strings.EqualFold(signername, k.Header().Name) { - return &Error{err: "signer name doesn't match key name"} - } - sigend := offset - hasher.Write(buf[sigstart:sigend]) - hasher.Write(buf[:10]) - hasher.Write([]byte{ - byte((adc - 1) << 8), - byte(adc - 1), - }) - hasher.Write(buf[12:bodyend]) - - hashed := hasher.Sum(nil) - sig := buf[sigend:] - switch k.Algorithm { - case DSA: - pk := k.publicKeyDSA() - sig = sig[1:] - r := big.NewInt(0) - r.SetBytes(sig[:len(sig)/2]) - s := big.NewInt(0) - s.SetBytes(sig[len(sig)/2:]) - if pk != nil { - if dsa.Verify(pk, hashed, r, s) { - return nil - } - return ErrSig - } - case RSASHA1, RSASHA256, RSASHA512: - pk := k.publicKeyRSA() - if pk != nil { - return rsa.VerifyPKCS1v15(pk, hash, hashed, sig) - } - case ECDSAP256SHA256, ECDSAP384SHA384: - pk := k.publicKeyECDSA() - r := big.NewInt(0) - r.SetBytes(sig[:len(sig)/2]) - s := big.NewInt(0) - s.SetBytes(sig[len(sig)/2:]) - if pk != nil { - if ecdsa.Verify(pk, hashed, r, s) { - return nil - } - return ErrSig - } - } - return ErrKeyAlg -} diff --git a/vendor/github.com/miekg/dns/singleinflight.go b/vendor/github.com/miekg/dns/singleinflight.go deleted file mode 100644 index 9573c7d0b..000000000 --- a/vendor/github.com/miekg/dns/singleinflight.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Adapted for dns package usage by Miek Gieben. 
- -package dns - -import "sync" -import "time" - -// call is an in-flight or completed singleflight.Do call -type call struct { - wg sync.WaitGroup - val *Msg - rtt time.Duration - err error - dups int -} - -// singleflight represents a class of work and forms a namespace in -// which units of work can be executed with duplicate suppression. -type singleflight struct { - sync.Mutex // protects m - m map[string]*call // lazily initialized -} - -// Do executes and returns the results of the given function, making -// sure that only one execution is in-flight for a given key at a -// time. If a duplicate comes in, the duplicate caller waits for the -// original to complete and receives the same results. -// The return value shared indicates whether v was given to multiple callers. -func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) { - g.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - g.Unlock() - c.wg.Wait() - return c.val, c.rtt, c.err, true - } - c := new(call) - c.wg.Add(1) - g.m[key] = c - g.Unlock() - - c.val, c.rtt, c.err = fn() - c.wg.Done() - - g.Lock() - delete(g.m, key) - g.Unlock() - - return c.val, c.rtt, c.err, c.dups > 0 -} diff --git a/vendor/github.com/miekg/dns/smimea.go b/vendor/github.com/miekg/dns/smimea.go deleted file mode 100644 index 4e7ded4b3..000000000 --- a/vendor/github.com/miekg/dns/smimea.go +++ /dev/null @@ -1,47 +0,0 @@ -package dns - -import ( - "crypto/sha256" - "crypto/x509" - "encoding/hex" -) - -// Sign creates a SMIMEA record from an SSL certificate. 
-func (r *SMIMEA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) { - r.Hdr.Rrtype = TypeSMIMEA - r.Usage = uint8(usage) - r.Selector = uint8(selector) - r.MatchingType = uint8(matchingType) - - r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert) - if err != nil { - return err - } - return nil -} - -// Verify verifies a SMIMEA record against an SSL certificate. If it is OK -// a nil error is returned. -func (r *SMIMEA) Verify(cert *x509.Certificate) error { - c, err := CertificateToDANE(r.Selector, r.MatchingType, cert) - if err != nil { - return err // Not also ErrSig? - } - if r.Certificate == c { - return nil - } - return ErrSig // ErrSig, really? -} - -// SMIMEAName returns the ownername of a SMIMEA resource record as per the -// format specified in RFC 'draft-ietf-dane-smime-12' Section 2 and 3 -func SMIMEAName(email, domain string) (string, error) { - hasher := sha256.New() - hasher.Write([]byte(email)) - - // RFC Section 3: "The local-part is hashed using the SHA2-256 - // algorithm with the hash truncated to 28 octets and - // represented in its hexadecimal representation to become the - // left-most label in the prepared domain name" - return hex.EncodeToString(hasher.Sum(nil)[:28]) + "." + "_smimecert." + domain, nil -} diff --git a/vendor/github.com/miekg/dns/tlsa.go b/vendor/github.com/miekg/dns/tlsa.go deleted file mode 100644 index 431e2fb5a..000000000 --- a/vendor/github.com/miekg/dns/tlsa.go +++ /dev/null @@ -1,47 +0,0 @@ -package dns - -import ( - "crypto/x509" - "net" - "strconv" -) - -// Sign creates a TLSA record from an SSL certificate. 
-func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (err error) { - r.Hdr.Rrtype = TypeTLSA - r.Usage = uint8(usage) - r.Selector = uint8(selector) - r.MatchingType = uint8(matchingType) - - r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert) - if err != nil { - return err - } - return nil -} - -// Verify verifies a TLSA record against an SSL certificate. If it is OK -// a nil error is returned. -func (r *TLSA) Verify(cert *x509.Certificate) error { - c, err := CertificateToDANE(r.Selector, r.MatchingType, cert) - if err != nil { - return err // Not also ErrSig? - } - if r.Certificate == c { - return nil - } - return ErrSig // ErrSig, really? -} - -// TLSAName returns the ownername of a TLSA resource record as per the -// rules specified in RFC 6698, Section 3. -func TLSAName(name, service, network string) (string, error) { - if !IsFqdn(name) { - return "", ErrFqdn - } - p, err := net.LookupPort(network, service) - if err != nil { - return "", err - } - return "_" + strconv.Itoa(p) + "._" + network + "." + name, nil -} diff --git a/vendor/github.com/miekg/dns/tsig.go b/vendor/github.com/miekg/dns/tsig.go deleted file mode 100644 index 91b69d582..000000000 --- a/vendor/github.com/miekg/dns/tsig.go +++ /dev/null @@ -1,386 +0,0 @@ -package dns - -import ( - "crypto/hmac" - "crypto/md5" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "encoding/binary" - "encoding/hex" - "hash" - "strconv" - "strings" - "time" -) - -// HMAC hashing codes. These are transmitted as domain names. -const ( - HmacMD5 = "hmac-md5.sig-alg.reg.int." - HmacSHA1 = "hmac-sha1." - HmacSHA256 = "hmac-sha256." - HmacSHA512 = "hmac-sha512." -) - -// TSIG is the RR the holds the transaction signature of a message. -// See RFC 2845 and RFC 4635. 
-type TSIG struct { - Hdr RR_Header - Algorithm string `dns:"domain-name"` - TimeSigned uint64 `dns:"uint48"` - Fudge uint16 - MACSize uint16 - MAC string `dns:"size-hex:MACSize"` - OrigId uint16 - Error uint16 - OtherLen uint16 - OtherData string `dns:"size-hex:OtherLen"` -} - -// TSIG has no official presentation format, but this will suffice. - -func (rr *TSIG) String() string { - s := "\n;; TSIG PSEUDOSECTION:\n" - s += rr.Hdr.String() + - " " + rr.Algorithm + - " " + tsigTimeToString(rr.TimeSigned) + - " " + strconv.Itoa(int(rr.Fudge)) + - " " + strconv.Itoa(int(rr.MACSize)) + - " " + strings.ToUpper(rr.MAC) + - " " + strconv.Itoa(int(rr.OrigId)) + - " " + strconv.Itoa(int(rr.Error)) + // BIND prints NOERROR - " " + strconv.Itoa(int(rr.OtherLen)) + - " " + rr.OtherData - return s -} - -// The following values must be put in wireformat, so that the MAC can be calculated. -// RFC 2845, section 3.4.2. TSIG Variables. -type tsigWireFmt struct { - // From RR_Header - Name string `dns:"domain-name"` - Class uint16 - Ttl uint32 - // Rdata of the TSIG - Algorithm string `dns:"domain-name"` - TimeSigned uint64 `dns:"uint48"` - Fudge uint16 - // MACSize, MAC and OrigId excluded - Error uint16 - OtherLen uint16 - OtherData string `dns:"size-hex:OtherLen"` -} - -// If we have the MAC use this type to convert it to wiredata. Section 3.4.3. Request MAC -type macWireFmt struct { - MACSize uint16 - MAC string `dns:"size-hex:MACSize"` -} - -// 3.3. Time values used in TSIG calculations -type timerWireFmt struct { - TimeSigned uint64 `dns:"uint48"` - Fudge uint16 -} - -// TsigGenerate fills out the TSIG record attached to the message. -// The message should contain -// a "stub" TSIG RR with the algorithm, key name (owner name of the RR), -// time fudge (defaults to 300 seconds) and the current time -// The TSIG MAC is saved in that Tsig RR. -// When TsigGenerate is called for the first time requestMAC is set to the empty string and -// timersOnly is false. 
-// If something goes wrong an error is returned, otherwise it is nil. -func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, string, error) { - if m.IsTsig() == nil { - panic("dns: TSIG not last RR in additional") - } - // If we barf here, the caller is to blame - rawsecret, err := fromBase64([]byte(secret)) - if err != nil { - return nil, "", err - } - - rr := m.Extra[len(m.Extra)-1].(*TSIG) - m.Extra = m.Extra[0 : len(m.Extra)-1] // kill the TSIG from the msg - mbuf, err := m.Pack() - if err != nil { - return nil, "", err - } - buf := tsigBuffer(mbuf, rr, requestMAC, timersOnly) - - t := new(TSIG) - var h hash.Hash - switch strings.ToLower(rr.Algorithm) { - case HmacMD5: - h = hmac.New(md5.New, []byte(rawsecret)) - case HmacSHA1: - h = hmac.New(sha1.New, []byte(rawsecret)) - case HmacSHA256: - h = hmac.New(sha256.New, []byte(rawsecret)) - case HmacSHA512: - h = hmac.New(sha512.New, []byte(rawsecret)) - default: - return nil, "", ErrKeyAlg - } - h.Write(buf) - t.MAC = hex.EncodeToString(h.Sum(nil)) - t.MACSize = uint16(len(t.MAC) / 2) // Size is half! - - t.Hdr = RR_Header{Name: rr.Hdr.Name, Rrtype: TypeTSIG, Class: ClassANY, Ttl: 0} - t.Fudge = rr.Fudge - t.TimeSigned = rr.TimeSigned - t.Algorithm = rr.Algorithm - t.OrigId = m.Id - - tbuf := make([]byte, Len(t)) - if off, err := PackRR(t, tbuf, 0, nil, false); err == nil { - tbuf = tbuf[:off] // reset to actual size used - } else { - return nil, "", err - } - mbuf = append(mbuf, tbuf...) - // Update the ArCount directly in the buffer. - binary.BigEndian.PutUint16(mbuf[10:], uint16(len(m.Extra)+1)) - - return mbuf, t.MAC, nil -} - -// TsigVerify verifies the TSIG on a message. -// If the signature does not validate err contains the -// error, otherwise it is nil. 
-func TsigVerify(msg []byte, secret, requestMAC string, timersOnly bool) error { - rawsecret, err := fromBase64([]byte(secret)) - if err != nil { - return err - } - // Strip the TSIG from the incoming msg - stripped, tsig, err := stripTsig(msg) - if err != nil { - return err - } - - msgMAC, err := hex.DecodeString(tsig.MAC) - if err != nil { - return err - } - - buf := tsigBuffer(stripped, tsig, requestMAC, timersOnly) - - // Fudge factor works both ways. A message can arrive before it was signed because - // of clock skew. - now := uint64(time.Now().Unix()) - ti := now - tsig.TimeSigned - if now < tsig.TimeSigned { - ti = tsig.TimeSigned - now - } - if uint64(tsig.Fudge) < ti { - return ErrTime - } - - var h hash.Hash - switch strings.ToLower(tsig.Algorithm) { - case HmacMD5: - h = hmac.New(md5.New, rawsecret) - case HmacSHA1: - h = hmac.New(sha1.New, rawsecret) - case HmacSHA256: - h = hmac.New(sha256.New, rawsecret) - case HmacSHA512: - h = hmac.New(sha512.New, rawsecret) - default: - return ErrKeyAlg - } - h.Write(buf) - if !hmac.Equal(h.Sum(nil), msgMAC) { - return ErrSig - } - return nil -} - -// Create a wiredata buffer for the MAC calculation. -func tsigBuffer(msgbuf []byte, rr *TSIG, requestMAC string, timersOnly bool) []byte { - var buf []byte - if rr.TimeSigned == 0 { - rr.TimeSigned = uint64(time.Now().Unix()) - } - if rr.Fudge == 0 { - rr.Fudge = 300 // Standard (RFC) default. 
- } - - // Replace message ID in header with original ID from TSIG - binary.BigEndian.PutUint16(msgbuf[0:2], rr.OrigId) - - if requestMAC != "" { - m := new(macWireFmt) - m.MACSize = uint16(len(requestMAC) / 2) - m.MAC = requestMAC - buf = make([]byte, len(requestMAC)) // long enough - n, _ := packMacWire(m, buf) - buf = buf[:n] - } - - tsigvar := make([]byte, DefaultMsgSize) - if timersOnly { - tsig := new(timerWireFmt) - tsig.TimeSigned = rr.TimeSigned - tsig.Fudge = rr.Fudge - n, _ := packTimerWire(tsig, tsigvar) - tsigvar = tsigvar[:n] - } else { - tsig := new(tsigWireFmt) - tsig.Name = strings.ToLower(rr.Hdr.Name) - tsig.Class = ClassANY - tsig.Ttl = rr.Hdr.Ttl - tsig.Algorithm = strings.ToLower(rr.Algorithm) - tsig.TimeSigned = rr.TimeSigned - tsig.Fudge = rr.Fudge - tsig.Error = rr.Error - tsig.OtherLen = rr.OtherLen - tsig.OtherData = rr.OtherData - n, _ := packTsigWire(tsig, tsigvar) - tsigvar = tsigvar[:n] - } - - if requestMAC != "" { - x := append(buf, msgbuf...) - buf = append(x, tsigvar...) - } else { - buf = append(msgbuf, tsigvar...) - } - return buf -} - -// Strip the TSIG from the raw message. -func stripTsig(msg []byte) ([]byte, *TSIG, error) { - // Copied from msg.go's Unpack() Header, but modified. 
- var ( - dh Header - err error - ) - off, tsigoff := 0, 0 - - if dh, off, err = unpackMsgHdr(msg, off); err != nil { - return nil, nil, err - } - if dh.Arcount == 0 { - return nil, nil, ErrNoSig - } - - // Rcode, see msg.go Unpack() - if int(dh.Bits&0xF) == RcodeNotAuth { - return nil, nil, ErrAuth - } - - for i := 0; i < int(dh.Qdcount); i++ { - _, off, err = unpackQuestion(msg, off) - if err != nil { - return nil, nil, err - } - } - - _, off, err = unpackRRslice(int(dh.Ancount), msg, off) - if err != nil { - return nil, nil, err - } - _, off, err = unpackRRslice(int(dh.Nscount), msg, off) - if err != nil { - return nil, nil, err - } - - rr := new(TSIG) - var extra RR - for i := 0; i < int(dh.Arcount); i++ { - tsigoff = off - extra, off, err = UnpackRR(msg, off) - if err != nil { - return nil, nil, err - } - if extra.Header().Rrtype == TypeTSIG { - rr = extra.(*TSIG) - // Adjust Arcount. - arcount := binary.BigEndian.Uint16(msg[10:]) - binary.BigEndian.PutUint16(msg[10:], arcount-1) - break - } - } - if rr == nil { - return nil, nil, ErrNoSig - } - return msg[:tsigoff], rr, nil -} - -// Translate the TSIG time signed into a date. There is no -// need for RFC1982 calculations as this date is 48 bits. 
-func tsigTimeToString(t uint64) string { - ti := time.Unix(int64(t), 0).UTC() - return ti.Format("20060102150405") -} - -func packTsigWire(tw *tsigWireFmt, msg []byte) (int, error) { - // copied from zmsg.go TSIG packing - // RR_Header - off, err := PackDomainName(tw.Name, msg, 0, nil, false) - if err != nil { - return off, err - } - off, err = packUint16(tw.Class, msg, off) - if err != nil { - return off, err - } - off, err = packUint32(tw.Ttl, msg, off) - if err != nil { - return off, err - } - - off, err = PackDomainName(tw.Algorithm, msg, off, nil, false) - if err != nil { - return off, err - } - off, err = packUint48(tw.TimeSigned, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(tw.Fudge, msg, off) - if err != nil { - return off, err - } - - off, err = packUint16(tw.Error, msg, off) - if err != nil { - return off, err - } - off, err = packUint16(tw.OtherLen, msg, off) - if err != nil { - return off, err - } - off, err = packStringHex(tw.OtherData, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func packMacWire(mw *macWireFmt, msg []byte) (int, error) { - off, err := packUint16(mw.MACSize, msg, 0) - if err != nil { - return off, err - } - off, err = packStringHex(mw.MAC, msg, off) - if err != nil { - return off, err - } - return off, nil -} - -func packTimerWire(tw *timerWireFmt, msg []byte) (int, error) { - off, err := packUint48(tw.TimeSigned, msg, 0) - if err != nil { - return off, err - } - off, err = packUint16(tw.Fudge, msg, off) - if err != nil { - return off, err - } - return off, nil -} diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go deleted file mode 100644 index c8658b343..000000000 --- a/vendor/github.com/miekg/dns/types.go +++ /dev/null @@ -1,1412 +0,0 @@ -package dns - -import ( - "fmt" - "net" - "strconv" - "strings" - "time" -) - -type ( - // Type is a DNS type. - Type uint16 - // Class is a DNS class. - Class uint16 - // Name is a DNS domain name. 
- Name string -) - -// Packet formats - -// Wire constants and supported types. -const ( - // valid RR_Header.Rrtype and Question.qtype - - TypeNone uint16 = 0 - TypeA uint16 = 1 - TypeNS uint16 = 2 - TypeMD uint16 = 3 - TypeMF uint16 = 4 - TypeCNAME uint16 = 5 - TypeSOA uint16 = 6 - TypeMB uint16 = 7 - TypeMG uint16 = 8 - TypeMR uint16 = 9 - TypeNULL uint16 = 10 - TypePTR uint16 = 12 - TypeHINFO uint16 = 13 - TypeMINFO uint16 = 14 - TypeMX uint16 = 15 - TypeTXT uint16 = 16 - TypeRP uint16 = 17 - TypeAFSDB uint16 = 18 - TypeX25 uint16 = 19 - TypeISDN uint16 = 20 - TypeRT uint16 = 21 - TypeNSAPPTR uint16 = 23 - TypeSIG uint16 = 24 - TypeKEY uint16 = 25 - TypePX uint16 = 26 - TypeGPOS uint16 = 27 - TypeAAAA uint16 = 28 - TypeLOC uint16 = 29 - TypeNXT uint16 = 30 - TypeEID uint16 = 31 - TypeNIMLOC uint16 = 32 - TypeSRV uint16 = 33 - TypeATMA uint16 = 34 - TypeNAPTR uint16 = 35 - TypeKX uint16 = 36 - TypeCERT uint16 = 37 - TypeDNAME uint16 = 39 - TypeOPT uint16 = 41 // EDNS - TypeDS uint16 = 43 - TypeSSHFP uint16 = 44 - TypeRRSIG uint16 = 46 - TypeNSEC uint16 = 47 - TypeDNSKEY uint16 = 48 - TypeDHCID uint16 = 49 - TypeNSEC3 uint16 = 50 - TypeNSEC3PARAM uint16 = 51 - TypeTLSA uint16 = 52 - TypeSMIMEA uint16 = 53 - TypeHIP uint16 = 55 - TypeNINFO uint16 = 56 - TypeRKEY uint16 = 57 - TypeTALINK uint16 = 58 - TypeCDS uint16 = 59 - TypeCDNSKEY uint16 = 60 - TypeOPENPGPKEY uint16 = 61 - TypeCSYNC uint16 = 62 - TypeSPF uint16 = 99 - TypeUINFO uint16 = 100 - TypeUID uint16 = 101 - TypeGID uint16 = 102 - TypeUNSPEC uint16 = 103 - TypeNID uint16 = 104 - TypeL32 uint16 = 105 - TypeL64 uint16 = 106 - TypeLP uint16 = 107 - TypeEUI48 uint16 = 108 - TypeEUI64 uint16 = 109 - TypeURI uint16 = 256 - TypeCAA uint16 = 257 - TypeAVC uint16 = 258 - - TypeTKEY uint16 = 249 - TypeTSIG uint16 = 250 - - // valid Question.Qtype only - TypeIXFR uint16 = 251 - TypeAXFR uint16 = 252 - TypeMAILB uint16 = 253 - TypeMAILA uint16 = 254 - TypeANY uint16 = 255 - - TypeTA uint16 = 32768 - TypeDLV uint16 = 
32769 - TypeReserved uint16 = 65535 - - // valid Question.Qclass - ClassINET = 1 - ClassCSNET = 2 - ClassCHAOS = 3 - ClassHESIOD = 4 - ClassNONE = 254 - ClassANY = 255 - - // Message Response Codes, see https://www.iana.org/assignments/dns-parameters/dns-parameters.xhtml - RcodeSuccess = 0 // NoError - No Error [DNS] - RcodeFormatError = 1 // FormErr - Format Error [DNS] - RcodeServerFailure = 2 // ServFail - Server Failure [DNS] - RcodeNameError = 3 // NXDomain - Non-Existent Domain [DNS] - RcodeNotImplemented = 4 // NotImp - Not Implemented [DNS] - RcodeRefused = 5 // Refused - Query Refused [DNS] - RcodeYXDomain = 6 // YXDomain - Name Exists when it should not [DNS Update] - RcodeYXRrset = 7 // YXRRSet - RR Set Exists when it should not [DNS Update] - RcodeNXRrset = 8 // NXRRSet - RR Set that should exist does not [DNS Update] - RcodeNotAuth = 9 // NotAuth - Server Not Authoritative for zone [DNS Update] - RcodeNotZone = 10 // NotZone - Name not contained in zone [DNS Update/TSIG] - RcodeBadSig = 16 // BADSIG - TSIG Signature Failure [TSIG] - RcodeBadVers = 16 // BADVERS - Bad OPT Version [EDNS0] - RcodeBadKey = 17 // BADKEY - Key not recognized [TSIG] - RcodeBadTime = 18 // BADTIME - Signature out of time window [TSIG] - RcodeBadMode = 19 // BADMODE - Bad TKEY Mode [TKEY] - RcodeBadName = 20 // BADNAME - Duplicate key name [TKEY] - RcodeBadAlg = 21 // BADALG - Algorithm not supported [TKEY] - RcodeBadTrunc = 22 // BADTRUNC - Bad Truncation [TSIG] - RcodeBadCookie = 23 // BADCOOKIE - Bad/missing Server Cookie [DNS Cookies] - - // Message Opcodes. There is no 3. - OpcodeQuery = 0 - OpcodeIQuery = 1 - OpcodeStatus = 2 - OpcodeNotify = 4 - OpcodeUpdate = 5 -) - -// Header is the wire format for the DNS packet header. 
-type Header struct { - Id uint16 - Bits uint16 - Qdcount, Ancount, Nscount, Arcount uint16 -} - -const ( - headerSize = 12 - - // Header.Bits - _QR = 1 << 15 // query/response (response=1) - _AA = 1 << 10 // authoritative - _TC = 1 << 9 // truncated - _RD = 1 << 8 // recursion desired - _RA = 1 << 7 // recursion available - _Z = 1 << 6 // Z - _AD = 1 << 5 // authticated data - _CD = 1 << 4 // checking disabled -) - -// Various constants used in the LOC RR, See RFC 1887. -const ( - LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2. - LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2. - LOC_HOURS = 60 * 1000 - LOC_DEGREES = 60 * LOC_HOURS - LOC_ALTITUDEBASE = 100000 -) - -// Different Certificate Types, see RFC 4398, Section 2.1 -const ( - CertPKIX = 1 + iota - CertSPKI - CertPGP - CertIPIX - CertISPKI - CertIPGP - CertACPKIX - CertIACPKIX - CertURI = 253 - CertOID = 254 -) - -// CertTypeToString converts the Cert Type to its string representation. -// See RFC 4398 and RFC 6944. -var CertTypeToString = map[uint16]string{ - CertPKIX: "PKIX", - CertSPKI: "SPKI", - CertPGP: "PGP", - CertIPIX: "IPIX", - CertISPKI: "ISPKI", - CertIPGP: "IPGP", - CertACPKIX: "ACPKIX", - CertIACPKIX: "IACPKIX", - CertURI: "URI", - CertOID: "OID", -} - -// StringToCertType is the reverseof CertTypeToString. -var StringToCertType = reverseInt16(CertTypeToString) - -//go:generate go run types_generate.go - -// Question holds a DNS question. There can be multiple questions in the -// question section of a message. Usually there is just one. 
-type Question struct { - Name string `dns:"cdomain-name"` // "cdomain-name" specifies encoding (and may be compressed) - Qtype uint16 - Qclass uint16 -} - -func (q *Question) len(off int, compression map[string]struct{}) int { - l := domainNameLen(q.Name, off, compression, true) - l += 2 + 2 - return l -} - -func (q *Question) String() (s string) { - // prefix with ; (as in dig) - s = ";" + sprintName(q.Name) + "\t" - s += Class(q.Qclass).String() + "\t" - s += " " + Type(q.Qtype).String() - return s -} - -// ANY is a wildcard record. See RFC 1035, Section 3.2.3. ANY -// is named "*" there. -type ANY struct { - Hdr RR_Header - // Does not have any rdata -} - -func (rr *ANY) String() string { return rr.Hdr.String() } - -// CNAME RR. See RFC 1034. -type CNAME struct { - Hdr RR_Header - Target string `dns:"cdomain-name"` -} - -func (rr *CNAME) String() string { return rr.Hdr.String() + sprintName(rr.Target) } - -// HINFO RR. See RFC 1034. -type HINFO struct { - Hdr RR_Header - Cpu string - Os string -} - -func (rr *HINFO) String() string { - return rr.Hdr.String() + sprintTxt([]string{rr.Cpu, rr.Os}) -} - -// MB RR. See RFC 1035. -type MB struct { - Hdr RR_Header - Mb string `dns:"cdomain-name"` -} - -func (rr *MB) String() string { return rr.Hdr.String() + sprintName(rr.Mb) } - -// MG RR. See RFC 1035. -type MG struct { - Hdr RR_Header - Mg string `dns:"cdomain-name"` -} - -func (rr *MG) String() string { return rr.Hdr.String() + sprintName(rr.Mg) } - -// MINFO RR. See RFC 1035. -type MINFO struct { - Hdr RR_Header - Rmail string `dns:"cdomain-name"` - Email string `dns:"cdomain-name"` -} - -func (rr *MINFO) String() string { - return rr.Hdr.String() + sprintName(rr.Rmail) + " " + sprintName(rr.Email) -} - -// MR RR. See RFC 1035. -type MR struct { - Hdr RR_Header - Mr string `dns:"cdomain-name"` -} - -func (rr *MR) String() string { - return rr.Hdr.String() + sprintName(rr.Mr) -} - -// MF RR. See RFC 1035. 
-type MF struct { - Hdr RR_Header - Mf string `dns:"cdomain-name"` -} - -func (rr *MF) String() string { - return rr.Hdr.String() + sprintName(rr.Mf) -} - -// MD RR. See RFC 1035. -type MD struct { - Hdr RR_Header - Md string `dns:"cdomain-name"` -} - -func (rr *MD) String() string { - return rr.Hdr.String() + sprintName(rr.Md) -} - -// MX RR. See RFC 1035. -type MX struct { - Hdr RR_Header - Preference uint16 - Mx string `dns:"cdomain-name"` -} - -func (rr *MX) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Mx) -} - -// AFSDB RR. See RFC 1183. -type AFSDB struct { - Hdr RR_Header - Subtype uint16 - Hostname string `dns:"domain-name"` -} - -func (rr *AFSDB) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Subtype)) + " " + sprintName(rr.Hostname) -} - -// X25 RR. See RFC 1183, Section 3.1. -type X25 struct { - Hdr RR_Header - PSDNAddress string -} - -func (rr *X25) String() string { - return rr.Hdr.String() + rr.PSDNAddress -} - -// RT RR. See RFC 1183, Section 3.3. -type RT struct { - Hdr RR_Header - Preference uint16 - Host string `dns:"domain-name"` // RFC 3597 prohibits compressing records not defined in RFC 1035. -} - -func (rr *RT) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Host) -} - -// NS RR. See RFC 1035. -type NS struct { - Hdr RR_Header - Ns string `dns:"cdomain-name"` -} - -func (rr *NS) String() string { - return rr.Hdr.String() + sprintName(rr.Ns) -} - -// PTR RR. See RFC 1035. -type PTR struct { - Hdr RR_Header - Ptr string `dns:"cdomain-name"` -} - -func (rr *PTR) String() string { - return rr.Hdr.String() + sprintName(rr.Ptr) -} - -// RP RR. See RFC 1138, Section 2.2. -type RP struct { - Hdr RR_Header - Mbox string `dns:"domain-name"` - Txt string `dns:"domain-name"` -} - -func (rr *RP) String() string { - return rr.Hdr.String() + rr.Mbox + " " + sprintTxt([]string{rr.Txt}) -} - -// SOA RR. See RFC 1035. 
-type SOA struct { - Hdr RR_Header - Ns string `dns:"cdomain-name"` - Mbox string `dns:"cdomain-name"` - Serial uint32 - Refresh uint32 - Retry uint32 - Expire uint32 - Minttl uint32 -} - -func (rr *SOA) String() string { - return rr.Hdr.String() + sprintName(rr.Ns) + " " + sprintName(rr.Mbox) + - " " + strconv.FormatInt(int64(rr.Serial), 10) + - " " + strconv.FormatInt(int64(rr.Refresh), 10) + - " " + strconv.FormatInt(int64(rr.Retry), 10) + - " " + strconv.FormatInt(int64(rr.Expire), 10) + - " " + strconv.FormatInt(int64(rr.Minttl), 10) -} - -// TXT RR. See RFC 1035. -type TXT struct { - Hdr RR_Header - Txt []string `dns:"txt"` -} - -func (rr *TXT) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } - -func sprintName(s string) string { - var dst strings.Builder - dst.Grow(len(s)) - for i := 0; i < len(s); { - if i+1 < len(s) && s[i] == '\\' && s[i+1] == '.' { - dst.WriteString(s[i : i+2]) - i += 2 - continue - } - - b, n := nextByte(s, i) - switch { - case n == 0: - i++ // dangling back slash - case b == '.': - dst.WriteByte('.') - default: - writeDomainNameByte(&dst, b) - } - i += n - } - return dst.String() -} - -func sprintTxtOctet(s string) string { - var dst strings.Builder - dst.Grow(2 + len(s)) - dst.WriteByte('"') - for i := 0; i < len(s); { - if i+1 < len(s) && s[i] == '\\' && s[i+1] == '.' 
{ - dst.WriteString(s[i : i+2]) - i += 2 - continue - } - - b, n := nextByte(s, i) - switch { - case n == 0: - i++ // dangling back slash - case b == '.': - dst.WriteByte('.') - case b < ' ' || b > '~': - dst.WriteString(escapeByte(b)) - default: - dst.WriteByte(b) - } - i += n - } - dst.WriteByte('"') - return dst.String() -} - -func sprintTxt(txt []string) string { - var out strings.Builder - for i, s := range txt { - out.Grow(3 + len(s)) - if i > 0 { - out.WriteString(` "`) - } else { - out.WriteByte('"') - } - for j := 0; j < len(s); { - b, n := nextByte(s, j) - if n == 0 { - break - } - writeTXTStringByte(&out, b) - j += n - } - out.WriteByte('"') - } - return out.String() -} - -func writeDomainNameByte(s *strings.Builder, b byte) { - switch b { - case '.', ' ', '\'', '@', ';', '(', ')': // additional chars to escape - s.WriteByte('\\') - s.WriteByte(b) - default: - writeTXTStringByte(s, b) - } -} - -func writeTXTStringByte(s *strings.Builder, b byte) { - switch { - case b == '"' || b == '\\': - s.WriteByte('\\') - s.WriteByte(b) - case b < ' ' || b > '~': - s.WriteString(escapeByte(b)) - default: - s.WriteByte(b) - } -} - -const ( - escapedByteSmall = "" + - `\000\001\002\003\004\005\006\007\008\009` + - `\010\011\012\013\014\015\016\017\018\019` + - `\020\021\022\023\024\025\026\027\028\029` + - `\030\031` - escapedByteLarge = `\127\128\129` + - `\130\131\132\133\134\135\136\137\138\139` + - `\140\141\142\143\144\145\146\147\148\149` + - `\150\151\152\153\154\155\156\157\158\159` + - `\160\161\162\163\164\165\166\167\168\169` + - `\170\171\172\173\174\175\176\177\178\179` + - `\180\181\182\183\184\185\186\187\188\189` + - `\190\191\192\193\194\195\196\197\198\199` + - `\200\201\202\203\204\205\206\207\208\209` + - `\210\211\212\213\214\215\216\217\218\219` + - `\220\221\222\223\224\225\226\227\228\229` + - `\230\231\232\233\234\235\236\237\238\239` + - `\240\241\242\243\244\245\246\247\248\249` + - `\250\251\252\253\254\255` -) - -// escapeByte returns the 
\DDD escaping of b which must -// satisfy b < ' ' || b > '~'. -func escapeByte(b byte) string { - if b < ' ' { - return escapedByteSmall[b*4 : b*4+4] - } - - b -= '~' + 1 - // The cast here is needed as b*4 may overflow byte. - return escapedByteLarge[int(b)*4 : int(b)*4+4] -} - -func nextByte(s string, offset int) (byte, int) { - if offset >= len(s) { - return 0, 0 - } - if s[offset] != '\\' { - // not an escape sequence - return s[offset], 1 - } - switch len(s) - offset { - case 1: // dangling escape - return 0, 0 - case 2, 3: // too short to be \ddd - default: // maybe \ddd - if isDigit(s[offset+1]) && isDigit(s[offset+2]) && isDigit(s[offset+3]) { - return dddStringToByte(s[offset+1:]), 4 - } - } - // not \ddd, just an RFC 1035 "quoted" character - return s[offset+1], 2 -} - -// SPF RR. See RFC 4408, Section 3.1.1. -type SPF struct { - Hdr RR_Header - Txt []string `dns:"txt"` -} - -func (rr *SPF) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } - -// AVC RR. See https://www.iana.org/assignments/dns-parameters/AVC/avc-completed-template. -type AVC struct { - Hdr RR_Header - Txt []string `dns:"txt"` -} - -func (rr *AVC) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) } - -// SRV RR. See RFC 2782. -type SRV struct { - Hdr RR_Header - Priority uint16 - Weight uint16 - Port uint16 - Target string `dns:"domain-name"` -} - -func (rr *SRV) String() string { - return rr.Hdr.String() + - strconv.Itoa(int(rr.Priority)) + " " + - strconv.Itoa(int(rr.Weight)) + " " + - strconv.Itoa(int(rr.Port)) + " " + sprintName(rr.Target) -} - -// NAPTR RR. See RFC 2915. 
-type NAPTR struct { - Hdr RR_Header - Order uint16 - Preference uint16 - Flags string - Service string - Regexp string - Replacement string `dns:"domain-name"` -} - -func (rr *NAPTR) String() string { - return rr.Hdr.String() + - strconv.Itoa(int(rr.Order)) + " " + - strconv.Itoa(int(rr.Preference)) + " " + - "\"" + rr.Flags + "\" " + - "\"" + rr.Service + "\" " + - "\"" + rr.Regexp + "\" " + - rr.Replacement -} - -// CERT RR. See RFC 4398. -type CERT struct { - Hdr RR_Header - Type uint16 - KeyTag uint16 - Algorithm uint8 - Certificate string `dns:"base64"` -} - -func (rr *CERT) String() string { - var ( - ok bool - certtype, algorithm string - ) - if certtype, ok = CertTypeToString[rr.Type]; !ok { - certtype = strconv.Itoa(int(rr.Type)) - } - if algorithm, ok = AlgorithmToString[rr.Algorithm]; !ok { - algorithm = strconv.Itoa(int(rr.Algorithm)) - } - return rr.Hdr.String() + certtype + - " " + strconv.Itoa(int(rr.KeyTag)) + - " " + algorithm + - " " + rr.Certificate -} - -// DNAME RR. See RFC 2672. -type DNAME struct { - Hdr RR_Header - Target string `dns:"domain-name"` -} - -func (rr *DNAME) String() string { - return rr.Hdr.String() + sprintName(rr.Target) -} - -// A RR. See RFC 1035. -type A struct { - Hdr RR_Header - A net.IP `dns:"a"` -} - -func (rr *A) String() string { - if rr.A == nil { - return rr.Hdr.String() - } - return rr.Hdr.String() + rr.A.String() -} - -// AAAA RR. See RFC 3596. -type AAAA struct { - Hdr RR_Header - AAAA net.IP `dns:"aaaa"` -} - -func (rr *AAAA) String() string { - if rr.AAAA == nil { - return rr.Hdr.String() - } - return rr.Hdr.String() + rr.AAAA.String() -} - -// PX RR. See RFC 2163. -type PX struct { - Hdr RR_Header - Preference uint16 - Map822 string `dns:"domain-name"` - Mapx400 string `dns:"domain-name"` -} - -func (rr *PX) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Map822) + " " + sprintName(rr.Mapx400) -} - -// GPOS RR. See RFC 1712. 
-type GPOS struct { - Hdr RR_Header - Longitude string - Latitude string - Altitude string -} - -func (rr *GPOS) String() string { - return rr.Hdr.String() + rr.Longitude + " " + rr.Latitude + " " + rr.Altitude -} - -// LOC RR. See RFC RFC 1876. -type LOC struct { - Hdr RR_Header - Version uint8 - Size uint8 - HorizPre uint8 - VertPre uint8 - Latitude uint32 - Longitude uint32 - Altitude uint32 -} - -// cmToM takes a cm value expressed in RFC1876 SIZE mantissa/exponent -// format and returns a string in m (two decimals for the cm) -func cmToM(m, e uint8) string { - if e < 2 { - if e == 1 { - m *= 10 - } - - return fmt.Sprintf("0.%02d", m) - } - - s := fmt.Sprintf("%d", m) - for e > 2 { - s += "0" - e-- - } - return s -} - -func (rr *LOC) String() string { - s := rr.Hdr.String() - - lat := rr.Latitude - ns := "N" - if lat > LOC_EQUATOR { - lat = lat - LOC_EQUATOR - } else { - ns = "S" - lat = LOC_EQUATOR - lat - } - h := lat / LOC_DEGREES - lat = lat % LOC_DEGREES - m := lat / LOC_HOURS - lat = lat % LOC_HOURS - s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, float64(lat)/1000, ns) - - lon := rr.Longitude - ew := "E" - if lon > LOC_PRIMEMERIDIAN { - lon = lon - LOC_PRIMEMERIDIAN - } else { - ew = "W" - lon = LOC_PRIMEMERIDIAN - lon - } - h = lon / LOC_DEGREES - lon = lon % LOC_DEGREES - m = lon / LOC_HOURS - lon = lon % LOC_HOURS - s += fmt.Sprintf("%02d %02d %0.3f %s ", h, m, float64(lon)/1000, ew) - - var alt = float64(rr.Altitude) / 100 - alt -= LOC_ALTITUDEBASE - if rr.Altitude%100 != 0 { - s += fmt.Sprintf("%.2fm ", alt) - } else { - s += fmt.Sprintf("%.0fm ", alt) - } - - s += cmToM(rr.Size&0xf0>>4, rr.Size&0x0f) + "m " - s += cmToM(rr.HorizPre&0xf0>>4, rr.HorizPre&0x0f) + "m " - s += cmToM(rr.VertPre&0xf0>>4, rr.VertPre&0x0f) + "m" - - return s -} - -// SIG RR. See RFC 2535. The SIG RR is identical to RRSIG and nowadays only used for SIG(0), See RFC 2931. -type SIG struct { - RRSIG -} - -// RRSIG RR. See RFC 4034 and RFC 3755. 
-type RRSIG struct { - Hdr RR_Header - TypeCovered uint16 - Algorithm uint8 - Labels uint8 - OrigTtl uint32 - Expiration uint32 - Inception uint32 - KeyTag uint16 - SignerName string `dns:"domain-name"` - Signature string `dns:"base64"` -} - -func (rr *RRSIG) String() string { - s := rr.Hdr.String() - s += Type(rr.TypeCovered).String() - s += " " + strconv.Itoa(int(rr.Algorithm)) + - " " + strconv.Itoa(int(rr.Labels)) + - " " + strconv.FormatInt(int64(rr.OrigTtl), 10) + - " " + TimeToString(rr.Expiration) + - " " + TimeToString(rr.Inception) + - " " + strconv.Itoa(int(rr.KeyTag)) + - " " + sprintName(rr.SignerName) + - " " + rr.Signature - return s -} - -// NSEC RR. See RFC 4034 and RFC 3755. -type NSEC struct { - Hdr RR_Header - NextDomain string `dns:"domain-name"` - TypeBitMap []uint16 `dns:"nsec"` -} - -func (rr *NSEC) String() string { - s := rr.Hdr.String() + sprintName(rr.NextDomain) - for i := 0; i < len(rr.TypeBitMap); i++ { - s += " " + Type(rr.TypeBitMap[i]).String() - } - return s -} - -func (rr *NSEC) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.NextDomain, off+l, compression, false) - lastwindow := uint32(2 ^ 32 + 1) - for _, t := range rr.TypeBitMap { - window := t / 256 - if uint32(window) != lastwindow { - l += 1 + 32 - } - lastwindow = uint32(window) - } - return l -} - -// DLV RR. See RFC 4431. -type DLV struct{ DS } - -// CDS RR. See RFC 7344. -type CDS struct{ DS } - -// DS RR. See RFC 4034 and RFC 3658. -type DS struct { - Hdr RR_Header - KeyTag uint16 - Algorithm uint8 - DigestType uint8 - Digest string `dns:"hex"` -} - -func (rr *DS) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + - " " + strconv.Itoa(int(rr.Algorithm)) + - " " + strconv.Itoa(int(rr.DigestType)) + - " " + strings.ToUpper(rr.Digest) -} - -// KX RR. See RFC 2230. 
-type KX struct { - Hdr RR_Header - Preference uint16 - Exchanger string `dns:"domain-name"` -} - -func (rr *KX) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + - " " + sprintName(rr.Exchanger) -} - -// TA RR. See http://www.watson.org/~weiler/INI1999-19.pdf. -type TA struct { - Hdr RR_Header - KeyTag uint16 - Algorithm uint8 - DigestType uint8 - Digest string `dns:"hex"` -} - -func (rr *TA) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.KeyTag)) + - " " + strconv.Itoa(int(rr.Algorithm)) + - " " + strconv.Itoa(int(rr.DigestType)) + - " " + strings.ToUpper(rr.Digest) -} - -// TALINK RR. See https://www.iana.org/assignments/dns-parameters/TALINK/talink-completed-template. -type TALINK struct { - Hdr RR_Header - PreviousName string `dns:"domain-name"` - NextName string `dns:"domain-name"` -} - -func (rr *TALINK) String() string { - return rr.Hdr.String() + - sprintName(rr.PreviousName) + " " + sprintName(rr.NextName) -} - -// SSHFP RR. See RFC RFC 4255. -type SSHFP struct { - Hdr RR_Header - Algorithm uint8 - Type uint8 - FingerPrint string `dns:"hex"` -} - -func (rr *SSHFP) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Algorithm)) + - " " + strconv.Itoa(int(rr.Type)) + - " " + strings.ToUpper(rr.FingerPrint) -} - -// KEY RR. See RFC RFC 2535. -type KEY struct { - DNSKEY -} - -// CDNSKEY RR. See RFC 7344. -type CDNSKEY struct { - DNSKEY -} - -// DNSKEY RR. See RFC 4034 and RFC 3755. -type DNSKEY struct { - Hdr RR_Header - Flags uint16 - Protocol uint8 - Algorithm uint8 - PublicKey string `dns:"base64"` -} - -func (rr *DNSKEY) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + - " " + strconv.Itoa(int(rr.Protocol)) + - " " + strconv.Itoa(int(rr.Algorithm)) + - " " + rr.PublicKey -} - -// RKEY RR. See https://www.iana.org/assignments/dns-parameters/RKEY/rkey-completed-template. 
-type RKEY struct { - Hdr RR_Header - Flags uint16 - Protocol uint8 - Algorithm uint8 - PublicKey string `dns:"base64"` -} - -func (rr *RKEY) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Flags)) + - " " + strconv.Itoa(int(rr.Protocol)) + - " " + strconv.Itoa(int(rr.Algorithm)) + - " " + rr.PublicKey -} - -// NSAPPTR RR. See RFC 1348. -type NSAPPTR struct { - Hdr RR_Header - Ptr string `dns:"domain-name"` -} - -func (rr *NSAPPTR) String() string { return rr.Hdr.String() + sprintName(rr.Ptr) } - -// NSEC3 RR. See RFC 5155. -type NSEC3 struct { - Hdr RR_Header - Hash uint8 - Flags uint8 - Iterations uint16 - SaltLength uint8 - Salt string `dns:"size-hex:SaltLength"` - HashLength uint8 - NextDomain string `dns:"size-base32:HashLength"` - TypeBitMap []uint16 `dns:"nsec"` -} - -func (rr *NSEC3) String() string { - s := rr.Hdr.String() - s += strconv.Itoa(int(rr.Hash)) + - " " + strconv.Itoa(int(rr.Flags)) + - " " + strconv.Itoa(int(rr.Iterations)) + - " " + saltToString(rr.Salt) + - " " + rr.NextDomain - for i := 0; i < len(rr.TypeBitMap); i++ { - s += " " + Type(rr.TypeBitMap[i]).String() - } - return s -} - -func (rr *NSEC3) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 6 + len(rr.Salt)/2 + 1 + len(rr.NextDomain) + 1 - lastwindow := uint32(2 ^ 32 + 1) - for _, t := range rr.TypeBitMap { - window := t / 256 - if uint32(window) != lastwindow { - l += 1 + 32 - } - lastwindow = uint32(window) - } - return l -} - -// NSEC3PARAM RR. See RFC 5155. -type NSEC3PARAM struct { - Hdr RR_Header - Hash uint8 - Flags uint8 - Iterations uint16 - SaltLength uint8 - Salt string `dns:"size-hex:SaltLength"` -} - -func (rr *NSEC3PARAM) String() string { - s := rr.Hdr.String() - s += strconv.Itoa(int(rr.Hash)) + - " " + strconv.Itoa(int(rr.Flags)) + - " " + strconv.Itoa(int(rr.Iterations)) + - " " + saltToString(rr.Salt) - return s -} - -// TKEY RR. See RFC 2930. 
-type TKEY struct { - Hdr RR_Header - Algorithm string `dns:"domain-name"` - Inception uint32 - Expiration uint32 - Mode uint16 - Error uint16 - KeySize uint16 - Key string `dns:"size-hex:KeySize"` - OtherLen uint16 - OtherData string `dns:"size-hex:OtherLen"` -} - -// TKEY has no official presentation format, but this will suffice. -func (rr *TKEY) String() string { - s := "\n;; TKEY PSEUDOSECTION:\n" - s += rr.Hdr.String() + " " + rr.Algorithm + " " + - strconv.Itoa(int(rr.KeySize)) + " " + rr.Key + " " + - strconv.Itoa(int(rr.OtherLen)) + " " + rr.OtherData - return s -} - -// RFC3597 represents an unknown/generic RR. See RFC 3597. -type RFC3597 struct { - Hdr RR_Header - Rdata string `dns:"hex"` -} - -func (rr *RFC3597) String() string { - // Let's call it a hack - s := rfc3597Header(rr.Hdr) - - s += "\\# " + strconv.Itoa(len(rr.Rdata)/2) + " " + rr.Rdata - return s -} - -func rfc3597Header(h RR_Header) string { - var s string - - s += sprintName(h.Name) + "\t" - s += strconv.FormatInt(int64(h.Ttl), 10) + "\t" - s += "CLASS" + strconv.Itoa(int(h.Class)) + "\t" - s += "TYPE" + strconv.Itoa(int(h.Rrtype)) + "\t" - return s -} - -// URI RR. See RFC 7553. -type URI struct { - Hdr RR_Header - Priority uint16 - Weight uint16 - Target string `dns:"octet"` -} - -func (rr *URI) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Priority)) + - " " + strconv.Itoa(int(rr.Weight)) + " " + sprintTxtOctet(rr.Target) -} - -// DHCID RR. See RFC 4701. -type DHCID struct { - Hdr RR_Header - Digest string `dns:"base64"` -} - -func (rr *DHCID) String() string { return rr.Hdr.String() + rr.Digest } - -// TLSA RR. See RFC 6698. -type TLSA struct { - Hdr RR_Header - Usage uint8 - Selector uint8 - MatchingType uint8 - Certificate string `dns:"hex"` -} - -func (rr *TLSA) String() string { - return rr.Hdr.String() + - strconv.Itoa(int(rr.Usage)) + - " " + strconv.Itoa(int(rr.Selector)) + - " " + strconv.Itoa(int(rr.MatchingType)) + - " " + rr.Certificate -} - -// SMIMEA RR. 
See RFC 8162. -type SMIMEA struct { - Hdr RR_Header - Usage uint8 - Selector uint8 - MatchingType uint8 - Certificate string `dns:"hex"` -} - -func (rr *SMIMEA) String() string { - s := rr.Hdr.String() + - strconv.Itoa(int(rr.Usage)) + - " " + strconv.Itoa(int(rr.Selector)) + - " " + strconv.Itoa(int(rr.MatchingType)) - - // Every Nth char needs a space on this output. If we output - // this as one giant line, we can't read it can in because in some cases - // the cert length overflows scan.maxTok (2048). - sx := splitN(rr.Certificate, 1024) // conservative value here - s += " " + strings.Join(sx, " ") - return s -} - -// HIP RR. See RFC 8005. -type HIP struct { - Hdr RR_Header - HitLength uint8 - PublicKeyAlgorithm uint8 - PublicKeyLength uint16 - Hit string `dns:"size-hex:HitLength"` - PublicKey string `dns:"size-base64:PublicKeyLength"` - RendezvousServers []string `dns:"domain-name"` -} - -func (rr *HIP) String() string { - s := rr.Hdr.String() + - strconv.Itoa(int(rr.PublicKeyAlgorithm)) + - " " + rr.Hit + - " " + rr.PublicKey - for _, d := range rr.RendezvousServers { - s += " " + sprintName(d) - } - return s -} - -// NINFO RR. See https://www.iana.org/assignments/dns-parameters/NINFO/ninfo-completed-template. -type NINFO struct { - Hdr RR_Header - ZSData []string `dns:"txt"` -} - -func (rr *NINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.ZSData) } - -// NID RR. See RFC RFC 6742. -type NID struct { - Hdr RR_Header - Preference uint16 - NodeID uint64 -} - -func (rr *NID) String() string { - s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) - node := fmt.Sprintf("%0.16x", rr.NodeID) - s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16] - return s -} - -// L32 RR, See RFC 6742. 
-type L32 struct { - Hdr RR_Header - Preference uint16 - Locator32 net.IP `dns:"a"` -} - -func (rr *L32) String() string { - if rr.Locator32 == nil { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) - } - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + - " " + rr.Locator32.String() -} - -// L64 RR, See RFC 6742. -type L64 struct { - Hdr RR_Header - Preference uint16 - Locator64 uint64 -} - -func (rr *L64) String() string { - s := rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) - node := fmt.Sprintf("%0.16X", rr.Locator64) - s += " " + node[0:4] + ":" + node[4:8] + ":" + node[8:12] + ":" + node[12:16] - return s -} - -// LP RR. See RFC 6742. -type LP struct { - Hdr RR_Header - Preference uint16 - Fqdn string `dns:"domain-name"` -} - -func (rr *LP) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Preference)) + " " + sprintName(rr.Fqdn) -} - -// EUI48 RR. See RFC 7043. -type EUI48 struct { - Hdr RR_Header - Address uint64 `dns:"uint48"` -} - -func (rr *EUI48) String() string { return rr.Hdr.String() + euiToString(rr.Address, 48) } - -// EUI64 RR. See RFC 7043. -type EUI64 struct { - Hdr RR_Header - Address uint64 -} - -func (rr *EUI64) String() string { return rr.Hdr.String() + euiToString(rr.Address, 64) } - -// CAA RR. See RFC 6844. -type CAA struct { - Hdr RR_Header - Flag uint8 - Tag string - Value string `dns:"octet"` -} - -func (rr *CAA) String() string { - return rr.Hdr.String() + strconv.Itoa(int(rr.Flag)) + " " + rr.Tag + " " + sprintTxtOctet(rr.Value) -} - -// UID RR. Deprecated, IANA-Reserved. -type UID struct { - Hdr RR_Header - Uid uint32 -} - -func (rr *UID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Uid), 10) } - -// GID RR. Deprecated, IANA-Reserved. -type GID struct { - Hdr RR_Header - Gid uint32 -} - -func (rr *GID) String() string { return rr.Hdr.String() + strconv.FormatInt(int64(rr.Gid), 10) } - -// UINFO RR. Deprecated, IANA-Reserved. 
-type UINFO struct { - Hdr RR_Header - Uinfo string -} - -func (rr *UINFO) String() string { return rr.Hdr.String() + sprintTxt([]string{rr.Uinfo}) } - -// EID RR. See http://ana-3.lcs.mit.edu/~jnc/nimrod/dns.txt. -type EID struct { - Hdr RR_Header - Endpoint string `dns:"hex"` -} - -func (rr *EID) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Endpoint) } - -// NIMLOC RR. See http://ana-3.lcs.mit.edu/~jnc/nimrod/dns.txt. -type NIMLOC struct { - Hdr RR_Header - Locator string `dns:"hex"` -} - -func (rr *NIMLOC) String() string { return rr.Hdr.String() + strings.ToUpper(rr.Locator) } - -// OPENPGPKEY RR. See RFC 7929. -type OPENPGPKEY struct { - Hdr RR_Header - PublicKey string `dns:"base64"` -} - -func (rr *OPENPGPKEY) String() string { return rr.Hdr.String() + rr.PublicKey } - -// CSYNC RR. See RFC 7477. -type CSYNC struct { - Hdr RR_Header - Serial uint32 - Flags uint16 - TypeBitMap []uint16 `dns:"nsec"` -} - -func (rr *CSYNC) String() string { - s := rr.Hdr.String() + strconv.FormatInt(int64(rr.Serial), 10) + " " + strconv.Itoa(int(rr.Flags)) - - for i := 0; i < len(rr.TypeBitMap); i++ { - s += " " + Type(rr.TypeBitMap[i]).String() - } - return s -} - -func (rr *CSYNC) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 4 + 2 - lastwindow := uint32(2 ^ 32 + 1) - for _, t := range rr.TypeBitMap { - window := t / 256 - if uint32(window) != lastwindow { - l += 1 + 32 - } - lastwindow = uint32(window) - } - return l -} - -// TimeToString translates the RRSIG's incep. and expir. times to the -// string representation used when printing the record. -// It takes serial arithmetic (RFC 1982) into account. -func TimeToString(t uint32) string { - mod := (int64(t)-time.Now().Unix())/year68 - 1 - if mod < 0 { - mod = 0 - } - ti := time.Unix(int64(t)-mod*year68, 0).UTC() - return ti.Format("20060102150405") -} - -// StringToTime translates the RRSIG's incep. and expir. 
times from -// string values like "20110403154150" to an 32 bit integer. -// It takes serial arithmetic (RFC 1982) into account. -func StringToTime(s string) (uint32, error) { - t, err := time.Parse("20060102150405", s) - if err != nil { - return 0, err - } - mod := t.Unix()/year68 - 1 - if mod < 0 { - mod = 0 - } - return uint32(t.Unix() - mod*year68), nil -} - -// saltToString converts a NSECX salt to uppercase and returns "-" when it is empty. -func saltToString(s string) string { - if len(s) == 0 { - return "-" - } - return strings.ToUpper(s) -} - -func euiToString(eui uint64, bits int) (hex string) { - switch bits { - case 64: - hex = fmt.Sprintf("%16.16x", eui) - hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] + - "-" + hex[8:10] + "-" + hex[10:12] + "-" + hex[12:14] + "-" + hex[14:16] - case 48: - hex = fmt.Sprintf("%12.12x", eui) - hex = hex[0:2] + "-" + hex[2:4] + "-" + hex[4:6] + "-" + hex[6:8] + - "-" + hex[8:10] + "-" + hex[10:12] - } - return -} - -// copyIP returns a copy of ip. -func copyIP(ip net.IP) net.IP { - p := make(net.IP, len(ip)) - copy(p, ip) - return p -} - -// SplitN splits a string into N sized string chunks. -// This might become an exported function once. -func splitN(s string, n int) []string { - if len(s) < n { - return []string{s} - } - sx := []string{} - p, i := 0, n - for { - if i <= len(s) { - sx = append(sx, s[p:i]) - } else { - sx = append(sx, s[p:]) - break - - } - p, i = p+n, i+n - } - - return sx -} diff --git a/vendor/github.com/miekg/dns/types_generate.go b/vendor/github.com/miekg/dns/types_generate.go deleted file mode 100644 index 8c897ec11..000000000 --- a/vendor/github.com/miekg/dns/types_generate.go +++ /dev/null @@ -1,278 +0,0 @@ -//+build ignore - -// types_generate.go is meant to run with go generate. It will use -// go/{importer,types} to track down all the RR struct types. 
Then for each type -// it will generate conversion tables (TypeToRR and TypeToString) and banal -// methods (len, Header, copy) based on the struct tags. The generated source is -// written to ztypes.go, and is meant to be checked into git. -package main - -import ( - "bytes" - "fmt" - "go/format" - "go/importer" - "go/types" - "log" - "os" - "strings" - "text/template" -) - -var skipLen = map[string]struct{}{ - "NSEC": {}, - "NSEC3": {}, - "OPT": {}, - "CSYNC": {}, -} - -var packageHdr = ` -// Code generated by "go run types_generate.go"; DO NOT EDIT. - -package dns - -import ( - "encoding/base64" - "net" -) - -` - -var TypeToRR = template.Must(template.New("TypeToRR").Parse(` -// TypeToRR is a map of constructors for each RR type. -var TypeToRR = map[uint16]func() RR{ -{{range .}}{{if ne . "RFC3597"}} Type{{.}}: func() RR { return new({{.}}) }, -{{end}}{{end}} } - -`)) - -var typeToString = template.Must(template.New("typeToString").Parse(` -// TypeToString is a map of strings for each RR type. -var TypeToString = map[uint16]string{ -{{range .}}{{if ne . "NSAPPTR"}} Type{{.}}: "{{.}}", -{{end}}{{end}} TypeNSAPPTR: "NSAP-PTR", -} - -`)) - -var headerFunc = template.Must(template.New("headerFunc").Parse(` -{{range .}} func (rr *{{.}}) Header() *RR_Header { return &rr.Hdr } -{{end}} - -`)) - -// getTypeStruct will take a type and the package scope, and return the -// (innermost) struct if the type is considered a RR type (currently defined as -// those structs beginning with a RR_Header, could be redefined as implementing -// the RR interface). The bool return value indicates if embedded structs were -// resolved. 
-func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) { - st, ok := t.Underlying().(*types.Struct) - if !ok { - return nil, false - } - if st.Field(0).Type() == scope.Lookup("RR_Header").Type() { - return st, false - } - if st.Field(0).Anonymous() { - st, _ := getTypeStruct(st.Field(0).Type(), scope) - return st, true - } - return nil, false -} - -func main() { - // Import and type-check the package - pkg, err := importer.Default().Import("github.com/miekg/dns") - fatalIfErr(err) - scope := pkg.Scope() - - // Collect constants like TypeX - var numberedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - b, ok := o.Type().(*types.Basic) - if !ok || b.Kind() != types.Uint16 { - continue - } - if !strings.HasPrefix(o.Name(), "Type") { - continue - } - name := strings.TrimPrefix(o.Name(), "Type") - if name == "PrivateRR" { - continue - } - numberedTypes = append(numberedTypes, name) - } - - // Collect actual types (*X) - var namedTypes []string - for _, name := range scope.Names() { - o := scope.Lookup(name) - if o == nil || !o.Exported() { - continue - } - if st, _ := getTypeStruct(o.Type(), scope); st == nil { - continue - } - if name == "PrivateRR" { - continue - } - - // Check if corresponding TypeX exists - if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" { - log.Fatalf("Constant Type%s does not exist.", o.Name()) - } - - namedTypes = append(namedTypes, o.Name()) - } - - b := &bytes.Buffer{} - b.WriteString(packageHdr) - - // Generate TypeToRR - fatalIfErr(TypeToRR.Execute(b, namedTypes)) - - // Generate typeToString - fatalIfErr(typeToString.Execute(b, numberedTypes)) - - // Generate headerFunc - fatalIfErr(headerFunc.Execute(b, namedTypes)) - - // Generate len() - fmt.Fprint(b, "// len() functions\n") - for _, name := range namedTypes { - if _, ok := skipLen[name]; ok { - continue - } - o := scope.Lookup(name) - st, isEmbedded := 
getTypeStruct(o.Type(), scope) - if isEmbedded { - continue - } - fmt.Fprintf(b, "func (rr *%s) len(off int, compression map[string]struct{}) int {\n", name) - fmt.Fprintf(b, "l := rr.Hdr.len(off, compression)\n") - for i := 1; i < st.NumFields(); i++ { - o := func(s string) { fmt.Fprintf(b, s, st.Field(i).Name()) } - - if _, ok := st.Field(i).Type().(*types.Slice); ok { - switch st.Tag(i) { - case `dns:"-"`: - // ignored - case `dns:"cdomain-name"`: - o("for _, x := range rr.%s { l += domainNameLen(x, off+l, compression, true) }\n") - case `dns:"domain-name"`: - o("for _, x := range rr.%s { l += domainNameLen(x, off+l, compression, false) }\n") - case `dns:"txt"`: - o("for _, x := range rr.%s { l += len(x) + 1 }\n") - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - continue - } - - switch { - case st.Tag(i) == `dns:"-"`: - // ignored - case st.Tag(i) == `dns:"cdomain-name"`: - o("l += domainNameLen(rr.%s, off+l, compression, true)\n") - case st.Tag(i) == `dns:"domain-name"`: - o("l += domainNameLen(rr.%s, off+l, compression, false)\n") - case st.Tag(i) == `dns:"octet"`: - o("l += len(rr.%s)\n") - case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): - fallthrough - case st.Tag(i) == `dns:"base64"`: - o("l += base64.StdEncoding.DecodedLen(len(rr.%s))\n") - case strings.HasPrefix(st.Tag(i), `dns:"size-hex:`): // this has an extra field where the length is stored - o("l += len(rr.%s)/2\n") - case strings.HasPrefix(st.Tag(i), `dns:"size-hex`): - fallthrough - case st.Tag(i) == `dns:"hex"`: - o("l += len(rr.%s)/2 + 1\n") - case st.Tag(i) == `dns:"a"`: - o("l += net.IPv4len // %s\n") - case st.Tag(i) == `dns:"aaaa"`: - o("l += net.IPv6len // %s\n") - case st.Tag(i) == `dns:"txt"`: - o("for _, t := range rr.%s { l += len(t) + 1 }\n") - case st.Tag(i) == `dns:"uint48"`: - o("l += 6 // %s\n") - case st.Tag(i) == "": - switch st.Field(i).Type().(*types.Basic).Kind() { - case types.Uint8: - o("l++ // %s\n") - case types.Uint16: - o("l += 2 // %s\n") - case 
types.Uint32: - o("l += 4 // %s\n") - case types.Uint64: - o("l += 8 // %s\n") - case types.String: - o("l += len(rr.%s) + 1\n") - default: - log.Fatalln(name, st.Field(i).Name()) - } - default: - log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) - } - } - fmt.Fprintf(b, "return l }\n") - } - - // Generate copy() - fmt.Fprint(b, "// copy() functions\n") - for _, name := range namedTypes { - o := scope.Lookup(name) - st, isEmbedded := getTypeStruct(o.Type(), scope) - if isEmbedded { - continue - } - fmt.Fprintf(b, "func (rr *%s) copy() RR {\n", name) - fields := []string{"rr.Hdr"} - for i := 1; i < st.NumFields(); i++ { - f := st.Field(i).Name() - if sl, ok := st.Field(i).Type().(*types.Slice); ok { - t := sl.Underlying().String() - t = strings.TrimPrefix(t, "[]") - if strings.Contains(t, ".") { - splits := strings.Split(t, ".") - t = splits[len(splits)-1] - } - fmt.Fprintf(b, "%s := make([]%s, len(rr.%s)); copy(%s, rr.%s)\n", - f, t, f, f, f) - fields = append(fields, f) - continue - } - if st.Field(i).Type().String() == "net.IP" { - fields = append(fields, "copyIP(rr."+f+")") - continue - } - fields = append(fields, "rr."+f) - } - fmt.Fprintf(b, "return &%s{%s}\n", name, strings.Join(fields, ",")) - fmt.Fprintf(b, "}\n") - } - - // gofmt - res, err := format.Source(b.Bytes()) - if err != nil { - b.WriteTo(os.Stderr) - log.Fatal(err) - } - - // write result - f, err := os.Create("ztypes.go") - fatalIfErr(err) - defer f.Close() - f.Write(res) -} - -func fatalIfErr(err error) { - if err != nil { - log.Fatal(err) - } -} diff --git a/vendor/github.com/miekg/dns/udp.go b/vendor/github.com/miekg/dns/udp.go deleted file mode 100644 index a4826ee2f..000000000 --- a/vendor/github.com/miekg/dns/udp.go +++ /dev/null @@ -1,102 +0,0 @@ -// +build !windows - -package dns - -import ( - "net" - - "golang.org/x/net/ipv4" - "golang.org/x/net/ipv6" -) - -// This is the required size of the OOB buffer to pass to ReadMsgUDP. 
-var udpOOBSize = func() int { - // We can't know whether we'll get an IPv4 control message or an - // IPv6 control message ahead of time. To get around this, we size - // the buffer equal to the largest of the two. - - oob4 := ipv4.NewControlMessage(ipv4.FlagDst | ipv4.FlagInterface) - oob6 := ipv6.NewControlMessage(ipv6.FlagDst | ipv6.FlagInterface) - - if len(oob4) > len(oob6) { - return len(oob4) - } - - return len(oob6) -}() - -// SessionUDP holds the remote address and the associated -// out-of-band data. -type SessionUDP struct { - raddr *net.UDPAddr - context []byte -} - -// RemoteAddr returns the remote network address. -func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } - -// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a -// net.UDPAddr. -func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { - oob := make([]byte, udpOOBSize) - n, oobn, _, raddr, err := conn.ReadMsgUDP(b, oob) - if err != nil { - return n, nil, err - } - return n, &SessionUDP{raddr, oob[:oobn]}, err -} - -// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr. -func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { - oob := correctSource(session.context) - n, _, err := conn.WriteMsgUDP(b, oob, session.raddr) - return n, err -} - -func setUDPSocketOptions(conn *net.UDPConn) error { - // Try setting the flags for both families and ignore the errors unless they - // both error. - err6 := ipv6.NewPacketConn(conn).SetControlMessage(ipv6.FlagDst|ipv6.FlagInterface, true) - err4 := ipv4.NewPacketConn(conn).SetControlMessage(ipv4.FlagDst|ipv4.FlagInterface, true) - if err6 != nil && err4 != nil { - return err4 - } - return nil -} - -// parseDstFromOOB takes oob data and returns the destination IP. 
-func parseDstFromOOB(oob []byte) net.IP { - // Start with IPv6 and then fallback to IPv4 - // TODO(fastest963): Figure out a way to prefer one or the other. Looking at - // the lvl of the header for a 0 or 41 isn't cross-platform. - cm6 := new(ipv6.ControlMessage) - if cm6.Parse(oob) == nil && cm6.Dst != nil { - return cm6.Dst - } - cm4 := new(ipv4.ControlMessage) - if cm4.Parse(oob) == nil && cm4.Dst != nil { - return cm4.Dst - } - return nil -} - -// correctSource takes oob data and returns new oob data with the Src equal to the Dst -func correctSource(oob []byte) []byte { - dst := parseDstFromOOB(oob) - if dst == nil { - return nil - } - // If the dst is definitely an IPv6, then use ipv6's ControlMessage to - // respond otherwise use ipv4's because ipv6's marshal ignores ipv4 - // addresses. - if dst.To4() == nil { - cm := new(ipv6.ControlMessage) - cm.Src = dst - oob = cm.Marshal() - } else { - cm := new(ipv4.ControlMessage) - cm.Src = dst - oob = cm.Marshal() - } - return oob -} diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_windows.go deleted file mode 100644 index 6778c3c6c..000000000 --- a/vendor/github.com/miekg/dns/udp_windows.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build windows - -package dns - -import "net" - -// SessionUDP holds the remote address -type SessionUDP struct { - raddr *net.UDPAddr -} - -// RemoteAddr returns the remote network address. -func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } - -// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a -// net.UDPAddr. -// TODO(fastest963): Once go1.10 is released, use ReadMsgUDP. 
-func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { - n, raddr, err := conn.ReadFrom(b) - if err != nil { - return n, nil, err - } - session := &SessionUDP{raddr.(*net.UDPAddr)} - return n, session, err -} - -// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr. -// TODO(fastest963): Once go1.10 is released, use WriteMsgUDP. -func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { - n, err := conn.WriteTo(b, session.raddr) - return n, err -} - -// TODO(fastest963): Once go1.10 is released and we can use *MsgUDP methods -// use the standard method in udp.go for these. -func setUDPSocketOptions(*net.UDPConn) error { return nil } -func parseDstFromOOB([]byte, net.IP) net.IP { return nil } diff --git a/vendor/github.com/miekg/dns/update.go b/vendor/github.com/miekg/dns/update.go deleted file mode 100644 index e90c5c968..000000000 --- a/vendor/github.com/miekg/dns/update.go +++ /dev/null @@ -1,106 +0,0 @@ -package dns - -// NameUsed sets the RRs in the prereq section to -// "Name is in use" RRs. RFC 2136 section 2.4.4. -func (u *Msg) NameUsed(rr []RR) { - if u.Answer == nil { - u.Answer = make([]RR, 0, len(rr)) - } - for _, r := range rr { - u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}) - } -} - -// NameNotUsed sets the RRs in the prereq section to -// "Name is in not use" RRs. RFC 2136 section 2.4.5. -func (u *Msg) NameNotUsed(rr []RR) { - if u.Answer == nil { - u.Answer = make([]RR, 0, len(rr)) - } - for _, r := range rr { - u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassNONE}}) - } -} - -// Used sets the RRs in the prereq section to -// "RRset exists (value dependent -- with rdata)" RRs. RFC 2136 section 2.4.2. 
-func (u *Msg) Used(rr []RR) { - if len(u.Question) == 0 { - panic("dns: empty question section") - } - if u.Answer == nil { - u.Answer = make([]RR, 0, len(rr)) - } - for _, r := range rr { - r.Header().Class = u.Question[0].Qclass - u.Answer = append(u.Answer, r) - } -} - -// RRsetUsed sets the RRs in the prereq section to -// "RRset exists (value independent -- no rdata)" RRs. RFC 2136 section 2.4.1. -func (u *Msg) RRsetUsed(rr []RR) { - if u.Answer == nil { - u.Answer = make([]RR, 0, len(rr)) - } - for _, r := range rr { - u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}}) - } -} - -// RRsetNotUsed sets the RRs in the prereq section to -// "RRset does not exist" RRs. RFC 2136 section 2.4.3. -func (u *Msg) RRsetNotUsed(rr []RR) { - if u.Answer == nil { - u.Answer = make([]RR, 0, len(rr)) - } - for _, r := range rr { - u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassNONE}}) - } -} - -// Insert creates a dynamic update packet that adds an complete RRset, see RFC 2136 section 2.5.1. -func (u *Msg) Insert(rr []RR) { - if len(u.Question) == 0 { - panic("dns: empty question section") - } - if u.Ns == nil { - u.Ns = make([]RR, 0, len(rr)) - } - for _, r := range rr { - r.Header().Class = u.Question[0].Qclass - u.Ns = append(u.Ns, r) - } -} - -// RemoveRRset creates a dynamic update packet that deletes an RRset, see RFC 2136 section 2.5.2. 
-func (u *Msg) RemoveRRset(rr []RR) { - if u.Ns == nil { - u.Ns = make([]RR, 0, len(rr)) - } - for _, r := range rr { - u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}}) - } -} - -// RemoveName creates a dynamic update packet that deletes all RRsets of a name, see RFC 2136 section 2.5.3 -func (u *Msg) RemoveName(rr []RR) { - if u.Ns == nil { - u.Ns = make([]RR, 0, len(rr)) - } - for _, r := range rr { - u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: TypeANY, Class: ClassANY}}) - } -} - -// Remove creates a dynamic update packet deletes RR from a RRSset, see RFC 2136 section 2.5.4 -func (u *Msg) Remove(rr []RR) { - if u.Ns == nil { - u.Ns = make([]RR, 0, len(rr)) - } - for _, r := range rr { - r.Header().Class = ClassNONE - r.Header().Ttl = 0 - u.Ns = append(u.Ns, r) - } -} diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go deleted file mode 100644 index 257dee4d1..000000000 --- a/vendor/github.com/miekg/dns/version.go +++ /dev/null @@ -1,15 +0,0 @@ -package dns - -import "fmt" - -// Version is current version of this library. -var Version = V{1, 1, 1} - -// V holds the version of this library. -type V struct { - Major, Minor, Patch int -} - -func (v V) String() string { - return fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch) -} diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go deleted file mode 100644 index 5d0ff5c8a..000000000 --- a/vendor/github.com/miekg/dns/xfr.go +++ /dev/null @@ -1,260 +0,0 @@ -package dns - -import ( - "fmt" - "time" -) - -// Envelope is used when doing a zone transfer with a remote server. -type Envelope struct { - RR []RR // The set of RRs in the answer section of the xfr reply message. - Error error // If something went wrong, this contains the error. -} - -// A Transfer defines parameters that are used during a zone transfer. 
-type Transfer struct { - *Conn - DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds - ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - TsigSecret map[string]string // Secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) - tsigTimersOnly bool -} - -// Think we need to away to stop the transfer - -// In performs an incoming transfer with the server in a. -// If you would like to set the source IP, or some other attribute -// of a Dialer for a Transfer, you can do so by specifying the attributes -// in the Transfer.Conn: -// -// d := net.Dialer{LocalAddr: transfer_source} -// con, err := d.Dial("tcp", master) -// dnscon := &dns.Conn{Conn:con} -// transfer = &dns.Transfer{Conn: dnscon} -// channel, err := transfer.In(message, master) -// -func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) { - timeout := dnsTimeout - if t.DialTimeout != 0 { - timeout = t.DialTimeout - } - if t.Conn == nil { - t.Conn, err = DialTimeout("tcp", a, timeout) - if err != nil { - return nil, err - } - } - if err := t.WriteMsg(q); err != nil { - return nil, err - } - env = make(chan *Envelope) - go func() { - if q.Question[0].Qtype == TypeAXFR { - go t.inAxfr(q, env) - return - } - if q.Question[0].Qtype == TypeIXFR { - go t.inIxfr(q, env) - return - } - }() - return env, nil -} - -func (t *Transfer) inAxfr(q *Msg, c chan *Envelope) { - first := true - defer t.Close() - defer close(c) - timeout := dnsTimeout - if t.ReadTimeout != 0 { - timeout = t.ReadTimeout - } - for { - t.Conn.SetReadDeadline(time.Now().Add(timeout)) - in, err := t.ReadMsg() - if err != nil { - c <- &Envelope{nil, err} - return - } - if q.Id != in.Id { - c <- &Envelope{in.Answer, ErrId} - return - } - if first { - if in.Rcode != RcodeSuccess { - c <- &Envelope{in.Answer, &Error{err: 
fmt.Sprintf(errXFR, in.Rcode)}} - return - } - if !isSOAFirst(in) { - c <- &Envelope{in.Answer, ErrSoa} - return - } - first = !first - // only one answer that is SOA, receive more - if len(in.Answer) == 1 { - t.tsigTimersOnly = true - c <- &Envelope{in.Answer, nil} - continue - } - } - - if !first { - t.tsigTimersOnly = true // Subsequent envelopes use this. - if isSOALast(in) { - c <- &Envelope{in.Answer, nil} - return - } - c <- &Envelope{in.Answer, nil} - } - } -} - -func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) { - serial := uint32(0) // The first serial seen is the current server serial - axfr := true - n := 0 - qser := q.Ns[0].(*SOA).Serial - defer t.Close() - defer close(c) - timeout := dnsTimeout - if t.ReadTimeout != 0 { - timeout = t.ReadTimeout - } - for { - t.SetReadDeadline(time.Now().Add(timeout)) - in, err := t.ReadMsg() - if err != nil { - c <- &Envelope{nil, err} - return - } - if q.Id != in.Id { - c <- &Envelope{in.Answer, ErrId} - return - } - if in.Rcode != RcodeSuccess { - c <- &Envelope{in.Answer, &Error{err: fmt.Sprintf(errXFR, in.Rcode)}} - return - } - if n == 0 { - // Check if the returned answer is ok - if !isSOAFirst(in) { - c <- &Envelope{in.Answer, ErrSoa} - return - } - // This serial is important - serial = in.Answer[0].(*SOA).Serial - // Check if there are no changes in zone - if qser >= serial { - c <- &Envelope{in.Answer, nil} - return - } - } - // Now we need to check each message for SOA records, to see what we need to do - t.tsigTimersOnly = true - for _, rr := range in.Answer { - if v, ok := rr.(*SOA); ok { - if v.Serial == serial { - n++ - // quit if it's a full axfr or the the servers' SOA is repeated the third time - if axfr && n == 2 || n == 3 { - c <- &Envelope{in.Answer, nil} - return - } - } else if axfr { - // it's an ixfr - axfr = false - } - } - } - c <- &Envelope{in.Answer, nil} - } -} - -// Out performs an outgoing transfer with the client connecting in w. 
-// Basic use pattern: -// -// ch := make(chan *dns.Envelope) -// tr := new(dns.Transfer) -// go tr.Out(w, r, ch) -// ch <- &dns.Envelope{RR: []dns.RR{soa, rr1, rr2, rr3, soa}} -// close(ch) -// w.Hijack() -// // w.Close() // Client closes connection -// -// The server is responsible for sending the correct sequence of RRs through the -// channel ch. -func (t *Transfer) Out(w ResponseWriter, q *Msg, ch chan *Envelope) error { - for x := range ch { - r := new(Msg) - // Compress? - r.SetReply(q) - r.Authoritative = true - // assume it fits TODO(miek): fix - r.Answer = append(r.Answer, x.RR...) - if err := w.WriteMsg(r); err != nil { - return err - } - } - w.TsigTimersOnly(true) - return nil -} - -// ReadMsg reads a message from the transfer connection t. -func (t *Transfer) ReadMsg() (*Msg, error) { - m := new(Msg) - p := make([]byte, MaxMsgSize) - n, err := t.Read(p) - if err != nil && n == 0 { - return nil, err - } - p = p[:n] - if err := m.Unpack(p); err != nil { - return nil, err - } - if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil { - if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok { - return m, ErrSecret - } - // Need to work on the original message p, as that was used to calculate the tsig. - err = TsigVerify(p, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly) - t.tsigRequestMAC = ts.MAC - } - return m, err -} - -// WriteMsg writes a message through the transfer connection t. 
-func (t *Transfer) WriteMsg(m *Msg) (err error) { - var out []byte - if ts := m.IsTsig(); ts != nil && t.TsigSecret != nil { - if _, ok := t.TsigSecret[ts.Hdr.Name]; !ok { - return ErrSecret - } - out, t.tsigRequestMAC, err = TsigGenerate(m, t.TsigSecret[ts.Hdr.Name], t.tsigRequestMAC, t.tsigTimersOnly) - } else { - out, err = m.Pack() - } - if err != nil { - return err - } - if _, err = t.Write(out); err != nil { - return err - } - return nil -} - -func isSOAFirst(in *Msg) bool { - if len(in.Answer) > 0 { - return in.Answer[0].Header().Rrtype == TypeSOA - } - return false -} - -func isSOALast(in *Msg) bool { - if len(in.Answer) > 0 { - return in.Answer[len(in.Answer)-1].Header().Rrtype == TypeSOA - } - return false -} - -const errXFR = "bad xfr rcode: %d" diff --git a/vendor/github.com/miekg/dns/zduplicate.go b/vendor/github.com/miekg/dns/zduplicate.go deleted file mode 100644 index ba9863b23..000000000 --- a/vendor/github.com/miekg/dns/zduplicate.go +++ /dev/null @@ -1,943 +0,0 @@ -// Code generated by "go run duplicate_generate.go"; DO NOT EDIT. 
- -package dns - -// isDuplicateRdata calls the rdata specific functions -func isDuplicateRdata(r1, r2 RR) bool { - switch r1.Header().Rrtype { - case TypeA: - return isDuplicateA(r1.(*A), r2.(*A)) - case TypeAAAA: - return isDuplicateAAAA(r1.(*AAAA), r2.(*AAAA)) - case TypeAFSDB: - return isDuplicateAFSDB(r1.(*AFSDB), r2.(*AFSDB)) - case TypeAVC: - return isDuplicateAVC(r1.(*AVC), r2.(*AVC)) - case TypeCAA: - return isDuplicateCAA(r1.(*CAA), r2.(*CAA)) - case TypeCERT: - return isDuplicateCERT(r1.(*CERT), r2.(*CERT)) - case TypeCNAME: - return isDuplicateCNAME(r1.(*CNAME), r2.(*CNAME)) - case TypeCSYNC: - return isDuplicateCSYNC(r1.(*CSYNC), r2.(*CSYNC)) - case TypeDHCID: - return isDuplicateDHCID(r1.(*DHCID), r2.(*DHCID)) - case TypeDNAME: - return isDuplicateDNAME(r1.(*DNAME), r2.(*DNAME)) - case TypeDNSKEY: - return isDuplicateDNSKEY(r1.(*DNSKEY), r2.(*DNSKEY)) - case TypeDS: - return isDuplicateDS(r1.(*DS), r2.(*DS)) - case TypeEID: - return isDuplicateEID(r1.(*EID), r2.(*EID)) - case TypeEUI48: - return isDuplicateEUI48(r1.(*EUI48), r2.(*EUI48)) - case TypeEUI64: - return isDuplicateEUI64(r1.(*EUI64), r2.(*EUI64)) - case TypeGID: - return isDuplicateGID(r1.(*GID), r2.(*GID)) - case TypeGPOS: - return isDuplicateGPOS(r1.(*GPOS), r2.(*GPOS)) - case TypeHINFO: - return isDuplicateHINFO(r1.(*HINFO), r2.(*HINFO)) - case TypeHIP: - return isDuplicateHIP(r1.(*HIP), r2.(*HIP)) - case TypeKX: - return isDuplicateKX(r1.(*KX), r2.(*KX)) - case TypeL32: - return isDuplicateL32(r1.(*L32), r2.(*L32)) - case TypeL64: - return isDuplicateL64(r1.(*L64), r2.(*L64)) - case TypeLOC: - return isDuplicateLOC(r1.(*LOC), r2.(*LOC)) - case TypeLP: - return isDuplicateLP(r1.(*LP), r2.(*LP)) - case TypeMB: - return isDuplicateMB(r1.(*MB), r2.(*MB)) - case TypeMD: - return isDuplicateMD(r1.(*MD), r2.(*MD)) - case TypeMF: - return isDuplicateMF(r1.(*MF), r2.(*MF)) - case TypeMG: - return isDuplicateMG(r1.(*MG), r2.(*MG)) - case TypeMINFO: - return isDuplicateMINFO(r1.(*MINFO), 
r2.(*MINFO)) - case TypeMR: - return isDuplicateMR(r1.(*MR), r2.(*MR)) - case TypeMX: - return isDuplicateMX(r1.(*MX), r2.(*MX)) - case TypeNAPTR: - return isDuplicateNAPTR(r1.(*NAPTR), r2.(*NAPTR)) - case TypeNID: - return isDuplicateNID(r1.(*NID), r2.(*NID)) - case TypeNIMLOC: - return isDuplicateNIMLOC(r1.(*NIMLOC), r2.(*NIMLOC)) - case TypeNINFO: - return isDuplicateNINFO(r1.(*NINFO), r2.(*NINFO)) - case TypeNS: - return isDuplicateNS(r1.(*NS), r2.(*NS)) - case TypeNSAPPTR: - return isDuplicateNSAPPTR(r1.(*NSAPPTR), r2.(*NSAPPTR)) - case TypeNSEC: - return isDuplicateNSEC(r1.(*NSEC), r2.(*NSEC)) - case TypeNSEC3: - return isDuplicateNSEC3(r1.(*NSEC3), r2.(*NSEC3)) - case TypeNSEC3PARAM: - return isDuplicateNSEC3PARAM(r1.(*NSEC3PARAM), r2.(*NSEC3PARAM)) - case TypeOPENPGPKEY: - return isDuplicateOPENPGPKEY(r1.(*OPENPGPKEY), r2.(*OPENPGPKEY)) - case TypePTR: - return isDuplicatePTR(r1.(*PTR), r2.(*PTR)) - case TypePX: - return isDuplicatePX(r1.(*PX), r2.(*PX)) - case TypeRKEY: - return isDuplicateRKEY(r1.(*RKEY), r2.(*RKEY)) - case TypeRP: - return isDuplicateRP(r1.(*RP), r2.(*RP)) - case TypeRRSIG: - return isDuplicateRRSIG(r1.(*RRSIG), r2.(*RRSIG)) - case TypeRT: - return isDuplicateRT(r1.(*RT), r2.(*RT)) - case TypeSMIMEA: - return isDuplicateSMIMEA(r1.(*SMIMEA), r2.(*SMIMEA)) - case TypeSOA: - return isDuplicateSOA(r1.(*SOA), r2.(*SOA)) - case TypeSPF: - return isDuplicateSPF(r1.(*SPF), r2.(*SPF)) - case TypeSRV: - return isDuplicateSRV(r1.(*SRV), r2.(*SRV)) - case TypeSSHFP: - return isDuplicateSSHFP(r1.(*SSHFP), r2.(*SSHFP)) - case TypeTA: - return isDuplicateTA(r1.(*TA), r2.(*TA)) - case TypeTALINK: - return isDuplicateTALINK(r1.(*TALINK), r2.(*TALINK)) - case TypeTKEY: - return isDuplicateTKEY(r1.(*TKEY), r2.(*TKEY)) - case TypeTLSA: - return isDuplicateTLSA(r1.(*TLSA), r2.(*TLSA)) - case TypeTSIG: - return isDuplicateTSIG(r1.(*TSIG), r2.(*TSIG)) - case TypeTXT: - return isDuplicateTXT(r1.(*TXT), r2.(*TXT)) - case TypeUID: - return 
isDuplicateUID(r1.(*UID), r2.(*UID)) - case TypeUINFO: - return isDuplicateUINFO(r1.(*UINFO), r2.(*UINFO)) - case TypeURI: - return isDuplicateURI(r1.(*URI), r2.(*URI)) - case TypeX25: - return isDuplicateX25(r1.(*X25), r2.(*X25)) - } - return false -} - -// isDuplicate() functions - -func isDuplicateA(r1, r2 *A) bool { - if len(r1.A) != len(r2.A) { - return false - } - for i := 0; i < len(r1.A); i++ { - if r1.A[i] != r2.A[i] { - return false - } - } - return true -} - -func isDuplicateAAAA(r1, r2 *AAAA) bool { - if len(r1.AAAA) != len(r2.AAAA) { - return false - } - for i := 0; i < len(r1.AAAA); i++ { - if r1.AAAA[i] != r2.AAAA[i] { - return false - } - } - return true -} - -func isDuplicateAFSDB(r1, r2 *AFSDB) bool { - if r1.Subtype != r2.Subtype { - return false - } - if !isDulicateName(r1.Hostname, r2.Hostname) { - return false - } - return true -} - -func isDuplicateAVC(r1, r2 *AVC) bool { - if len(r1.Txt) != len(r2.Txt) { - return false - } - for i := 0; i < len(r1.Txt); i++ { - if r1.Txt[i] != r2.Txt[i] { - return false - } - } - return true -} - -func isDuplicateCAA(r1, r2 *CAA) bool { - if r1.Flag != r2.Flag { - return false - } - if r1.Tag != r2.Tag { - return false - } - if r1.Value != r2.Value { - return false - } - return true -} - -func isDuplicateCERT(r1, r2 *CERT) bool { - if r1.Type != r2.Type { - return false - } - if r1.KeyTag != r2.KeyTag { - return false - } - if r1.Algorithm != r2.Algorithm { - return false - } - if r1.Certificate != r2.Certificate { - return false - } - return true -} - -func isDuplicateCNAME(r1, r2 *CNAME) bool { - if !isDulicateName(r1.Target, r2.Target) { - return false - } - return true -} - -func isDuplicateCSYNC(r1, r2 *CSYNC) bool { - if r1.Serial != r2.Serial { - return false - } - if r1.Flags != r2.Flags { - return false - } - if len(r1.TypeBitMap) != len(r2.TypeBitMap) { - return false - } - for i := 0; i < len(r1.TypeBitMap); i++ { - if r1.TypeBitMap[i] != r2.TypeBitMap[i] { - return false - } - } - return true -} 
- -func isDuplicateDHCID(r1, r2 *DHCID) bool { - if r1.Digest != r2.Digest { - return false - } - return true -} - -func isDuplicateDNAME(r1, r2 *DNAME) bool { - if !isDulicateName(r1.Target, r2.Target) { - return false - } - return true -} - -func isDuplicateDNSKEY(r1, r2 *DNSKEY) bool { - if r1.Flags != r2.Flags { - return false - } - if r1.Protocol != r2.Protocol { - return false - } - if r1.Algorithm != r2.Algorithm { - return false - } - if r1.PublicKey != r2.PublicKey { - return false - } - return true -} - -func isDuplicateDS(r1, r2 *DS) bool { - if r1.KeyTag != r2.KeyTag { - return false - } - if r1.Algorithm != r2.Algorithm { - return false - } - if r1.DigestType != r2.DigestType { - return false - } - if r1.Digest != r2.Digest { - return false - } - return true -} - -func isDuplicateEID(r1, r2 *EID) bool { - if r1.Endpoint != r2.Endpoint { - return false - } - return true -} - -func isDuplicateEUI48(r1, r2 *EUI48) bool { - if r1.Address != r2.Address { - return false - } - return true -} - -func isDuplicateEUI64(r1, r2 *EUI64) bool { - if r1.Address != r2.Address { - return false - } - return true -} - -func isDuplicateGID(r1, r2 *GID) bool { - if r1.Gid != r2.Gid { - return false - } - return true -} - -func isDuplicateGPOS(r1, r2 *GPOS) bool { - if r1.Longitude != r2.Longitude { - return false - } - if r1.Latitude != r2.Latitude { - return false - } - if r1.Altitude != r2.Altitude { - return false - } - return true -} - -func isDuplicateHINFO(r1, r2 *HINFO) bool { - if r1.Cpu != r2.Cpu { - return false - } - if r1.Os != r2.Os { - return false - } - return true -} - -func isDuplicateHIP(r1, r2 *HIP) bool { - if r1.HitLength != r2.HitLength { - return false - } - if r1.PublicKeyAlgorithm != r2.PublicKeyAlgorithm { - return false - } - if r1.PublicKeyLength != r2.PublicKeyLength { - return false - } - if r1.Hit != r2.Hit { - return false - } - if r1.PublicKey != r2.PublicKey { - return false - } - if len(r1.RendezvousServers) != len(r2.RendezvousServers) { 
- return false - } - for i := 0; i < len(r1.RendezvousServers); i++ { - if !isDulicateName(r1.RendezvousServers[i], r2.RendezvousServers[i]) { - return false - } - } - return true -} - -func isDuplicateKX(r1, r2 *KX) bool { - if r1.Preference != r2.Preference { - return false - } - if !isDulicateName(r1.Exchanger, r2.Exchanger) { - return false - } - return true -} - -func isDuplicateL32(r1, r2 *L32) bool { - if r1.Preference != r2.Preference { - return false - } - if len(r1.Locator32) != len(r2.Locator32) { - return false - } - for i := 0; i < len(r1.Locator32); i++ { - if r1.Locator32[i] != r2.Locator32[i] { - return false - } - } - return true -} - -func isDuplicateL64(r1, r2 *L64) bool { - if r1.Preference != r2.Preference { - return false - } - if r1.Locator64 != r2.Locator64 { - return false - } - return true -} - -func isDuplicateLOC(r1, r2 *LOC) bool { - if r1.Version != r2.Version { - return false - } - if r1.Size != r2.Size { - return false - } - if r1.HorizPre != r2.HorizPre { - return false - } - if r1.VertPre != r2.VertPre { - return false - } - if r1.Latitude != r2.Latitude { - return false - } - if r1.Longitude != r2.Longitude { - return false - } - if r1.Altitude != r2.Altitude { - return false - } - return true -} - -func isDuplicateLP(r1, r2 *LP) bool { - if r1.Preference != r2.Preference { - return false - } - if !isDulicateName(r1.Fqdn, r2.Fqdn) { - return false - } - return true -} - -func isDuplicateMB(r1, r2 *MB) bool { - if !isDulicateName(r1.Mb, r2.Mb) { - return false - } - return true -} - -func isDuplicateMD(r1, r2 *MD) bool { - if !isDulicateName(r1.Md, r2.Md) { - return false - } - return true -} - -func isDuplicateMF(r1, r2 *MF) bool { - if !isDulicateName(r1.Mf, r2.Mf) { - return false - } - return true -} - -func isDuplicateMG(r1, r2 *MG) bool { - if !isDulicateName(r1.Mg, r2.Mg) { - return false - } - return true -} - -func isDuplicateMINFO(r1, r2 *MINFO) bool { - if !isDulicateName(r1.Rmail, r2.Rmail) { - return false - } - if 
!isDulicateName(r1.Email, r2.Email) { - return false - } - return true -} - -func isDuplicateMR(r1, r2 *MR) bool { - if !isDulicateName(r1.Mr, r2.Mr) { - return false - } - return true -} - -func isDuplicateMX(r1, r2 *MX) bool { - if r1.Preference != r2.Preference { - return false - } - if !isDulicateName(r1.Mx, r2.Mx) { - return false - } - return true -} - -func isDuplicateNAPTR(r1, r2 *NAPTR) bool { - if r1.Order != r2.Order { - return false - } - if r1.Preference != r2.Preference { - return false - } - if r1.Flags != r2.Flags { - return false - } - if r1.Service != r2.Service { - return false - } - if r1.Regexp != r2.Regexp { - return false - } - if !isDulicateName(r1.Replacement, r2.Replacement) { - return false - } - return true -} - -func isDuplicateNID(r1, r2 *NID) bool { - if r1.Preference != r2.Preference { - return false - } - if r1.NodeID != r2.NodeID { - return false - } - return true -} - -func isDuplicateNIMLOC(r1, r2 *NIMLOC) bool { - if r1.Locator != r2.Locator { - return false - } - return true -} - -func isDuplicateNINFO(r1, r2 *NINFO) bool { - if len(r1.ZSData) != len(r2.ZSData) { - return false - } - for i := 0; i < len(r1.ZSData); i++ { - if r1.ZSData[i] != r2.ZSData[i] { - return false - } - } - return true -} - -func isDuplicateNS(r1, r2 *NS) bool { - if !isDulicateName(r1.Ns, r2.Ns) { - return false - } - return true -} - -func isDuplicateNSAPPTR(r1, r2 *NSAPPTR) bool { - if !isDulicateName(r1.Ptr, r2.Ptr) { - return false - } - return true -} - -func isDuplicateNSEC(r1, r2 *NSEC) bool { - if !isDulicateName(r1.NextDomain, r2.NextDomain) { - return false - } - if len(r1.TypeBitMap) != len(r2.TypeBitMap) { - return false - } - for i := 0; i < len(r1.TypeBitMap); i++ { - if r1.TypeBitMap[i] != r2.TypeBitMap[i] { - return false - } - } - return true -} - -func isDuplicateNSEC3(r1, r2 *NSEC3) bool { - if r1.Hash != r2.Hash { - return false - } - if r1.Flags != r2.Flags { - return false - } - if r1.Iterations != r2.Iterations { - return false - 
} - if r1.SaltLength != r2.SaltLength { - return false - } - if r1.Salt != r2.Salt { - return false - } - if r1.HashLength != r2.HashLength { - return false - } - if r1.NextDomain != r2.NextDomain { - return false - } - if len(r1.TypeBitMap) != len(r2.TypeBitMap) { - return false - } - for i := 0; i < len(r1.TypeBitMap); i++ { - if r1.TypeBitMap[i] != r2.TypeBitMap[i] { - return false - } - } - return true -} - -func isDuplicateNSEC3PARAM(r1, r2 *NSEC3PARAM) bool { - if r1.Hash != r2.Hash { - return false - } - if r1.Flags != r2.Flags { - return false - } - if r1.Iterations != r2.Iterations { - return false - } - if r1.SaltLength != r2.SaltLength { - return false - } - if r1.Salt != r2.Salt { - return false - } - return true -} - -func isDuplicateOPENPGPKEY(r1, r2 *OPENPGPKEY) bool { - if r1.PublicKey != r2.PublicKey { - return false - } - return true -} - -func isDuplicatePTR(r1, r2 *PTR) bool { - if !isDulicateName(r1.Ptr, r2.Ptr) { - return false - } - return true -} - -func isDuplicatePX(r1, r2 *PX) bool { - if r1.Preference != r2.Preference { - return false - } - if !isDulicateName(r1.Map822, r2.Map822) { - return false - } - if !isDulicateName(r1.Mapx400, r2.Mapx400) { - return false - } - return true -} - -func isDuplicateRKEY(r1, r2 *RKEY) bool { - if r1.Flags != r2.Flags { - return false - } - if r1.Protocol != r2.Protocol { - return false - } - if r1.Algorithm != r2.Algorithm { - return false - } - if r1.PublicKey != r2.PublicKey { - return false - } - return true -} - -func isDuplicateRP(r1, r2 *RP) bool { - if !isDulicateName(r1.Mbox, r2.Mbox) { - return false - } - if !isDulicateName(r1.Txt, r2.Txt) { - return false - } - return true -} - -func isDuplicateRRSIG(r1, r2 *RRSIG) bool { - if r1.TypeCovered != r2.TypeCovered { - return false - } - if r1.Algorithm != r2.Algorithm { - return false - } - if r1.Labels != r2.Labels { - return false - } - if r1.OrigTtl != r2.OrigTtl { - return false - } - if r1.Expiration != r2.Expiration { - return false - } - 
if r1.Inception != r2.Inception { - return false - } - if r1.KeyTag != r2.KeyTag { - return false - } - if !isDulicateName(r1.SignerName, r2.SignerName) { - return false - } - if r1.Signature != r2.Signature { - return false - } - return true -} - -func isDuplicateRT(r1, r2 *RT) bool { - if r1.Preference != r2.Preference { - return false - } - if !isDulicateName(r1.Host, r2.Host) { - return false - } - return true -} - -func isDuplicateSMIMEA(r1, r2 *SMIMEA) bool { - if r1.Usage != r2.Usage { - return false - } - if r1.Selector != r2.Selector { - return false - } - if r1.MatchingType != r2.MatchingType { - return false - } - if r1.Certificate != r2.Certificate { - return false - } - return true -} - -func isDuplicateSOA(r1, r2 *SOA) bool { - if !isDulicateName(r1.Ns, r2.Ns) { - return false - } - if !isDulicateName(r1.Mbox, r2.Mbox) { - return false - } - if r1.Serial != r2.Serial { - return false - } - if r1.Refresh != r2.Refresh { - return false - } - if r1.Retry != r2.Retry { - return false - } - if r1.Expire != r2.Expire { - return false - } - if r1.Minttl != r2.Minttl { - return false - } - return true -} - -func isDuplicateSPF(r1, r2 *SPF) bool { - if len(r1.Txt) != len(r2.Txt) { - return false - } - for i := 0; i < len(r1.Txt); i++ { - if r1.Txt[i] != r2.Txt[i] { - return false - } - } - return true -} - -func isDuplicateSRV(r1, r2 *SRV) bool { - if r1.Priority != r2.Priority { - return false - } - if r1.Weight != r2.Weight { - return false - } - if r1.Port != r2.Port { - return false - } - if !isDulicateName(r1.Target, r2.Target) { - return false - } - return true -} - -func isDuplicateSSHFP(r1, r2 *SSHFP) bool { - if r1.Algorithm != r2.Algorithm { - return false - } - if r1.Type != r2.Type { - return false - } - if r1.FingerPrint != r2.FingerPrint { - return false - } - return true -} - -func isDuplicateTA(r1, r2 *TA) bool { - if r1.KeyTag != r2.KeyTag { - return false - } - if r1.Algorithm != r2.Algorithm { - return false - } - if r1.DigestType != 
r2.DigestType { - return false - } - if r1.Digest != r2.Digest { - return false - } - return true -} - -func isDuplicateTALINK(r1, r2 *TALINK) bool { - if !isDulicateName(r1.PreviousName, r2.PreviousName) { - return false - } - if !isDulicateName(r1.NextName, r2.NextName) { - return false - } - return true -} - -func isDuplicateTKEY(r1, r2 *TKEY) bool { - if !isDulicateName(r1.Algorithm, r2.Algorithm) { - return false - } - if r1.Inception != r2.Inception { - return false - } - if r1.Expiration != r2.Expiration { - return false - } - if r1.Mode != r2.Mode { - return false - } - if r1.Error != r2.Error { - return false - } - if r1.KeySize != r2.KeySize { - return false - } - if r1.Key != r2.Key { - return false - } - if r1.OtherLen != r2.OtherLen { - return false - } - if r1.OtherData != r2.OtherData { - return false - } - return true -} - -func isDuplicateTLSA(r1, r2 *TLSA) bool { - if r1.Usage != r2.Usage { - return false - } - if r1.Selector != r2.Selector { - return false - } - if r1.MatchingType != r2.MatchingType { - return false - } - if r1.Certificate != r2.Certificate { - return false - } - return true -} - -func isDuplicateTSIG(r1, r2 *TSIG) bool { - if !isDulicateName(r1.Algorithm, r2.Algorithm) { - return false - } - if r1.TimeSigned != r2.TimeSigned { - return false - } - if r1.Fudge != r2.Fudge { - return false - } - if r1.MACSize != r2.MACSize { - return false - } - if r1.MAC != r2.MAC { - return false - } - if r1.OrigId != r2.OrigId { - return false - } - if r1.Error != r2.Error { - return false - } - if r1.OtherLen != r2.OtherLen { - return false - } - if r1.OtherData != r2.OtherData { - return false - } - return true -} - -func isDuplicateTXT(r1, r2 *TXT) bool { - if len(r1.Txt) != len(r2.Txt) { - return false - } - for i := 0; i < len(r1.Txt); i++ { - if r1.Txt[i] != r2.Txt[i] { - return false - } - } - return true -} - -func isDuplicateUID(r1, r2 *UID) bool { - if r1.Uid != r2.Uid { - return false - } - return true -} - -func isDuplicateUINFO(r1, 
r2 *UINFO) bool { - if r1.Uinfo != r2.Uinfo { - return false - } - return true -} - -func isDuplicateURI(r1, r2 *URI) bool { - if r1.Priority != r2.Priority { - return false - } - if r1.Weight != r2.Weight { - return false - } - if r1.Target != r2.Target { - return false - } - return true -} - -func isDuplicateX25(r1, r2 *X25) bool { - if r1.PSDNAddress != r2.PSDNAddress { - return false - } - return true -} diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go deleted file mode 100644 index 3e93a782f..000000000 --- a/vendor/github.com/miekg/dns/zmsg.go +++ /dev/null @@ -1,3475 +0,0 @@ -// Code generated by "go run msg_generate.go"; DO NOT EDIT. - -package dns - -// pack*() functions - -func (rr *A) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packDataA(rr.A, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *AAAA) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packDataAAAA(rr.AAAA, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *AFSDB) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Subtype, msg, off) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Hostname, msg, off, compression, false) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *ANY) pack(msg []byte, off int, compression compressionMap, compress 
bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *AVC) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packStringTxt(rr.Txt, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *CAA) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Flag, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packString(rr.Tag, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packStringOctet(rr.Value, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *CDNSKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Flags, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Protocol, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packStringBase64(rr.PublicKey, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *CDS) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = 
packUint16(rr.KeyTag, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.DigestType, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packStringHex(rr.Digest, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *CERT) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Type, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.KeyTag, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packStringBase64(rr.Certificate, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *CNAME) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Target, msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *CSYNC) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint32(rr.Serial, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Flags, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packDataNsec(rr.TypeBitMap, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, 
off, nil -} - -func (rr *DHCID) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packStringBase64(rr.Digest, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *DLV) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.KeyTag, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.DigestType, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packStringHex(rr.Digest, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *DNAME) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Target, msg, off, compression, false) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *DNSKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Flags, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Protocol, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packStringBase64(rr.PublicKey, msg, off) - if err != 
nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *DS) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.KeyTag, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.DigestType, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packStringHex(rr.Digest, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *EID) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packStringHex(rr.Endpoint, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *EUI48) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint48(rr.Address, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *EUI64) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint64(rr.Address, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *GID) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - 
return headerEnd, off, err - } - off, err = packUint32(rr.Gid, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *GPOS) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packString(rr.Longitude, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packString(rr.Latitude, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packString(rr.Altitude, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *HINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packString(rr.Cpu, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packString(rr.Os, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *HIP) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.HitLength, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.PublicKeyAlgorithm, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.PublicKeyLength, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packStringHex(rr.Hit, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packStringBase64(rr.PublicKey, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packDataDomainNames(rr.RendezvousServers, msg, off, compression, false) - if err != nil 
{ - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *KEY) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Flags, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Protocol, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packStringBase64(rr.PublicKey, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *KX) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Exchanger, msg, off, compression, false) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *L32) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packDataA(rr.Locator32, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *L64) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return headerEnd, off, err - } - off, 
err = packUint64(rr.Locator64, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *LOC) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Version, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Size, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.HorizPre, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.VertPre, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint32(rr.Latitude, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint32(rr.Longitude, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint32(rr.Altitude, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *LP) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Fqdn, msg, off, compression, false) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *MB) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Mb, msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *MD) pack(msg []byte, off int, compression compressionMap, 
compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Md, msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *MF) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Mf, msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *MG) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Mg, msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *MINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Rmail, msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Email, msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *MR) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Mr, msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *MX) pack(msg []byte, 
off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Mx, msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *NAPTR) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Order, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packString(rr.Flags, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packString(rr.Service, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packString(rr.Regexp, msg, off) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Replacement, msg, off, compression, false) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *NID) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint64(rr.NodeID, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *NIMLOC) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, 
err - } - off, err = packStringHex(rr.Locator, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *NINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packStringTxt(rr.ZSData, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *NS) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Ns, msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *NSAPPTR) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Ptr, msg, off, compression, false) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *NSEC) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.NextDomain, msg, off, compression, false) - if err != nil { - return headerEnd, off, err - } - off, err = packDataNsec(rr.TypeBitMap, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *NSEC3) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = 
packUint8(rr.Hash, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Flags, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Iterations, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.SaltLength, msg, off) - if err != nil { - return headerEnd, off, err - } - // Only pack salt if value is not "-", i.e. empty - if rr.Salt != "-" { - off, err = packStringHex(rr.Salt, msg, off) - if err != nil { - return headerEnd, off, err - } - } - off, err = packUint8(rr.HashLength, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packStringBase32(rr.NextDomain, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packDataNsec(rr.TypeBitMap, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *NSEC3PARAM) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Hash, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Flags, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Iterations, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.SaltLength, msg, off) - if err != nil { - return headerEnd, off, err - } - // Only pack salt if value is not "-", i.e. 
empty - if rr.Salt != "-" { - off, err = packStringHex(rr.Salt, msg, off) - if err != nil { - return headerEnd, off, err - } - } - return headerEnd, off, nil -} - -func (rr *OPENPGPKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packStringBase64(rr.PublicKey, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *OPT) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packDataOpt(rr.Option, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *PTR) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Ptr, msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *PX) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Map822, msg, off, compression, false) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Mapx400, msg, off, compression, false) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *RFC3597) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - 
headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packStringHex(rr.Rdata, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *RKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Flags, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Protocol, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packStringBase64(rr.PublicKey, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *RP) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Mbox, msg, off, compression, false) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Txt, msg, off, compression, false) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *RRSIG) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.TypeCovered, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Labels, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint32(rr.OrigTtl, msg, off) - if err != nil { - 
return headerEnd, off, err - } - off, err = packUint32(rr.Expiration, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint32(rr.Inception, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.KeyTag, msg, off) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.SignerName, msg, off, compression, false) - if err != nil { - return headerEnd, off, err - } - off, err = packStringBase64(rr.Signature, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *RT) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Preference, msg, off) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Host, msg, off, compression, false) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *SIG) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.TypeCovered, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Labels, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint32(rr.OrigTtl, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint32(rr.Expiration, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint32(rr.Inception, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.KeyTag, msg, off) - if err != nil { - return headerEnd, off, err - } - off, _, err = 
packDomainName(rr.SignerName, msg, off, compression, false) - if err != nil { - return headerEnd, off, err - } - off, err = packStringBase64(rr.Signature, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *SMIMEA) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Usage, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Selector, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.MatchingType, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packStringHex(rr.Certificate, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *SOA) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Ns, msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Mbox, msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint32(rr.Serial, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint32(rr.Refresh, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint32(rr.Retry, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint32(rr.Expire, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint32(rr.Minttl, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *SPF) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { 
- headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packStringTxt(rr.Txt, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *SRV) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Priority, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Weight, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Port, msg, off) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Target, msg, off, compression, false) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *SSHFP) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Type, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packStringHex(rr.FingerPrint, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *TA) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.KeyTag, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Algorithm, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.DigestType, msg, off) - if err != nil { - return headerEnd, off, err - } - 
off, err = packStringHex(rr.Digest, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *TALINK) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.PreviousName, msg, off, compression, false) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.NextName, msg, off, compression, false) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *TKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Algorithm, msg, off, compression, false) - if err != nil { - return headerEnd, off, err - } - off, err = packUint32(rr.Inception, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint32(rr.Expiration, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Mode, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Error, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.KeySize, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packStringHex(rr.Key, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.OtherLen, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packStringHex(rr.OtherData, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *TLSA) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if 
err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Usage, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.Selector, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint8(rr.MatchingType, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packStringHex(rr.Certificate, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *TSIG) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, _, err = packDomainName(rr.Algorithm, msg, off, compression, false) - if err != nil { - return headerEnd, off, err - } - off, err = packUint48(rr.TimeSigned, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Fudge, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.MACSize, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packStringHex(rr.MAC, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.OrigId, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Error, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.OtherLen, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packStringHex(rr.OtherData, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *TXT) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packStringTxt(rr.Txt, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - 
-func (rr *UID) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint32(rr.Uid, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *UINFO) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packString(rr.Uinfo, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *URI) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Priority, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packUint16(rr.Weight, msg, off) - if err != nil { - return headerEnd, off, err - } - off, err = packStringOctet(rr.Target, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -func (rr *X25) pack(msg []byte, off int, compression compressionMap, compress bool) (int, int, error) { - headerEnd, off, err := rr.Hdr.pack(msg, off, compression, compress) - if err != nil { - return headerEnd, off, err - } - off, err = packString(rr.PSDNAddress, msg, off) - if err != nil { - return headerEnd, off, err - } - return headerEnd, off, nil -} - -// unpack*() functions - -func unpackA(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(A) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.A, off, err = unpackDataA(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackAAAA(h RR_Header, msg []byte, off 
int) (RR, int, error) { - rr := new(AAAA) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.AAAA, off, err = unpackDataAAAA(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackAFSDB(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(AFSDB) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Subtype, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Hostname, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackANY(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(ANY) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - return rr, off, err -} - -func unpackAVC(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(AVC) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Txt, off, err = unpackStringTxt(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackCAA(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(CAA) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Flag, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Tag, off, err = unpackString(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Value, off, err = unpackStringOctet(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackCDNSKEY(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(CDNSKEY) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - 
var err error - rdStart := off - _ = rdStart - - rr.Flags, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Protocol, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackCDS(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(CDS) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.KeyTag, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.DigestType, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackCERT(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(CERT) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Type, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.KeyTag, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, 
nil - } - rr.Certificate, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackCNAME(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(CNAME) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Target, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackCSYNC(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(CSYNC) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Serial, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Flags, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.TypeBitMap, off, err = unpackDataNsec(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackDHCID(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(DHCID) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Digest, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackDLV(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(DLV) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.KeyTag, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.DigestType, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - 
} - if off == len(msg) { - return rr, off, nil - } - rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackDNAME(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(DNAME) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Target, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackDNSKEY(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(DNSKEY) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Flags, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Protocol, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackDS(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(DS) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.KeyTag, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.DigestType, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Digest, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if 
err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackEID(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(EID) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Endpoint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackEUI48(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(EUI48) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Address, off, err = unpackUint48(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackEUI64(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(EUI64) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Address, off, err = unpackUint64(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackGID(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(GID) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Gid, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackGPOS(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(GPOS) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Longitude, off, err = unpackString(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Latitude, off, err = unpackString(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Altitude, off, err = unpackString(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackHINFO(h RR_Header, msg []byte, off int) 
(RR, int, error) { - rr := new(HINFO) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Cpu, off, err = unpackString(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Os, off, err = unpackString(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackHIP(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(HIP) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.HitLength, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.PublicKeyAlgorithm, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.PublicKeyLength, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Hit, off, err = unpackStringHex(msg, off, off+int(rr.HitLength)) - if err != nil { - return rr, off, err - } - rr.PublicKey, off, err = unpackStringBase64(msg, off, off+int(rr.PublicKeyLength)) - if err != nil { - return rr, off, err - } - rr.RendezvousServers, off, err = unpackDataDomainNames(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackKEY(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(KEY) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Flags, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Protocol, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, 
err - } - if off == len(msg) { - return rr, off, nil - } - rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackKX(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(KX) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Preference, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Exchanger, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackL32(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(L32) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Preference, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Locator32, off, err = unpackDataA(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackL64(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(L64) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Preference, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Locator64, off, err = unpackUint64(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackLOC(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(LOC) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Version, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Size, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, 
err - } - if off == len(msg) { - return rr, off, nil - } - rr.HorizPre, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.VertPre, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Latitude, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Longitude, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Altitude, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackLP(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(LP) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Preference, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Fqdn, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackMB(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(MB) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Mb, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackMD(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(MD) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Md, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackMF(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(MF) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = 
rdStart - - rr.Mf, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackMG(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(MG) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Mg, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackMINFO(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(MINFO) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Rmail, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Email, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackMR(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(MR) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Mr, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackMX(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(MX) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Preference, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Mx, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackNAPTR(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(NAPTR) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Order, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - 
rr.Preference, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Flags, off, err = unpackString(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Service, off, err = unpackString(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Regexp, off, err = unpackString(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Replacement, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackNID(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(NID) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Preference, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.NodeID, off, err = unpackUint64(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackNIMLOC(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(NIMLOC) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Locator, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackNINFO(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(NINFO) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.ZSData, off, err = unpackStringTxt(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackNS(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(NS) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - 
- rr.Ns, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackNSAPPTR(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(NSAPPTR) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Ptr, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackNSEC(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(NSEC) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.NextDomain, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.TypeBitMap, off, err = unpackDataNsec(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackNSEC3(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(NSEC3) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Hash, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Flags, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Iterations, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.SaltLength, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength)) - if err != nil { - return rr, off, err - } - rr.HashLength, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.NextDomain, off, err = unpackStringBase32(msg, off, 
off+int(rr.HashLength)) - if err != nil { - return rr, off, err - } - rr.TypeBitMap, off, err = unpackDataNsec(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackNSEC3PARAM(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(NSEC3PARAM) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Hash, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Flags, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Iterations, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.SaltLength, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Salt, off, err = unpackStringHex(msg, off, off+int(rr.SaltLength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackOPENPGPKEY(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(OPENPGPKEY) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackOPT(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(OPT) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Option, off, err = unpackDataOpt(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackPTR(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(PTR) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Ptr, off, err = 
UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackPX(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(PX) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Preference, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Map822, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Mapx400, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackRFC3597(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(RFC3597) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Rdata, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackRKEY(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(RKEY) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Flags, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Protocol, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackRP(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(RP) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ 
= rdStart - - rr.Mbox, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Txt, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackRRSIG(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(RRSIG) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.TypeCovered, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Labels, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.OrigTtl, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Expiration, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Inception, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.KeyTag, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.SignerName, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackRT(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(RT) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Preference, off, err = 
unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Host, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackSIG(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(SIG) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.TypeCovered, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Labels, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.OrigTtl, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Expiration, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Inception, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.KeyTag, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.SignerName, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Signature, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackSMIMEA(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(SMIMEA) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Usage, off, err = unpackUint8(msg, off) - if err != nil { - return 
rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Selector, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.MatchingType, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackSOA(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(SOA) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Ns, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Mbox, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Serial, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Refresh, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Retry, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Expire, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Minttl, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackSPF(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(SPF) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Txt, off, err = unpackStringTxt(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackSRV(h RR_Header, 
msg []byte, off int) (RR, int, error) { - rr := new(SRV) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Priority, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Weight, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Port, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Target, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackSSHFP(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(SSHFP) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Type, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.FingerPrint, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackTA(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(TA) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.KeyTag, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Algorithm, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.DigestType, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Digest, off, err = unpackStringHex(msg, 
off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackTALINK(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(TALINK) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.PreviousName, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.NextName, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackTKEY(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(TKEY) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Algorithm, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Inception, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Expiration, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Mode, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Error, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.KeySize, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Key, off, err = unpackStringHex(msg, off, off+int(rr.KeySize)) - if err != nil { - return rr, off, err - } - rr.OtherLen, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen)) - if err != nil { - return rr, off, err - } - 
return rr, off, err -} - -func unpackTLSA(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(TLSA) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Usage, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Selector, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.MatchingType, off, err = unpackUint8(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Certificate, off, err = unpackStringHex(msg, off, rdStart+int(rr.Hdr.Rdlength)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackTSIG(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(TSIG) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Algorithm, off, err = UnpackDomainName(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.TimeSigned, off, err = unpackUint48(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Fudge, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.MACSize, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.MAC, off, err = unpackStringHex(msg, off, off+int(rr.MACSize)) - if err != nil { - return rr, off, err - } - rr.OrigId, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Error, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.OtherLen, off, err = unpackUint16(msg, 
off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.OtherData, off, err = unpackStringHex(msg, off, off+int(rr.OtherLen)) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackTXT(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(TXT) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Txt, off, err = unpackStringTxt(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackUID(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(UID) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Uid, off, err = unpackUint32(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackUINFO(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(UINFO) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Uinfo, off, err = unpackString(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackURI(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(URI) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.Priority, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Weight, off, err = unpackUint16(msg, off) - if err != nil { - return rr, off, err - } - if off == len(msg) { - return rr, off, nil - } - rr.Target, off, err = unpackStringOctet(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -func unpackX25(h RR_Header, msg []byte, off int) (RR, int, error) { - rr := new(X25) - rr.Hdr = h - if noRdata(h) { - return rr, off, nil - } - var err error - rdStart := off - _ = rdStart - - rr.PSDNAddress, off, err = 
unpackString(msg, off) - if err != nil { - return rr, off, err - } - return rr, off, err -} - -var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){ - TypeA: unpackA, - TypeAAAA: unpackAAAA, - TypeAFSDB: unpackAFSDB, - TypeANY: unpackANY, - TypeAVC: unpackAVC, - TypeCAA: unpackCAA, - TypeCDNSKEY: unpackCDNSKEY, - TypeCDS: unpackCDS, - TypeCERT: unpackCERT, - TypeCNAME: unpackCNAME, - TypeCSYNC: unpackCSYNC, - TypeDHCID: unpackDHCID, - TypeDLV: unpackDLV, - TypeDNAME: unpackDNAME, - TypeDNSKEY: unpackDNSKEY, - TypeDS: unpackDS, - TypeEID: unpackEID, - TypeEUI48: unpackEUI48, - TypeEUI64: unpackEUI64, - TypeGID: unpackGID, - TypeGPOS: unpackGPOS, - TypeHINFO: unpackHINFO, - TypeHIP: unpackHIP, - TypeKEY: unpackKEY, - TypeKX: unpackKX, - TypeL32: unpackL32, - TypeL64: unpackL64, - TypeLOC: unpackLOC, - TypeLP: unpackLP, - TypeMB: unpackMB, - TypeMD: unpackMD, - TypeMF: unpackMF, - TypeMG: unpackMG, - TypeMINFO: unpackMINFO, - TypeMR: unpackMR, - TypeMX: unpackMX, - TypeNAPTR: unpackNAPTR, - TypeNID: unpackNID, - TypeNIMLOC: unpackNIMLOC, - TypeNINFO: unpackNINFO, - TypeNS: unpackNS, - TypeNSAPPTR: unpackNSAPPTR, - TypeNSEC: unpackNSEC, - TypeNSEC3: unpackNSEC3, - TypeNSEC3PARAM: unpackNSEC3PARAM, - TypeOPENPGPKEY: unpackOPENPGPKEY, - TypeOPT: unpackOPT, - TypePTR: unpackPTR, - TypePX: unpackPX, - TypeRKEY: unpackRKEY, - TypeRP: unpackRP, - TypeRRSIG: unpackRRSIG, - TypeRT: unpackRT, - TypeSIG: unpackSIG, - TypeSMIMEA: unpackSMIMEA, - TypeSOA: unpackSOA, - TypeSPF: unpackSPF, - TypeSRV: unpackSRV, - TypeSSHFP: unpackSSHFP, - TypeTA: unpackTA, - TypeTALINK: unpackTALINK, - TypeTKEY: unpackTKEY, - TypeTLSA: unpackTLSA, - TypeTSIG: unpackTSIG, - TypeTXT: unpackTXT, - TypeUID: unpackUID, - TypeUINFO: unpackUINFO, - TypeURI: unpackURI, - TypeX25: unpackX25, -} diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go deleted file mode 100644 index 71662b7b7..000000000 --- a/vendor/github.com/miekg/dns/ztypes.go +++ 
/dev/null @@ -1,863 +0,0 @@ -// Code generated by "go run types_generate.go"; DO NOT EDIT. - -package dns - -import ( - "encoding/base64" - "net" -) - -// TypeToRR is a map of constructors for each RR type. -var TypeToRR = map[uint16]func() RR{ - TypeA: func() RR { return new(A) }, - TypeAAAA: func() RR { return new(AAAA) }, - TypeAFSDB: func() RR { return new(AFSDB) }, - TypeANY: func() RR { return new(ANY) }, - TypeAVC: func() RR { return new(AVC) }, - TypeCAA: func() RR { return new(CAA) }, - TypeCDNSKEY: func() RR { return new(CDNSKEY) }, - TypeCDS: func() RR { return new(CDS) }, - TypeCERT: func() RR { return new(CERT) }, - TypeCNAME: func() RR { return new(CNAME) }, - TypeCSYNC: func() RR { return new(CSYNC) }, - TypeDHCID: func() RR { return new(DHCID) }, - TypeDLV: func() RR { return new(DLV) }, - TypeDNAME: func() RR { return new(DNAME) }, - TypeDNSKEY: func() RR { return new(DNSKEY) }, - TypeDS: func() RR { return new(DS) }, - TypeEID: func() RR { return new(EID) }, - TypeEUI48: func() RR { return new(EUI48) }, - TypeEUI64: func() RR { return new(EUI64) }, - TypeGID: func() RR { return new(GID) }, - TypeGPOS: func() RR { return new(GPOS) }, - TypeHINFO: func() RR { return new(HINFO) }, - TypeHIP: func() RR { return new(HIP) }, - TypeKEY: func() RR { return new(KEY) }, - TypeKX: func() RR { return new(KX) }, - TypeL32: func() RR { return new(L32) }, - TypeL64: func() RR { return new(L64) }, - TypeLOC: func() RR { return new(LOC) }, - TypeLP: func() RR { return new(LP) }, - TypeMB: func() RR { return new(MB) }, - TypeMD: func() RR { return new(MD) }, - TypeMF: func() RR { return new(MF) }, - TypeMG: func() RR { return new(MG) }, - TypeMINFO: func() RR { return new(MINFO) }, - TypeMR: func() RR { return new(MR) }, - TypeMX: func() RR { return new(MX) }, - TypeNAPTR: func() RR { return new(NAPTR) }, - TypeNID: func() RR { return new(NID) }, - TypeNIMLOC: func() RR { return new(NIMLOC) }, - TypeNINFO: func() RR { return new(NINFO) }, - TypeNS: func() RR { 
return new(NS) }, - TypeNSAPPTR: func() RR { return new(NSAPPTR) }, - TypeNSEC: func() RR { return new(NSEC) }, - TypeNSEC3: func() RR { return new(NSEC3) }, - TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) }, - TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) }, - TypeOPT: func() RR { return new(OPT) }, - TypePTR: func() RR { return new(PTR) }, - TypePX: func() RR { return new(PX) }, - TypeRKEY: func() RR { return new(RKEY) }, - TypeRP: func() RR { return new(RP) }, - TypeRRSIG: func() RR { return new(RRSIG) }, - TypeRT: func() RR { return new(RT) }, - TypeSIG: func() RR { return new(SIG) }, - TypeSMIMEA: func() RR { return new(SMIMEA) }, - TypeSOA: func() RR { return new(SOA) }, - TypeSPF: func() RR { return new(SPF) }, - TypeSRV: func() RR { return new(SRV) }, - TypeSSHFP: func() RR { return new(SSHFP) }, - TypeTA: func() RR { return new(TA) }, - TypeTALINK: func() RR { return new(TALINK) }, - TypeTKEY: func() RR { return new(TKEY) }, - TypeTLSA: func() RR { return new(TLSA) }, - TypeTSIG: func() RR { return new(TSIG) }, - TypeTXT: func() RR { return new(TXT) }, - TypeUID: func() RR { return new(UID) }, - TypeUINFO: func() RR { return new(UINFO) }, - TypeURI: func() RR { return new(URI) }, - TypeX25: func() RR { return new(X25) }, -} - -// TypeToString is a map of strings for each RR type. 
-var TypeToString = map[uint16]string{ - TypeA: "A", - TypeAAAA: "AAAA", - TypeAFSDB: "AFSDB", - TypeANY: "ANY", - TypeATMA: "ATMA", - TypeAVC: "AVC", - TypeAXFR: "AXFR", - TypeCAA: "CAA", - TypeCDNSKEY: "CDNSKEY", - TypeCDS: "CDS", - TypeCERT: "CERT", - TypeCNAME: "CNAME", - TypeCSYNC: "CSYNC", - TypeDHCID: "DHCID", - TypeDLV: "DLV", - TypeDNAME: "DNAME", - TypeDNSKEY: "DNSKEY", - TypeDS: "DS", - TypeEID: "EID", - TypeEUI48: "EUI48", - TypeEUI64: "EUI64", - TypeGID: "GID", - TypeGPOS: "GPOS", - TypeHINFO: "HINFO", - TypeHIP: "HIP", - TypeISDN: "ISDN", - TypeIXFR: "IXFR", - TypeKEY: "KEY", - TypeKX: "KX", - TypeL32: "L32", - TypeL64: "L64", - TypeLOC: "LOC", - TypeLP: "LP", - TypeMAILA: "MAILA", - TypeMAILB: "MAILB", - TypeMB: "MB", - TypeMD: "MD", - TypeMF: "MF", - TypeMG: "MG", - TypeMINFO: "MINFO", - TypeMR: "MR", - TypeMX: "MX", - TypeNAPTR: "NAPTR", - TypeNID: "NID", - TypeNIMLOC: "NIMLOC", - TypeNINFO: "NINFO", - TypeNS: "NS", - TypeNSEC: "NSEC", - TypeNSEC3: "NSEC3", - TypeNSEC3PARAM: "NSEC3PARAM", - TypeNULL: "NULL", - TypeNXT: "NXT", - TypeNone: "None", - TypeOPENPGPKEY: "OPENPGPKEY", - TypeOPT: "OPT", - TypePTR: "PTR", - TypePX: "PX", - TypeRKEY: "RKEY", - TypeRP: "RP", - TypeRRSIG: "RRSIG", - TypeRT: "RT", - TypeReserved: "Reserved", - TypeSIG: "SIG", - TypeSMIMEA: "SMIMEA", - TypeSOA: "SOA", - TypeSPF: "SPF", - TypeSRV: "SRV", - TypeSSHFP: "SSHFP", - TypeTA: "TA", - TypeTALINK: "TALINK", - TypeTKEY: "TKEY", - TypeTLSA: "TLSA", - TypeTSIG: "TSIG", - TypeTXT: "TXT", - TypeUID: "UID", - TypeUINFO: "UINFO", - TypeUNSPEC: "UNSPEC", - TypeURI: "URI", - TypeX25: "X25", - TypeNSAPPTR: "NSAP-PTR", -} - -func (rr *A) Header() *RR_Header { return &rr.Hdr } -func (rr *AAAA) Header() *RR_Header { return &rr.Hdr } -func (rr *AFSDB) Header() *RR_Header { return &rr.Hdr } -func (rr *ANY) Header() *RR_Header { return &rr.Hdr } -func (rr *AVC) Header() *RR_Header { return &rr.Hdr } -func (rr *CAA) Header() *RR_Header { return &rr.Hdr } -func (rr *CDNSKEY) Header() 
*RR_Header { return &rr.Hdr } -func (rr *CDS) Header() *RR_Header { return &rr.Hdr } -func (rr *CERT) Header() *RR_Header { return &rr.Hdr } -func (rr *CNAME) Header() *RR_Header { return &rr.Hdr } -func (rr *CSYNC) Header() *RR_Header { return &rr.Hdr } -func (rr *DHCID) Header() *RR_Header { return &rr.Hdr } -func (rr *DLV) Header() *RR_Header { return &rr.Hdr } -func (rr *DNAME) Header() *RR_Header { return &rr.Hdr } -func (rr *DNSKEY) Header() *RR_Header { return &rr.Hdr } -func (rr *DS) Header() *RR_Header { return &rr.Hdr } -func (rr *EID) Header() *RR_Header { return &rr.Hdr } -func (rr *EUI48) Header() *RR_Header { return &rr.Hdr } -func (rr *EUI64) Header() *RR_Header { return &rr.Hdr } -func (rr *GID) Header() *RR_Header { return &rr.Hdr } -func (rr *GPOS) Header() *RR_Header { return &rr.Hdr } -func (rr *HINFO) Header() *RR_Header { return &rr.Hdr } -func (rr *HIP) Header() *RR_Header { return &rr.Hdr } -func (rr *KEY) Header() *RR_Header { return &rr.Hdr } -func (rr *KX) Header() *RR_Header { return &rr.Hdr } -func (rr *L32) Header() *RR_Header { return &rr.Hdr } -func (rr *L64) Header() *RR_Header { return &rr.Hdr } -func (rr *LOC) Header() *RR_Header { return &rr.Hdr } -func (rr *LP) Header() *RR_Header { return &rr.Hdr } -func (rr *MB) Header() *RR_Header { return &rr.Hdr } -func (rr *MD) Header() *RR_Header { return &rr.Hdr } -func (rr *MF) Header() *RR_Header { return &rr.Hdr } -func (rr *MG) Header() *RR_Header { return &rr.Hdr } -func (rr *MINFO) Header() *RR_Header { return &rr.Hdr } -func (rr *MR) Header() *RR_Header { return &rr.Hdr } -func (rr *MX) Header() *RR_Header { return &rr.Hdr } -func (rr *NAPTR) Header() *RR_Header { return &rr.Hdr } -func (rr *NID) Header() *RR_Header { return &rr.Hdr } -func (rr *NIMLOC) Header() *RR_Header { return &rr.Hdr } -func (rr *NINFO) Header() *RR_Header { return &rr.Hdr } -func (rr *NS) Header() *RR_Header { return &rr.Hdr } -func (rr *NSAPPTR) Header() *RR_Header { return &rr.Hdr } -func (rr *NSEC) 
Header() *RR_Header { return &rr.Hdr } -func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr } -func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr } -func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr } -func (rr *OPT) Header() *RR_Header { return &rr.Hdr } -func (rr *PTR) Header() *RR_Header { return &rr.Hdr } -func (rr *PX) Header() *RR_Header { return &rr.Hdr } -func (rr *RFC3597) Header() *RR_Header { return &rr.Hdr } -func (rr *RKEY) Header() *RR_Header { return &rr.Hdr } -func (rr *RP) Header() *RR_Header { return &rr.Hdr } -func (rr *RRSIG) Header() *RR_Header { return &rr.Hdr } -func (rr *RT) Header() *RR_Header { return &rr.Hdr } -func (rr *SIG) Header() *RR_Header { return &rr.Hdr } -func (rr *SMIMEA) Header() *RR_Header { return &rr.Hdr } -func (rr *SOA) Header() *RR_Header { return &rr.Hdr } -func (rr *SPF) Header() *RR_Header { return &rr.Hdr } -func (rr *SRV) Header() *RR_Header { return &rr.Hdr } -func (rr *SSHFP) Header() *RR_Header { return &rr.Hdr } -func (rr *TA) Header() *RR_Header { return &rr.Hdr } -func (rr *TALINK) Header() *RR_Header { return &rr.Hdr } -func (rr *TKEY) Header() *RR_Header { return &rr.Hdr } -func (rr *TLSA) Header() *RR_Header { return &rr.Hdr } -func (rr *TSIG) Header() *RR_Header { return &rr.Hdr } -func (rr *TXT) Header() *RR_Header { return &rr.Hdr } -func (rr *UID) Header() *RR_Header { return &rr.Hdr } -func (rr *UINFO) Header() *RR_Header { return &rr.Hdr } -func (rr *URI) Header() *RR_Header { return &rr.Hdr } -func (rr *X25) Header() *RR_Header { return &rr.Hdr } - -// len() functions -func (rr *A) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += net.IPv4len // A - return l -} -func (rr *AAAA) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += net.IPv6len // AAAA - return l -} -func (rr *AFSDB) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Subtype - l += 
domainNameLen(rr.Hostname, off+l, compression, false) - return l -} -func (rr *ANY) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - return l -} -func (rr *AVC) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - for _, x := range rr.Txt { - l += len(x) + 1 - } - return l -} -func (rr *CAA) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l++ // Flag - l += len(rr.Tag) + 1 - l += len(rr.Value) - return l -} -func (rr *CERT) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Type - l += 2 // KeyTag - l++ // Algorithm - l += base64.StdEncoding.DecodedLen(len(rr.Certificate)) - return l -} -func (rr *CNAME) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Target, off+l, compression, true) - return l -} -func (rr *DHCID) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += base64.StdEncoding.DecodedLen(len(rr.Digest)) - return l -} -func (rr *DNAME) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Target, off+l, compression, false) - return l -} -func (rr *DNSKEY) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Flags - l++ // Protocol - l++ // Algorithm - l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) - return l -} -func (rr *DS) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // KeyTag - l++ // Algorithm - l++ // DigestType - l += len(rr.Digest)/2 + 1 - return l -} -func (rr *EID) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += len(rr.Endpoint)/2 + 1 - return l -} -func (rr *EUI48) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 6 // 
Address - return l -} -func (rr *EUI64) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 8 // Address - return l -} -func (rr *GID) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 4 // Gid - return l -} -func (rr *GPOS) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += len(rr.Longitude) + 1 - l += len(rr.Latitude) + 1 - l += len(rr.Altitude) + 1 - return l -} -func (rr *HINFO) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += len(rr.Cpu) + 1 - l += len(rr.Os) + 1 - return l -} -func (rr *HIP) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l++ // HitLength - l++ // PublicKeyAlgorithm - l += 2 // PublicKeyLength - l += len(rr.Hit) / 2 - l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) - for _, x := range rr.RendezvousServers { - l += domainNameLen(x, off+l, compression, false) - } - return l -} -func (rr *KX) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Preference - l += domainNameLen(rr.Exchanger, off+l, compression, false) - return l -} -func (rr *L32) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Preference - l += net.IPv4len // Locator32 - return l -} -func (rr *L64) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Preference - l += 8 // Locator64 - return l -} -func (rr *LOC) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l++ // Version - l++ // Size - l++ // HorizPre - l++ // VertPre - l += 4 // Latitude - l += 4 // Longitude - l += 4 // Altitude - return l -} -func (rr *LP) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Preference - l += domainNameLen(rr.Fqdn, off+l, compression, 
false) - return l -} -func (rr *MB) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Mb, off+l, compression, true) - return l -} -func (rr *MD) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Md, off+l, compression, true) - return l -} -func (rr *MF) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Mf, off+l, compression, true) - return l -} -func (rr *MG) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Mg, off+l, compression, true) - return l -} -func (rr *MINFO) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Rmail, off+l, compression, true) - l += domainNameLen(rr.Email, off+l, compression, true) - return l -} -func (rr *MR) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Mr, off+l, compression, true) - return l -} -func (rr *MX) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Preference - l += domainNameLen(rr.Mx, off+l, compression, true) - return l -} -func (rr *NAPTR) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Order - l += 2 // Preference - l += len(rr.Flags) + 1 - l += len(rr.Service) + 1 - l += len(rr.Regexp) + 1 - l += domainNameLen(rr.Replacement, off+l, compression, false) - return l -} -func (rr *NID) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Preference - l += 8 // NodeID - return l -} -func (rr *NIMLOC) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += len(rr.Locator)/2 + 1 - return l -} -func (rr *NINFO) len(off int, compression map[string]struct{}) int { - l := 
rr.Hdr.len(off, compression) - for _, x := range rr.ZSData { - l += len(x) + 1 - } - return l -} -func (rr *NS) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Ns, off+l, compression, true) - return l -} -func (rr *NSAPPTR) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Ptr, off+l, compression, false) - return l -} -func (rr *NSEC3PARAM) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l++ // Hash - l++ // Flags - l += 2 // Iterations - l++ // SaltLength - l += len(rr.Salt) / 2 - return l -} -func (rr *OPENPGPKEY) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) - return l -} -func (rr *PTR) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Ptr, off+l, compression, true) - return l -} -func (rr *PX) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Preference - l += domainNameLen(rr.Map822, off+l, compression, false) - l += domainNameLen(rr.Mapx400, off+l, compression, false) - return l -} -func (rr *RFC3597) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += len(rr.Rdata)/2 + 1 - return l -} -func (rr *RKEY) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Flags - l++ // Protocol - l++ // Algorithm - l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) - return l -} -func (rr *RP) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Mbox, off+l, compression, false) - l += domainNameLen(rr.Txt, off+l, compression, false) - return l -} -func (rr *RRSIG) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l 
+= 2 // TypeCovered - l++ // Algorithm - l++ // Labels - l += 4 // OrigTtl - l += 4 // Expiration - l += 4 // Inception - l += 2 // KeyTag - l += domainNameLen(rr.SignerName, off+l, compression, false) - l += base64.StdEncoding.DecodedLen(len(rr.Signature)) - return l -} -func (rr *RT) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Preference - l += domainNameLen(rr.Host, off+l, compression, false) - return l -} -func (rr *SMIMEA) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l++ // Usage - l++ // Selector - l++ // MatchingType - l += len(rr.Certificate)/2 + 1 - return l -} -func (rr *SOA) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Ns, off+l, compression, true) - l += domainNameLen(rr.Mbox, off+l, compression, true) - l += 4 // Serial - l += 4 // Refresh - l += 4 // Retry - l += 4 // Expire - l += 4 // Minttl - return l -} -func (rr *SPF) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - for _, x := range rr.Txt { - l += len(x) + 1 - } - return l -} -func (rr *SRV) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Priority - l += 2 // Weight - l += 2 // Port - l += domainNameLen(rr.Target, off+l, compression, false) - return l -} -func (rr *SSHFP) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l++ // Algorithm - l++ // Type - l += len(rr.FingerPrint)/2 + 1 - return l -} -func (rr *TA) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // KeyTag - l++ // Algorithm - l++ // DigestType - l += len(rr.Digest)/2 + 1 - return l -} -func (rr *TALINK) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.PreviousName, off+l, compression, false) - l += domainNameLen(rr.NextName, 
off+l, compression, false) - return l -} -func (rr *TKEY) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Algorithm, off+l, compression, false) - l += 4 // Inception - l += 4 // Expiration - l += 2 // Mode - l += 2 // Error - l += 2 // KeySize - l += len(rr.Key) / 2 - l += 2 // OtherLen - l += len(rr.OtherData) / 2 - return l -} -func (rr *TLSA) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l++ // Usage - l++ // Selector - l++ // MatchingType - l += len(rr.Certificate)/2 + 1 - return l -} -func (rr *TSIG) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += domainNameLen(rr.Algorithm, off+l, compression, false) - l += 6 // TimeSigned - l += 2 // Fudge - l += 2 // MACSize - l += len(rr.MAC) / 2 - l += 2 // OrigId - l += 2 // Error - l += 2 // OtherLen - l += len(rr.OtherData) / 2 - return l -} -func (rr *TXT) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - for _, x := range rr.Txt { - l += len(x) + 1 - } - return l -} -func (rr *UID) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 4 // Uid - return l -} -func (rr *UINFO) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += len(rr.Uinfo) + 1 - return l -} -func (rr *URI) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += 2 // Priority - l += 2 // Weight - l += len(rr.Target) - return l -} -func (rr *X25) len(off int, compression map[string]struct{}) int { - l := rr.Hdr.len(off, compression) - l += len(rr.PSDNAddress) + 1 - return l -} - -// copy() functions -func (rr *A) copy() RR { - return &A{rr.Hdr, copyIP(rr.A)} -} -func (rr *AAAA) copy() RR { - return &AAAA{rr.Hdr, copyIP(rr.AAAA)} -} -func (rr *AFSDB) copy() RR { - return &AFSDB{rr.Hdr, rr.Subtype, rr.Hostname} -} -func (rr *ANY) copy() RR { 
- return &ANY{rr.Hdr} -} -func (rr *AVC) copy() RR { - Txt := make([]string, len(rr.Txt)) - copy(Txt, rr.Txt) - return &AVC{rr.Hdr, Txt} -} -func (rr *CAA) copy() RR { - return &CAA{rr.Hdr, rr.Flag, rr.Tag, rr.Value} -} -func (rr *CERT) copy() RR { - return &CERT{rr.Hdr, rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate} -} -func (rr *CNAME) copy() RR { - return &CNAME{rr.Hdr, rr.Target} -} -func (rr *CSYNC) copy() RR { - TypeBitMap := make([]uint16, len(rr.TypeBitMap)) - copy(TypeBitMap, rr.TypeBitMap) - return &CSYNC{rr.Hdr, rr.Serial, rr.Flags, TypeBitMap} -} -func (rr *DHCID) copy() RR { - return &DHCID{rr.Hdr, rr.Digest} -} -func (rr *DNAME) copy() RR { - return &DNAME{rr.Hdr, rr.Target} -} -func (rr *DNSKEY) copy() RR { - return &DNSKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} -} -func (rr *DS) copy() RR { - return &DS{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} -} -func (rr *EID) copy() RR { - return &EID{rr.Hdr, rr.Endpoint} -} -func (rr *EUI48) copy() RR { - return &EUI48{rr.Hdr, rr.Address} -} -func (rr *EUI64) copy() RR { - return &EUI64{rr.Hdr, rr.Address} -} -func (rr *GID) copy() RR { - return &GID{rr.Hdr, rr.Gid} -} -func (rr *GPOS) copy() RR { - return &GPOS{rr.Hdr, rr.Longitude, rr.Latitude, rr.Altitude} -} -func (rr *HINFO) copy() RR { - return &HINFO{rr.Hdr, rr.Cpu, rr.Os} -} -func (rr *HIP) copy() RR { - RendezvousServers := make([]string, len(rr.RendezvousServers)) - copy(RendezvousServers, rr.RendezvousServers) - return &HIP{rr.Hdr, rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers} -} -func (rr *KX) copy() RR { - return &KX{rr.Hdr, rr.Preference, rr.Exchanger} -} -func (rr *L32) copy() RR { - return &L32{rr.Hdr, rr.Preference, copyIP(rr.Locator32)} -} -func (rr *L64) copy() RR { - return &L64{rr.Hdr, rr.Preference, rr.Locator64} -} -func (rr *LOC) copy() RR { - return &LOC{rr.Hdr, rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude} 
-} -func (rr *LP) copy() RR { - return &LP{rr.Hdr, rr.Preference, rr.Fqdn} -} -func (rr *MB) copy() RR { - return &MB{rr.Hdr, rr.Mb} -} -func (rr *MD) copy() RR { - return &MD{rr.Hdr, rr.Md} -} -func (rr *MF) copy() RR { - return &MF{rr.Hdr, rr.Mf} -} -func (rr *MG) copy() RR { - return &MG{rr.Hdr, rr.Mg} -} -func (rr *MINFO) copy() RR { - return &MINFO{rr.Hdr, rr.Rmail, rr.Email} -} -func (rr *MR) copy() RR { - return &MR{rr.Hdr, rr.Mr} -} -func (rr *MX) copy() RR { - return &MX{rr.Hdr, rr.Preference, rr.Mx} -} -func (rr *NAPTR) copy() RR { - return &NAPTR{rr.Hdr, rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement} -} -func (rr *NID) copy() RR { - return &NID{rr.Hdr, rr.Preference, rr.NodeID} -} -func (rr *NIMLOC) copy() RR { - return &NIMLOC{rr.Hdr, rr.Locator} -} -func (rr *NINFO) copy() RR { - ZSData := make([]string, len(rr.ZSData)) - copy(ZSData, rr.ZSData) - return &NINFO{rr.Hdr, ZSData} -} -func (rr *NS) copy() RR { - return &NS{rr.Hdr, rr.Ns} -} -func (rr *NSAPPTR) copy() RR { - return &NSAPPTR{rr.Hdr, rr.Ptr} -} -func (rr *NSEC) copy() RR { - TypeBitMap := make([]uint16, len(rr.TypeBitMap)) - copy(TypeBitMap, rr.TypeBitMap) - return &NSEC{rr.Hdr, rr.NextDomain, TypeBitMap} -} -func (rr *NSEC3) copy() RR { - TypeBitMap := make([]uint16, len(rr.TypeBitMap)) - copy(TypeBitMap, rr.TypeBitMap) - return &NSEC3{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap} -} -func (rr *NSEC3PARAM) copy() RR { - return &NSEC3PARAM{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt} -} -func (rr *OPENPGPKEY) copy() RR { - return &OPENPGPKEY{rr.Hdr, rr.PublicKey} -} -func (rr *OPT) copy() RR { - Option := make([]EDNS0, len(rr.Option)) - copy(Option, rr.Option) - return &OPT{rr.Hdr, Option} -} -func (rr *PTR) copy() RR { - return &PTR{rr.Hdr, rr.Ptr} -} -func (rr *PX) copy() RR { - return &PX{rr.Hdr, rr.Preference, rr.Map822, rr.Mapx400} -} -func (rr *RFC3597) copy() RR { - return 
&RFC3597{rr.Hdr, rr.Rdata} -} -func (rr *RKEY) copy() RR { - return &RKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} -} -func (rr *RP) copy() RR { - return &RP{rr.Hdr, rr.Mbox, rr.Txt} -} -func (rr *RRSIG) copy() RR { - return &RRSIG{rr.Hdr, rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature} -} -func (rr *RT) copy() RR { - return &RT{rr.Hdr, rr.Preference, rr.Host} -} -func (rr *SMIMEA) copy() RR { - return &SMIMEA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} -} -func (rr *SOA) copy() RR { - return &SOA{rr.Hdr, rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl} -} -func (rr *SPF) copy() RR { - Txt := make([]string, len(rr.Txt)) - copy(Txt, rr.Txt) - return &SPF{rr.Hdr, Txt} -} -func (rr *SRV) copy() RR { - return &SRV{rr.Hdr, rr.Priority, rr.Weight, rr.Port, rr.Target} -} -func (rr *SSHFP) copy() RR { - return &SSHFP{rr.Hdr, rr.Algorithm, rr.Type, rr.FingerPrint} -} -func (rr *TA) copy() RR { - return &TA{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} -} -func (rr *TALINK) copy() RR { - return &TALINK{rr.Hdr, rr.PreviousName, rr.NextName} -} -func (rr *TKEY) copy() RR { - return &TKEY{rr.Hdr, rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData} -} -func (rr *TLSA) copy() RR { - return &TLSA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} -} -func (rr *TSIG) copy() RR { - return &TSIG{rr.Hdr, rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData} -} -func (rr *TXT) copy() RR { - Txt := make([]string, len(rr.Txt)) - copy(Txt, rr.Txt) - return &TXT{rr.Hdr, Txt} -} -func (rr *UID) copy() RR { - return &UID{rr.Hdr, rr.Uid} -} -func (rr *UINFO) copy() RR { - return &UINFO{rr.Hdr, rr.Uinfo} -} -func (rr *URI) copy() RR { - return &URI{rr.Hdr, rr.Priority, rr.Weight, rr.Target} -} -func (rr *X25) copy() RR { - 
return &X25{rr.Hdr, rr.PSDNAddress} -} diff --git a/vendor/golang.org/x/net/bpf/asm.go b/vendor/golang.org/x/net/bpf/asm.go deleted file mode 100644 index 15e21b181..000000000 --- a/vendor/golang.org/x/net/bpf/asm.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package bpf - -import "fmt" - -// Assemble converts insts into raw instructions suitable for loading -// into a BPF virtual machine. -// -// Currently, no optimization is attempted, the assembled program flow -// is exactly as provided. -func Assemble(insts []Instruction) ([]RawInstruction, error) { - ret := make([]RawInstruction, len(insts)) - var err error - for i, inst := range insts { - ret[i], err = inst.Assemble() - if err != nil { - return nil, fmt.Errorf("assembling instruction %d: %s", i+1, err) - } - } - return ret, nil -} - -// Disassemble attempts to parse raw back into -// Instructions. Unrecognized RawInstructions are assumed to be an -// extension not implemented by this package, and are passed through -// unchanged to the output. The allDecoded value reports whether insts -// contains no RawInstructions. -func Disassemble(raw []RawInstruction) (insts []Instruction, allDecoded bool) { - insts = make([]Instruction, len(raw)) - allDecoded = true - for i, r := range raw { - insts[i] = r.Disassemble() - if _, ok := insts[i].(RawInstruction); ok { - allDecoded = false - } - } - return insts, allDecoded -} diff --git a/vendor/golang.org/x/net/bpf/constants.go b/vendor/golang.org/x/net/bpf/constants.go deleted file mode 100644 index 12f3ee835..000000000 --- a/vendor/golang.org/x/net/bpf/constants.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package bpf - -// A Register is a register of the BPF virtual machine. -type Register uint16 - -const ( - // RegA is the accumulator register. RegA is always the - // destination register of ALU operations. - RegA Register = iota - // RegX is the indirection register, used by LoadIndirect - // operations. - RegX -) - -// An ALUOp is an arithmetic or logic operation. -type ALUOp uint16 - -// ALU binary operation types. -const ( - ALUOpAdd ALUOp = iota << 4 - ALUOpSub - ALUOpMul - ALUOpDiv - ALUOpOr - ALUOpAnd - ALUOpShiftLeft - ALUOpShiftRight - aluOpNeg // Not exported because it's the only unary ALU operation, and gets its own instruction type. - ALUOpMod - ALUOpXor -) - -// A JumpTest is a comparison operator used in conditional jumps. -type JumpTest uint16 - -// Supported operators for conditional jumps. -// K can be RegX for JumpIfX -const ( - // K == A - JumpEqual JumpTest = iota - // K != A - JumpNotEqual - // K > A - JumpGreaterThan - // K < A - JumpLessThan - // K >= A - JumpGreaterOrEqual - // K <= A - JumpLessOrEqual - // K & A != 0 - JumpBitsSet - // K & A == 0 - JumpBitsNotSet -) - -// An Extension is a function call provided by the kernel that -// performs advanced operations that are expensive or impossible -// within the BPF virtual machine. -// -// Extensions are only implemented by the Linux kernel. -// -// TODO: should we prune this list? Some of these extensions seem -// either broken or near-impossible to use correctly, whereas other -// (len, random, ifindex) are quite useful. -type Extension int - -// Extension functions available in the Linux kernel. -const ( - // extOffset is the negative maximum number of instructions used - // to load instructions by overloading the K argument. - extOffset = -0x1000 - // ExtLen returns the length of the packet. - ExtLen Extension = 1 - // ExtProto returns the packet's L3 protocol type. 
- ExtProto Extension = 0 - // ExtType returns the packet's type (skb->pkt_type in the kernel) - // - // TODO: better documentation. How nice an API do we want to - // provide for these esoteric extensions? - ExtType Extension = 4 - // ExtPayloadOffset returns the offset of the packet payload, or - // the first protocol header that the kernel does not know how to - // parse. - ExtPayloadOffset Extension = 52 - // ExtInterfaceIndex returns the index of the interface on which - // the packet was received. - ExtInterfaceIndex Extension = 8 - // ExtNetlinkAttr returns the netlink attribute of type X at - // offset A. - ExtNetlinkAttr Extension = 12 - // ExtNetlinkAttrNested returns the nested netlink attribute of - // type X at offset A. - ExtNetlinkAttrNested Extension = 16 - // ExtMark returns the packet's mark value. - ExtMark Extension = 20 - // ExtQueue returns the packet's assigned hardware queue. - ExtQueue Extension = 24 - // ExtLinkLayerType returns the packet's hardware address type - // (e.g. Ethernet, Infiniband). - ExtLinkLayerType Extension = 28 - // ExtRXHash returns the packets receive hash. - // - // TODO: figure out what this rxhash actually is. - ExtRXHash Extension = 32 - // ExtCPUID returns the ID of the CPU processing the current - // packet. - ExtCPUID Extension = 36 - // ExtVLANTag returns the packet's VLAN tag. - ExtVLANTag Extension = 44 - // ExtVLANTagPresent returns non-zero if the packet has a VLAN - // tag. - // - // TODO: I think this might be a lie: it reads bit 0x1000 of the - // VLAN header, which changed meaning in recent revisions of the - // spec - this extension may now return meaningless information. - ExtVLANTagPresent Extension = 48 - // ExtVLANProto returns 0x8100 if the frame has a VLAN header, - // 0x88a8 if the frame has a "Q-in-Q" double VLAN header, or some - // other value if no VLAN information is present. - ExtVLANProto Extension = 60 - // ExtRand returns a uniformly random uint32. 
- ExtRand Extension = 56 -) - -// The following gives names to various bit patterns used in opcode construction. - -const ( - opMaskCls uint16 = 0x7 - // opClsLoad masks - opMaskLoadDest = 0x01 - opMaskLoadWidth = 0x18 - opMaskLoadMode = 0xe0 - // opClsALU & opClsJump - opMaskOperand = 0x08 - opMaskOperator = 0xf0 -) - -const ( - // +---------------+-----------------+---+---+---+ - // | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 0 | - // +---------------+-----------------+---+---+---+ - opClsLoadA uint16 = iota - // +---------------+-----------------+---+---+---+ - // | AddrMode (3b) | LoadWidth (2b) | 0 | 0 | 1 | - // +---------------+-----------------+---+---+---+ - opClsLoadX - // +---+---+---+---+---+---+---+---+ - // | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | - // +---+---+---+---+---+---+---+---+ - opClsStoreA - // +---+---+---+---+---+---+---+---+ - // | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | - // +---+---+---+---+---+---+---+---+ - opClsStoreX - // +---------------+-----------------+---+---+---+ - // | Operator (4b) | OperandSrc (1b) | 1 | 0 | 0 | - // +---------------+-----------------+---+---+---+ - opClsALU - // +-----------------------------+---+---+---+---+ - // | TestOperator (4b) | 0 | 1 | 0 | 1 | - // +-----------------------------+---+---+---+---+ - opClsJump - // +---+-------------------------+---+---+---+---+ - // | 0 | 0 | 0 | RetSrc (1b) | 0 | 1 | 1 | 0 | - // +---+-------------------------+---+---+---+---+ - opClsReturn - // +---+-------------------------+---+---+---+---+ - // | 0 | 0 | 0 | TXAorTAX (1b) | 0 | 1 | 1 | 1 | - // +---+-------------------------+---+---+---+---+ - opClsMisc -) - -const ( - opAddrModeImmediate uint16 = iota << 5 - opAddrModeAbsolute - opAddrModeIndirect - opAddrModeScratch - opAddrModePacketLen // actually an extension, not an addressing mode. 
- opAddrModeMemShift -) - -const ( - opLoadWidth4 uint16 = iota << 3 - opLoadWidth2 - opLoadWidth1 -) - -// Operand for ALU and Jump instructions -type opOperand uint16 - -// Supported operand sources. -const ( - opOperandConstant opOperand = iota << 3 - opOperandX -) - -// An jumpOp is a conditional jump condition. -type jumpOp uint16 - -// Supported jump conditions. -const ( - opJumpAlways jumpOp = iota << 4 - opJumpEqual - opJumpGT - opJumpGE - opJumpSet -) - -const ( - opRetSrcConstant uint16 = iota << 4 - opRetSrcA -) - -const ( - opMiscTAX = 0x00 - opMiscTXA = 0x80 -) diff --git a/vendor/golang.org/x/net/bpf/doc.go b/vendor/golang.org/x/net/bpf/doc.go deleted file mode 100644 index ae62feb53..000000000 --- a/vendor/golang.org/x/net/bpf/doc.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* - -Package bpf implements marshaling and unmarshaling of programs for the -Berkeley Packet Filter virtual machine, and provides a Go implementation -of the virtual machine. - -BPF's main use is to specify a packet filter for network taps, so that -the kernel doesn't have to expensively copy every packet it sees to -userspace. However, it's been repurposed to other areas where running -user code in-kernel is needed. For example, Linux's seccomp uses BPF -to apply security policies to system calls. For simplicity, this -documentation refers only to packets, but other uses of BPF have their -own data payloads. - -BPF programs run in a restricted virtual machine. It has almost no -access to kernel functions, and while conditional branches are -allowed, they can only jump forwards, to guarantee that there are no -infinite loops. - -The virtual machine - -The BPF VM is an accumulator machine. Its main register, called -register A, is an implicit source and destination in all arithmetic -and logic operations. 
The machine also has 16 scratch registers for -temporary storage, and an indirection register (register X) for -indirect memory access. All registers are 32 bits wide. - -Each run of a BPF program is given one packet, which is placed in the -VM's read-only "main memory". LoadAbsolute and LoadIndirect -instructions can fetch up to 32 bits at a time into register A for -examination. - -The goal of a BPF program is to produce and return a verdict (uint32), -which tells the kernel what to do with the packet. In the context of -packet filtering, the returned value is the number of bytes of the -packet to forward to userspace, or 0 to ignore the packet. Other -contexts like seccomp define their own return values. - -In order to simplify programs, attempts to read past the end of the -packet terminate the program execution with a verdict of 0 (ignore -packet). This means that the vast majority of BPF programs don't need -to do any explicit bounds checking. - -In addition to the bytes of the packet, some BPF programs have access -to extensions, which are essentially calls to kernel utility -functions. Currently, the only extensions supported by this package -are the Linux packet filter extensions. - -Examples - -This packet filter selects all ARP packets. - - bpf.Assemble([]bpf.Instruction{ - // Load "EtherType" field from the ethernet header. - bpf.LoadAbsolute{Off: 12, Size: 2}, - // Skip over the next instruction if EtherType is not ARP. - bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1}, - // Verdict is "send up to 4k of the packet to userspace." - bpf.RetConstant{Val: 4096}, - // Verdict is "ignore packet." - bpf.RetConstant{Val: 0}, - }) - -This packet filter captures a random 1% sample of traffic. - - bpf.Assemble([]bpf.Instruction{ - // Get a 32-bit random number from the Linux kernel. - bpf.LoadExtension{Num: bpf.ExtRand}, - // 1% dice roll? - bpf.JumpIf{Cond: bpf.JumpLessThan, Val: 2^32/100, SkipFalse: 1}, - // Capture. 
- bpf.RetConstant{Val: 4096}, - // Ignore. - bpf.RetConstant{Val: 0}, - }) - -*/ -package bpf // import "golang.org/x/net/bpf" diff --git a/vendor/golang.org/x/net/bpf/instructions.go b/vendor/golang.org/x/net/bpf/instructions.go deleted file mode 100644 index 3cffcaa01..000000000 --- a/vendor/golang.org/x/net/bpf/instructions.go +++ /dev/null @@ -1,726 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package bpf - -import "fmt" - -// An Instruction is one instruction executed by the BPF virtual -// machine. -type Instruction interface { - // Assemble assembles the Instruction into a RawInstruction. - Assemble() (RawInstruction, error) -} - -// A RawInstruction is a raw BPF virtual machine instruction. -type RawInstruction struct { - // Operation to execute. - Op uint16 - // For conditional jump instructions, the number of instructions - // to skip if the condition is true/false. - Jt uint8 - Jf uint8 - // Constant parameter. The meaning depends on the Op. - K uint32 -} - -// Assemble implements the Instruction Assemble method. -func (ri RawInstruction) Assemble() (RawInstruction, error) { return ri, nil } - -// Disassemble parses ri into an Instruction and returns it. If ri is -// not recognized by this package, ri itself is returned. 
-func (ri RawInstruction) Disassemble() Instruction { - switch ri.Op & opMaskCls { - case opClsLoadA, opClsLoadX: - reg := Register(ri.Op & opMaskLoadDest) - sz := 0 - switch ri.Op & opMaskLoadWidth { - case opLoadWidth4: - sz = 4 - case opLoadWidth2: - sz = 2 - case opLoadWidth1: - sz = 1 - default: - return ri - } - switch ri.Op & opMaskLoadMode { - case opAddrModeImmediate: - if sz != 4 { - return ri - } - return LoadConstant{Dst: reg, Val: ri.K} - case opAddrModeScratch: - if sz != 4 || ri.K > 15 { - return ri - } - return LoadScratch{Dst: reg, N: int(ri.K)} - case opAddrModeAbsolute: - if ri.K > extOffset+0xffffffff { - return LoadExtension{Num: Extension(-extOffset + ri.K)} - } - return LoadAbsolute{Size: sz, Off: ri.K} - case opAddrModeIndirect: - return LoadIndirect{Size: sz, Off: ri.K} - case opAddrModePacketLen: - if sz != 4 { - return ri - } - return LoadExtension{Num: ExtLen} - case opAddrModeMemShift: - return LoadMemShift{Off: ri.K} - default: - return ri - } - - case opClsStoreA: - if ri.Op != opClsStoreA || ri.K > 15 { - return ri - } - return StoreScratch{Src: RegA, N: int(ri.K)} - - case opClsStoreX: - if ri.Op != opClsStoreX || ri.K > 15 { - return ri - } - return StoreScratch{Src: RegX, N: int(ri.K)} - - case opClsALU: - switch op := ALUOp(ri.Op & opMaskOperator); op { - case ALUOpAdd, ALUOpSub, ALUOpMul, ALUOpDiv, ALUOpOr, ALUOpAnd, ALUOpShiftLeft, ALUOpShiftRight, ALUOpMod, ALUOpXor: - switch operand := opOperand(ri.Op & opMaskOperand); operand { - case opOperandX: - return ALUOpX{Op: op} - case opOperandConstant: - return ALUOpConstant{Op: op, Val: ri.K} - default: - return ri - } - case aluOpNeg: - return NegateA{} - default: - return ri - } - - case opClsJump: - switch op := jumpOp(ri.Op & opMaskOperator); op { - case opJumpAlways: - return Jump{Skip: ri.K} - case opJumpEqual, opJumpGT, opJumpGE, opJumpSet: - cond, skipTrue, skipFalse := jumpOpToTest(op, ri.Jt, ri.Jf) - switch operand := opOperand(ri.Op & opMaskOperand); operand { - case 
opOperandX: - return JumpIfX{Cond: cond, SkipTrue: skipTrue, SkipFalse: skipFalse} - case opOperandConstant: - return JumpIf{Cond: cond, Val: ri.K, SkipTrue: skipTrue, SkipFalse: skipFalse} - default: - return ri - } - default: - return ri - } - - case opClsReturn: - switch ri.Op { - case opClsReturn | opRetSrcA: - return RetA{} - case opClsReturn | opRetSrcConstant: - return RetConstant{Val: ri.K} - default: - return ri - } - - case opClsMisc: - switch ri.Op { - case opClsMisc | opMiscTAX: - return TAX{} - case opClsMisc | opMiscTXA: - return TXA{} - default: - return ri - } - - default: - panic("unreachable") // switch is exhaustive on the bit pattern - } -} - -func jumpOpToTest(op jumpOp, skipTrue uint8, skipFalse uint8) (JumpTest, uint8, uint8) { - var test JumpTest - - // Decode "fake" jump conditions that don't appear in machine code - // Ensures the Assemble -> Disassemble stage recreates the same instructions - // See https://github.com/golang/go/issues/18470 - if skipTrue == 0 { - switch op { - case opJumpEqual: - test = JumpNotEqual - case opJumpGT: - test = JumpLessOrEqual - case opJumpGE: - test = JumpLessThan - case opJumpSet: - test = JumpBitsNotSet - } - - return test, skipFalse, 0 - } - - switch op { - case opJumpEqual: - test = JumpEqual - case opJumpGT: - test = JumpGreaterThan - case opJumpGE: - test = JumpGreaterOrEqual - case opJumpSet: - test = JumpBitsSet - } - - return test, skipTrue, skipFalse -} - -// LoadConstant loads Val into register Dst. -type LoadConstant struct { - Dst Register - Val uint32 -} - -// Assemble implements the Instruction Assemble method. -func (a LoadConstant) Assemble() (RawInstruction, error) { - return assembleLoad(a.Dst, 4, opAddrModeImmediate, a.Val) -} - -// String returns the instruction in assembler notation. 
-func (a LoadConstant) String() string { - switch a.Dst { - case RegA: - return fmt.Sprintf("ld #%d", a.Val) - case RegX: - return fmt.Sprintf("ldx #%d", a.Val) - default: - return fmt.Sprintf("unknown instruction: %#v", a) - } -} - -// LoadScratch loads scratch[N] into register Dst. -type LoadScratch struct { - Dst Register - N int // 0-15 -} - -// Assemble implements the Instruction Assemble method. -func (a LoadScratch) Assemble() (RawInstruction, error) { - if a.N < 0 || a.N > 15 { - return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N) - } - return assembleLoad(a.Dst, 4, opAddrModeScratch, uint32(a.N)) -} - -// String returns the instruction in assembler notation. -func (a LoadScratch) String() string { - switch a.Dst { - case RegA: - return fmt.Sprintf("ld M[%d]", a.N) - case RegX: - return fmt.Sprintf("ldx M[%d]", a.N) - default: - return fmt.Sprintf("unknown instruction: %#v", a) - } -} - -// LoadAbsolute loads packet[Off:Off+Size] as an integer value into -// register A. -type LoadAbsolute struct { - Off uint32 - Size int // 1, 2 or 4 -} - -// Assemble implements the Instruction Assemble method. -func (a LoadAbsolute) Assemble() (RawInstruction, error) { - return assembleLoad(RegA, a.Size, opAddrModeAbsolute, a.Off) -} - -// String returns the instruction in assembler notation. -func (a LoadAbsolute) String() string { - switch a.Size { - case 1: // byte - return fmt.Sprintf("ldb [%d]", a.Off) - case 2: // half word - return fmt.Sprintf("ldh [%d]", a.Off) - case 4: // word - if a.Off > extOffset+0xffffffff { - return LoadExtension{Num: Extension(a.Off + 0x1000)}.String() - } - return fmt.Sprintf("ld [%d]", a.Off) - default: - return fmt.Sprintf("unknown instruction: %#v", a) - } -} - -// LoadIndirect loads packet[X+Off:X+Off+Size] as an integer value -// into register A. -type LoadIndirect struct { - Off uint32 - Size int // 1, 2 or 4 -} - -// Assemble implements the Instruction Assemble method. 
-func (a LoadIndirect) Assemble() (RawInstruction, error) { - return assembleLoad(RegA, a.Size, opAddrModeIndirect, a.Off) -} - -// String returns the instruction in assembler notation. -func (a LoadIndirect) String() string { - switch a.Size { - case 1: // byte - return fmt.Sprintf("ldb [x + %d]", a.Off) - case 2: // half word - return fmt.Sprintf("ldh [x + %d]", a.Off) - case 4: // word - return fmt.Sprintf("ld [x + %d]", a.Off) - default: - return fmt.Sprintf("unknown instruction: %#v", a) - } -} - -// LoadMemShift multiplies the first 4 bits of the byte at packet[Off] -// by 4 and stores the result in register X. -// -// This instruction is mainly useful to load into X the length of an -// IPv4 packet header in a single instruction, rather than have to do -// the arithmetic on the header's first byte by hand. -type LoadMemShift struct { - Off uint32 -} - -// Assemble implements the Instruction Assemble method. -func (a LoadMemShift) Assemble() (RawInstruction, error) { - return assembleLoad(RegX, 1, opAddrModeMemShift, a.Off) -} - -// String returns the instruction in assembler notation. -func (a LoadMemShift) String() string { - return fmt.Sprintf("ldx 4*([%d]&0xf)", a.Off) -} - -// LoadExtension invokes a linux-specific extension and stores the -// result in register A. -type LoadExtension struct { - Num Extension -} - -// Assemble implements the Instruction Assemble method. -func (a LoadExtension) Assemble() (RawInstruction, error) { - if a.Num == ExtLen { - return assembleLoad(RegA, 4, opAddrModePacketLen, 0) - } - return assembleLoad(RegA, 4, opAddrModeAbsolute, uint32(extOffset+a.Num)) -} - -// String returns the instruction in assembler notation. 
-func (a LoadExtension) String() string { - switch a.Num { - case ExtLen: - return "ld #len" - case ExtProto: - return "ld #proto" - case ExtType: - return "ld #type" - case ExtPayloadOffset: - return "ld #poff" - case ExtInterfaceIndex: - return "ld #ifidx" - case ExtNetlinkAttr: - return "ld #nla" - case ExtNetlinkAttrNested: - return "ld #nlan" - case ExtMark: - return "ld #mark" - case ExtQueue: - return "ld #queue" - case ExtLinkLayerType: - return "ld #hatype" - case ExtRXHash: - return "ld #rxhash" - case ExtCPUID: - return "ld #cpu" - case ExtVLANTag: - return "ld #vlan_tci" - case ExtVLANTagPresent: - return "ld #vlan_avail" - case ExtVLANProto: - return "ld #vlan_tpid" - case ExtRand: - return "ld #rand" - default: - return fmt.Sprintf("unknown instruction: %#v", a) - } -} - -// StoreScratch stores register Src into scratch[N]. -type StoreScratch struct { - Src Register - N int // 0-15 -} - -// Assemble implements the Instruction Assemble method. -func (a StoreScratch) Assemble() (RawInstruction, error) { - if a.N < 0 || a.N > 15 { - return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N) - } - var op uint16 - switch a.Src { - case RegA: - op = opClsStoreA - case RegX: - op = opClsStoreX - default: - return RawInstruction{}, fmt.Errorf("invalid source register %v", a.Src) - } - - return RawInstruction{ - Op: op, - K: uint32(a.N), - }, nil -} - -// String returns the instruction in assembler notation. -func (a StoreScratch) String() string { - switch a.Src { - case RegA: - return fmt.Sprintf("st M[%d]", a.N) - case RegX: - return fmt.Sprintf("stx M[%d]", a.N) - default: - return fmt.Sprintf("unknown instruction: %#v", a) - } -} - -// ALUOpConstant executes A = A Val. -type ALUOpConstant struct { - Op ALUOp - Val uint32 -} - -// Assemble implements the Instruction Assemble method. 
-func (a ALUOpConstant) Assemble() (RawInstruction, error) { - return RawInstruction{ - Op: opClsALU | uint16(opOperandConstant) | uint16(a.Op), - K: a.Val, - }, nil -} - -// String returns the instruction in assembler notation. -func (a ALUOpConstant) String() string { - switch a.Op { - case ALUOpAdd: - return fmt.Sprintf("add #%d", a.Val) - case ALUOpSub: - return fmt.Sprintf("sub #%d", a.Val) - case ALUOpMul: - return fmt.Sprintf("mul #%d", a.Val) - case ALUOpDiv: - return fmt.Sprintf("div #%d", a.Val) - case ALUOpMod: - return fmt.Sprintf("mod #%d", a.Val) - case ALUOpAnd: - return fmt.Sprintf("and #%d", a.Val) - case ALUOpOr: - return fmt.Sprintf("or #%d", a.Val) - case ALUOpXor: - return fmt.Sprintf("xor #%d", a.Val) - case ALUOpShiftLeft: - return fmt.Sprintf("lsh #%d", a.Val) - case ALUOpShiftRight: - return fmt.Sprintf("rsh #%d", a.Val) - default: - return fmt.Sprintf("unknown instruction: %#v", a) - } -} - -// ALUOpX executes A = A X -type ALUOpX struct { - Op ALUOp -} - -// Assemble implements the Instruction Assemble method. -func (a ALUOpX) Assemble() (RawInstruction, error) { - return RawInstruction{ - Op: opClsALU | uint16(opOperandX) | uint16(a.Op), - }, nil -} - -// String returns the instruction in assembler notation. -func (a ALUOpX) String() string { - switch a.Op { - case ALUOpAdd: - return "add x" - case ALUOpSub: - return "sub x" - case ALUOpMul: - return "mul x" - case ALUOpDiv: - return "div x" - case ALUOpMod: - return "mod x" - case ALUOpAnd: - return "and x" - case ALUOpOr: - return "or x" - case ALUOpXor: - return "xor x" - case ALUOpShiftLeft: - return "lsh x" - case ALUOpShiftRight: - return "rsh x" - default: - return fmt.Sprintf("unknown instruction: %#v", a) - } -} - -// NegateA executes A = -A. -type NegateA struct{} - -// Assemble implements the Instruction Assemble method. 
-func (a NegateA) Assemble() (RawInstruction, error) { - return RawInstruction{ - Op: opClsALU | uint16(aluOpNeg), - }, nil -} - -// String returns the instruction in assembler notation. -func (a NegateA) String() string { - return fmt.Sprintf("neg") -} - -// Jump skips the following Skip instructions in the program. -type Jump struct { - Skip uint32 -} - -// Assemble implements the Instruction Assemble method. -func (a Jump) Assemble() (RawInstruction, error) { - return RawInstruction{ - Op: opClsJump | uint16(opJumpAlways), - K: a.Skip, - }, nil -} - -// String returns the instruction in assembler notation. -func (a Jump) String() string { - return fmt.Sprintf("ja %d", a.Skip) -} - -// JumpIf skips the following Skip instructions in the program if A -// Val is true. -type JumpIf struct { - Cond JumpTest - Val uint32 - SkipTrue uint8 - SkipFalse uint8 -} - -// Assemble implements the Instruction Assemble method. -func (a JumpIf) Assemble() (RawInstruction, error) { - return jumpToRaw(a.Cond, opOperandConstant, a.Val, a.SkipTrue, a.SkipFalse) -} - -// String returns the instruction in assembler notation. -func (a JumpIf) String() string { - return jumpToString(a.Cond, fmt.Sprintf("#%d", a.Val), a.SkipTrue, a.SkipFalse) -} - -// JumpIfX skips the following Skip instructions in the program if A -// X is true. -type JumpIfX struct { - Cond JumpTest - SkipTrue uint8 - SkipFalse uint8 -} - -// Assemble implements the Instruction Assemble method. -func (a JumpIfX) Assemble() (RawInstruction, error) { - return jumpToRaw(a.Cond, opOperandX, 0, a.SkipTrue, a.SkipFalse) -} - -// String returns the instruction in assembler notation. 
-func (a JumpIfX) String() string { - return jumpToString(a.Cond, "x", a.SkipTrue, a.SkipFalse) -} - -// jumpToRaw assembles a jump instruction into a RawInstruction -func jumpToRaw(test JumpTest, operand opOperand, k uint32, skipTrue, skipFalse uint8) (RawInstruction, error) { - var ( - cond jumpOp - flip bool - ) - switch test { - case JumpEqual: - cond = opJumpEqual - case JumpNotEqual: - cond, flip = opJumpEqual, true - case JumpGreaterThan: - cond = opJumpGT - case JumpLessThan: - cond, flip = opJumpGE, true - case JumpGreaterOrEqual: - cond = opJumpGE - case JumpLessOrEqual: - cond, flip = opJumpGT, true - case JumpBitsSet: - cond = opJumpSet - case JumpBitsNotSet: - cond, flip = opJumpSet, true - default: - return RawInstruction{}, fmt.Errorf("unknown JumpTest %v", test) - } - jt, jf := skipTrue, skipFalse - if flip { - jt, jf = jf, jt - } - return RawInstruction{ - Op: opClsJump | uint16(cond) | uint16(operand), - Jt: jt, - Jf: jf, - K: k, - }, nil -} - -// jumpToString converts a jump instruction to assembler notation -func jumpToString(cond JumpTest, operand string, skipTrue, skipFalse uint8) string { - switch cond { - // K == A - case JumpEqual: - return conditionalJump(operand, skipTrue, skipFalse, "jeq", "jneq") - // K != A - case JumpNotEqual: - return fmt.Sprintf("jneq %s,%d", operand, skipTrue) - // K > A - case JumpGreaterThan: - return conditionalJump(operand, skipTrue, skipFalse, "jgt", "jle") - // K < A - case JumpLessThan: - return fmt.Sprintf("jlt %s,%d", operand, skipTrue) - // K >= A - case JumpGreaterOrEqual: - return conditionalJump(operand, skipTrue, skipFalse, "jge", "jlt") - // K <= A - case JumpLessOrEqual: - return fmt.Sprintf("jle %s,%d", operand, skipTrue) - // K & A != 0 - case JumpBitsSet: - if skipFalse > 0 { - return fmt.Sprintf("jset %s,%d,%d", operand, skipTrue, skipFalse) - } - return fmt.Sprintf("jset %s,%d", operand, skipTrue) - // K & A == 0, there is no assembler instruction for JumpBitNotSet, use JumpBitSet and invert 
skips - case JumpBitsNotSet: - return jumpToString(JumpBitsSet, operand, skipFalse, skipTrue) - default: - return fmt.Sprintf("unknown JumpTest %#v", cond) - } -} - -func conditionalJump(operand string, skipTrue, skipFalse uint8, positiveJump, negativeJump string) string { - if skipTrue > 0 { - if skipFalse > 0 { - return fmt.Sprintf("%s %s,%d,%d", positiveJump, operand, skipTrue, skipFalse) - } - return fmt.Sprintf("%s %s,%d", positiveJump, operand, skipTrue) - } - return fmt.Sprintf("%s %s,%d", negativeJump, operand, skipFalse) -} - -// RetA exits the BPF program, returning the value of register A. -type RetA struct{} - -// Assemble implements the Instruction Assemble method. -func (a RetA) Assemble() (RawInstruction, error) { - return RawInstruction{ - Op: opClsReturn | opRetSrcA, - }, nil -} - -// String returns the instruction in assembler notation. -func (a RetA) String() string { - return fmt.Sprintf("ret a") -} - -// RetConstant exits the BPF program, returning a constant value. -type RetConstant struct { - Val uint32 -} - -// Assemble implements the Instruction Assemble method. -func (a RetConstant) Assemble() (RawInstruction, error) { - return RawInstruction{ - Op: opClsReturn | opRetSrcConstant, - K: a.Val, - }, nil -} - -// String returns the instruction in assembler notation. -func (a RetConstant) String() string { - return fmt.Sprintf("ret #%d", a.Val) -} - -// TXA copies the value of register X to register A. -type TXA struct{} - -// Assemble implements the Instruction Assemble method. -func (a TXA) Assemble() (RawInstruction, error) { - return RawInstruction{ - Op: opClsMisc | opMiscTXA, - }, nil -} - -// String returns the instruction in assembler notation. -func (a TXA) String() string { - return fmt.Sprintf("txa") -} - -// TAX copies the value of register A to register X. -type TAX struct{} - -// Assemble implements the Instruction Assemble method. 
-func (a TAX) Assemble() (RawInstruction, error) { - return RawInstruction{ - Op: opClsMisc | opMiscTAX, - }, nil -} - -// String returns the instruction in assembler notation. -func (a TAX) String() string { - return fmt.Sprintf("tax") -} - -func assembleLoad(dst Register, loadSize int, mode uint16, k uint32) (RawInstruction, error) { - var ( - cls uint16 - sz uint16 - ) - switch dst { - case RegA: - cls = opClsLoadA - case RegX: - cls = opClsLoadX - default: - return RawInstruction{}, fmt.Errorf("invalid target register %v", dst) - } - switch loadSize { - case 1: - sz = opLoadWidth1 - case 2: - sz = opLoadWidth2 - case 4: - sz = opLoadWidth4 - default: - return RawInstruction{}, fmt.Errorf("invalid load byte length %d", sz) - } - return RawInstruction{ - Op: cls | sz | mode, - K: k, - }, nil -} diff --git a/vendor/golang.org/x/net/bpf/setter.go b/vendor/golang.org/x/net/bpf/setter.go deleted file mode 100644 index 43e35f0ac..000000000 --- a/vendor/golang.org/x/net/bpf/setter.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package bpf - -// A Setter is a type which can attach a compiled BPF filter to itself. -type Setter interface { - SetBPF(filter []RawInstruction) error -} diff --git a/vendor/golang.org/x/net/bpf/vm.go b/vendor/golang.org/x/net/bpf/vm.go deleted file mode 100644 index 73f57f1f7..000000000 --- a/vendor/golang.org/x/net/bpf/vm.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package bpf - -import ( - "errors" - "fmt" -) - -// A VM is an emulated BPF virtual machine. -type VM struct { - filter []Instruction -} - -// NewVM returns a new VM using the input BPF program. 
-func NewVM(filter []Instruction) (*VM, error) { - if len(filter) == 0 { - return nil, errors.New("one or more Instructions must be specified") - } - - for i, ins := range filter { - check := len(filter) - (i + 1) - switch ins := ins.(type) { - // Check for out-of-bounds jumps in instructions - case Jump: - if check <= int(ins.Skip) { - return nil, fmt.Errorf("cannot jump %d instructions; jumping past program bounds", ins.Skip) - } - case JumpIf: - if check <= int(ins.SkipTrue) { - return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue) - } - if check <= int(ins.SkipFalse) { - return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse) - } - case JumpIfX: - if check <= int(ins.SkipTrue) { - return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue) - } - if check <= int(ins.SkipFalse) { - return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse) - } - // Check for division or modulus by zero - case ALUOpConstant: - if ins.Val != 0 { - break - } - - switch ins.Op { - case ALUOpDiv, ALUOpMod: - return nil, errors.New("cannot divide by zero using ALUOpConstant") - } - // Check for unknown extensions - case LoadExtension: - switch ins.Num { - case ExtLen: - default: - return nil, fmt.Errorf("extension %d not implemented", ins.Num) - } - } - } - - // Make sure last instruction is a return instruction - switch filter[len(filter)-1].(type) { - case RetA, RetConstant: - default: - return nil, errors.New("BPF program must end with RetA or RetConstant") - } - - // Though our VM works using disassembled instructions, we - // attempt to assemble the input filter anyway to ensure it is compatible - // with an operating system VM. - _, err := Assemble(filter) - - return &VM{ - filter: filter, - }, err -} - -// Run runs the VM's BPF program against the input bytes. 
-// Run returns the number of bytes accepted by the BPF program, and any errors -// which occurred while processing the program. -func (v *VM) Run(in []byte) (int, error) { - var ( - // Registers of the virtual machine - regA uint32 - regX uint32 - regScratch [16]uint32 - - // OK is true if the program should continue processing the next - // instruction, or false if not, causing the loop to break - ok = true - ) - - // TODO(mdlayher): implement: - // - NegateA: - // - would require a change from uint32 registers to int32 - // registers - - // TODO(mdlayher): add interop tests that check signedness of ALU - // operations against kernel implementation, and make sure Go - // implementation matches behavior - - for i := 0; i < len(v.filter) && ok; i++ { - ins := v.filter[i] - - switch ins := ins.(type) { - case ALUOpConstant: - regA = aluOpConstant(ins, regA) - case ALUOpX: - regA, ok = aluOpX(ins, regA, regX) - case Jump: - i += int(ins.Skip) - case JumpIf: - jump := jumpIf(ins, regA) - i += jump - case JumpIfX: - jump := jumpIfX(ins, regA, regX) - i += jump - case LoadAbsolute: - regA, ok = loadAbsolute(ins, in) - case LoadConstant: - regA, regX = loadConstant(ins, regA, regX) - case LoadExtension: - regA = loadExtension(ins, in) - case LoadIndirect: - regA, ok = loadIndirect(ins, in, regX) - case LoadMemShift: - regX, ok = loadMemShift(ins, in) - case LoadScratch: - regA, regX = loadScratch(ins, regScratch, regA, regX) - case RetA: - return int(regA), nil - case RetConstant: - return int(ins.Val), nil - case StoreScratch: - regScratch = storeScratch(ins, regScratch, regA, regX) - case TAX: - regX = regA - case TXA: - regA = regX - default: - return 0, fmt.Errorf("unknown Instruction at index %d: %T", i, ins) - } - } - - return 0, nil -} diff --git a/vendor/golang.org/x/net/bpf/vm_instructions.go b/vendor/golang.org/x/net/bpf/vm_instructions.go deleted file mode 100644 index f0d2e55bd..000000000 --- a/vendor/golang.org/x/net/bpf/vm_instructions.go +++ /dev/null @@ 
-1,181 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package bpf - -import ( - "encoding/binary" - "fmt" -) - -func aluOpConstant(ins ALUOpConstant, regA uint32) uint32 { - return aluOpCommon(ins.Op, regA, ins.Val) -} - -func aluOpX(ins ALUOpX, regA uint32, regX uint32) (uint32, bool) { - // Guard against division or modulus by zero by terminating - // the program, as the OS BPF VM does - if regX == 0 { - switch ins.Op { - case ALUOpDiv, ALUOpMod: - return 0, false - } - } - - return aluOpCommon(ins.Op, regA, regX), true -} - -func aluOpCommon(op ALUOp, regA uint32, value uint32) uint32 { - switch op { - case ALUOpAdd: - return regA + value - case ALUOpSub: - return regA - value - case ALUOpMul: - return regA * value - case ALUOpDiv: - // Division by zero not permitted by NewVM and aluOpX checks - return regA / value - case ALUOpOr: - return regA | value - case ALUOpAnd: - return regA & value - case ALUOpShiftLeft: - return regA << value - case ALUOpShiftRight: - return regA >> value - case ALUOpMod: - // Modulus by zero not permitted by NewVM and aluOpX checks - return regA % value - case ALUOpXor: - return regA ^ value - default: - return regA - } -} - -func jumpIf(ins JumpIf, regA uint32) int { - return jumpIfCommon(ins.Cond, ins.SkipTrue, ins.SkipFalse, regA, ins.Val) -} - -func jumpIfX(ins JumpIfX, regA uint32, regX uint32) int { - return jumpIfCommon(ins.Cond, ins.SkipTrue, ins.SkipFalse, regA, regX) -} - -func jumpIfCommon(cond JumpTest, skipTrue, skipFalse uint8, regA uint32, value uint32) int { - var ok bool - - switch cond { - case JumpEqual: - ok = regA == value - case JumpNotEqual: - ok = regA != value - case JumpGreaterThan: - ok = regA > value - case JumpLessThan: - ok = regA < value - case JumpGreaterOrEqual: - ok = regA >= value - case JumpLessOrEqual: - ok = regA <= value - case JumpBitsSet: - ok = (regA & value) != 0 - 
case JumpBitsNotSet: - ok = (regA & value) == 0 - } - - if ok { - return int(skipTrue) - } - - return int(skipFalse) -} - -func loadAbsolute(ins LoadAbsolute, in []byte) (uint32, bool) { - offset := int(ins.Off) - size := int(ins.Size) - - return loadCommon(in, offset, size) -} - -func loadConstant(ins LoadConstant, regA uint32, regX uint32) (uint32, uint32) { - switch ins.Dst { - case RegA: - regA = ins.Val - case RegX: - regX = ins.Val - } - - return regA, regX -} - -func loadExtension(ins LoadExtension, in []byte) uint32 { - switch ins.Num { - case ExtLen: - return uint32(len(in)) - default: - panic(fmt.Sprintf("unimplemented extension: %d", ins.Num)) - } -} - -func loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) { - offset := int(ins.Off) + int(regX) - size := int(ins.Size) - - return loadCommon(in, offset, size) -} - -func loadMemShift(ins LoadMemShift, in []byte) (uint32, bool) { - offset := int(ins.Off) - - if !inBounds(len(in), offset, 0) { - return 0, false - } - - // Mask off high 4 bits and multiply low 4 bits by 4 - return uint32(in[offset]&0x0f) * 4, true -} - -func inBounds(inLen int, offset int, size int) bool { - return offset+size <= inLen -} - -func loadCommon(in []byte, offset int, size int) (uint32, bool) { - if !inBounds(len(in), offset, size) { - return 0, false - } - - switch size { - case 1: - return uint32(in[offset]), true - case 2: - return uint32(binary.BigEndian.Uint16(in[offset : offset+size])), true - case 4: - return uint32(binary.BigEndian.Uint32(in[offset : offset+size])), true - default: - panic(fmt.Sprintf("invalid load size: %d", size)) - } -} - -func loadScratch(ins LoadScratch, regScratch [16]uint32, regA uint32, regX uint32) (uint32, uint32) { - switch ins.Dst { - case RegA: - regA = regScratch[ins.N] - case RegX: - regX = regScratch[ins.N] - } - - return regA, regX -} - -func storeScratch(ins StoreScratch, regScratch [16]uint32, regA uint32, regX uint32) [16]uint32 { - switch ins.Src { - case RegA: - 
regScratch[ins.N] = regA - case RegX: - regScratch[ins.N] = regX - } - - return regScratch -} diff --git a/vendor/golang.org/x/net/html/atom/atom.go b/vendor/golang.org/x/net/html/atom/atom.go deleted file mode 100644 index cd0a8ac15..000000000 --- a/vendor/golang.org/x/net/html/atom/atom.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package atom provides integer codes (also known as atoms) for a fixed set of -// frequently occurring HTML strings: tag names and attribute keys such as "p" -// and "id". -// -// Sharing an atom's name between all elements with the same tag can result in -// fewer string allocations when tokenizing and parsing HTML. Integer -// comparisons are also generally faster than string comparisons. -// -// The value of an atom's particular code is not guaranteed to stay the same -// between versions of this package. Neither is any ordering guaranteed: -// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to -// be dense. The only guarantees are that e.g. looking up "div" will yield -// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0. -package atom // import "golang.org/x/net/html/atom" - -// Atom is an integer code for a string. The zero value maps to "". -type Atom uint32 - -// String returns the atom's name. -func (a Atom) String() string { - start := uint32(a >> 8) - n := uint32(a & 0xff) - if start+n > uint32(len(atomText)) { - return "" - } - return atomText[start : start+n] -} - -func (a Atom) string() string { - return atomText[a>>8 : a>>8+a&0xff] -} - -// fnv computes the FNV hash with an arbitrary starting value h. 
-func fnv(h uint32, s []byte) uint32 { - for i := range s { - h ^= uint32(s[i]) - h *= 16777619 - } - return h -} - -func match(s string, t []byte) bool { - for i, c := range t { - if s[i] != c { - return false - } - } - return true -} - -// Lookup returns the atom whose name is s. It returns zero if there is no -// such atom. The lookup is case sensitive. -func Lookup(s []byte) Atom { - if len(s) == 0 || len(s) > maxAtomLen { - return 0 - } - h := fnv(hash0, s) - if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { - return a - } - if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { - return a - } - return 0 -} - -// String returns a string whose contents are equal to s. In that sense, it is -// equivalent to string(s) but may be more efficient. -func String(s []byte) string { - if a := Lookup(s); a != 0 { - return a.String() - } - return string(s) -} diff --git a/vendor/golang.org/x/net/html/atom/gen.go b/vendor/golang.org/x/net/html/atom/gen.go deleted file mode 100644 index 5d052781b..000000000 --- a/vendor/golang.org/x/net/html/atom/gen.go +++ /dev/null @@ -1,712 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -//go:generate go run gen.go -//go:generate go run gen.go -test - -package main - -import ( - "bytes" - "flag" - "fmt" - "go/format" - "io/ioutil" - "math/rand" - "os" - "sort" - "strings" -) - -// identifier converts s to a Go exported identifier. -// It converts "div" to "Div" and "accept-charset" to "AcceptCharset". 
-func identifier(s string) string { - b := make([]byte, 0, len(s)) - cap := true - for _, c := range s { - if c == '-' { - cap = true - continue - } - if cap && 'a' <= c && c <= 'z' { - c -= 'a' - 'A' - } - cap = false - b = append(b, byte(c)) - } - return string(b) -} - -var test = flag.Bool("test", false, "generate table_test.go") - -func genFile(name string, buf *bytes.Buffer) { - b, err := format.Source(buf.Bytes()) - if err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } - if err := ioutil.WriteFile(name, b, 0644); err != nil { - fmt.Fprintln(os.Stderr, err) - os.Exit(1) - } -} - -func main() { - flag.Parse() - - var all []string - all = append(all, elements...) - all = append(all, attributes...) - all = append(all, eventHandlers...) - all = append(all, extra...) - sort.Strings(all) - - // uniq - lists have dups - w := 0 - for _, s := range all { - if w == 0 || all[w-1] != s { - all[w] = s - w++ - } - } - all = all[:w] - - if *test { - var buf bytes.Buffer - fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n") - fmt.Fprintln(&buf, "//go:generate go run gen.go -test\n") - fmt.Fprintln(&buf, "package atom\n") - fmt.Fprintln(&buf, "var testAtomList = []string{") - for _, s := range all { - fmt.Fprintf(&buf, "\t%q,\n", s) - } - fmt.Fprintln(&buf, "}") - - genFile("table_test.go", &buf) - return - } - - // Find hash that minimizes table size. - var best *table - for i := 0; i < 1000000; i++ { - if best != nil && 1<<(best.k-1) < len(all) { - break - } - h := rand.Uint32() - for k := uint(0); k <= 16; k++ { - if best != nil && k >= best.k { - break - } - var t table - if t.init(h, k, all) { - best = &t - break - } - } - } - if best == nil { - fmt.Fprintf(os.Stderr, "failed to construct string table\n") - os.Exit(1) - } - - // Lay out strings, using overlaps when possible. - layout := append([]string{}, all...) 
- - // Remove strings that are substrings of other strings - for changed := true; changed; { - changed = false - for i, s := range layout { - if s == "" { - continue - } - for j, t := range layout { - if i != j && t != "" && strings.Contains(s, t) { - changed = true - layout[j] = "" - } - } - } - } - - // Join strings where one suffix matches another prefix. - for { - // Find best i, j, k such that layout[i][len-k:] == layout[j][:k], - // maximizing overlap length k. - besti := -1 - bestj := -1 - bestk := 0 - for i, s := range layout { - if s == "" { - continue - } - for j, t := range layout { - if i == j { - continue - } - for k := bestk + 1; k <= len(s) && k <= len(t); k++ { - if s[len(s)-k:] == t[:k] { - besti = i - bestj = j - bestk = k - } - } - } - } - if bestk > 0 { - layout[besti] += layout[bestj][bestk:] - layout[bestj] = "" - continue - } - break - } - - text := strings.Join(layout, "") - - atom := map[string]uint32{} - for _, s := range all { - off := strings.Index(text, s) - if off < 0 { - panic("lost string " + s) - } - atom[s] = uint32(off<<8 | len(s)) - } - - var buf bytes.Buffer - // Generate the Go code. 
- fmt.Fprintln(&buf, "// Code generated by go generate gen.go; DO NOT EDIT.\n") - fmt.Fprintln(&buf, "//go:generate go run gen.go\n") - fmt.Fprintln(&buf, "package atom\n\nconst (") - - // compute max len - maxLen := 0 - for _, s := range all { - if maxLen < len(s) { - maxLen = len(s) - } - fmt.Fprintf(&buf, "\t%s Atom = %#x\n", identifier(s), atom[s]) - } - fmt.Fprintln(&buf, ")\n") - - fmt.Fprintf(&buf, "const hash0 = %#x\n\n", best.h0) - fmt.Fprintf(&buf, "const maxAtomLen = %d\n\n", maxLen) - - fmt.Fprintf(&buf, "var table = [1<<%d]Atom{\n", best.k) - for i, s := range best.tab { - if s == "" { - continue - } - fmt.Fprintf(&buf, "\t%#x: %#x, // %s\n", i, atom[s], s) - } - fmt.Fprintf(&buf, "}\n") - datasize := (1 << best.k) * 4 - - fmt.Fprintln(&buf, "const atomText =") - textsize := len(text) - for len(text) > 60 { - fmt.Fprintf(&buf, "\t%q +\n", text[:60]) - text = text[60:] - } - fmt.Fprintf(&buf, "\t%q\n\n", text) - - genFile("table.go", &buf) - - fmt.Fprintf(os.Stdout, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize) -} - -type byLen []string - -func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) } -func (x byLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] } -func (x byLen) Len() int { return len(x) } - -// fnv computes the FNV hash with an arbitrary starting value h. -func fnv(h uint32, s string) uint32 { - for i := 0; i < len(s); i++ { - h ^= uint32(s[i]) - h *= 16777619 - } - return h -} - -// A table represents an attempt at constructing the lookup table. -// The lookup table uses cuckoo hashing, meaning that each string -// can be found in one of two positions. -type table struct { - h0 uint32 - k uint - mask uint32 - tab []string -} - -// hash returns the two hashes for s. -func (t *table) hash(s string) (h1, h2 uint32) { - h := fnv(t.h0, s) - h1 = h & t.mask - h2 = (h >> 16) & t.mask - return -} - -// init initializes the table with the given parameters. 
-// h0 is the initial hash value, -// k is the number of bits of hash value to use, and -// x is the list of strings to store in the table. -// init returns false if the table cannot be constructed. -func (t *table) init(h0 uint32, k uint, x []string) bool { - t.h0 = h0 - t.k = k - t.tab = make([]string, 1< len(t.tab) { - return false - } - s := t.tab[i] - h1, h2 := t.hash(s) - j := h1 + h2 - i - if t.tab[j] != "" && !t.push(j, depth+1) { - return false - } - t.tab[j] = s - return true -} - -// The lists of element names and attribute keys were taken from -// https://html.spec.whatwg.org/multipage/indices.html#index -// as of the "HTML Living Standard - Last Updated 16 April 2018" version. - -// "command", "keygen" and "menuitem" have been removed from the spec, -// but are kept here for backwards compatibility. -var elements = []string{ - "a", - "abbr", - "address", - "area", - "article", - "aside", - "audio", - "b", - "base", - "bdi", - "bdo", - "blockquote", - "body", - "br", - "button", - "canvas", - "caption", - "cite", - "code", - "col", - "colgroup", - "command", - "data", - "datalist", - "dd", - "del", - "details", - "dfn", - "dialog", - "div", - "dl", - "dt", - "em", - "embed", - "fieldset", - "figcaption", - "figure", - "footer", - "form", - "h1", - "h2", - "h3", - "h4", - "h5", - "h6", - "head", - "header", - "hgroup", - "hr", - "html", - "i", - "iframe", - "img", - "input", - "ins", - "kbd", - "keygen", - "label", - "legend", - "li", - "link", - "main", - "map", - "mark", - "menu", - "menuitem", - "meta", - "meter", - "nav", - "noscript", - "object", - "ol", - "optgroup", - "option", - "output", - "p", - "param", - "picture", - "pre", - "progress", - "q", - "rp", - "rt", - "ruby", - "s", - "samp", - "script", - "section", - "select", - "slot", - "small", - "source", - "span", - "strong", - "style", - "sub", - "summary", - "sup", - "table", - "tbody", - "td", - "template", - "textarea", - "tfoot", - "th", - "thead", - "time", - "title", - "tr", - 
"track", - "u", - "ul", - "var", - "video", - "wbr", -} - -// https://html.spec.whatwg.org/multipage/indices.html#attributes-3 -// -// "challenge", "command", "contextmenu", "dropzone", "icon", "keytype", "mediagroup", -// "radiogroup", "spellcheck", "scoped", "seamless", "sortable" and "sorted" have been removed from the spec, -// but are kept here for backwards compatibility. -var attributes = []string{ - "abbr", - "accept", - "accept-charset", - "accesskey", - "action", - "allowfullscreen", - "allowpaymentrequest", - "allowusermedia", - "alt", - "as", - "async", - "autocomplete", - "autofocus", - "autoplay", - "challenge", - "charset", - "checked", - "cite", - "class", - "color", - "cols", - "colspan", - "command", - "content", - "contenteditable", - "contextmenu", - "controls", - "coords", - "crossorigin", - "data", - "datetime", - "default", - "defer", - "dir", - "dirname", - "disabled", - "download", - "draggable", - "dropzone", - "enctype", - "for", - "form", - "formaction", - "formenctype", - "formmethod", - "formnovalidate", - "formtarget", - "headers", - "height", - "hidden", - "high", - "href", - "hreflang", - "http-equiv", - "icon", - "id", - "inputmode", - "integrity", - "is", - "ismap", - "itemid", - "itemprop", - "itemref", - "itemscope", - "itemtype", - "keytype", - "kind", - "label", - "lang", - "list", - "loop", - "low", - "manifest", - "max", - "maxlength", - "media", - "mediagroup", - "method", - "min", - "minlength", - "multiple", - "muted", - "name", - "nomodule", - "nonce", - "novalidate", - "open", - "optimum", - "pattern", - "ping", - "placeholder", - "playsinline", - "poster", - "preload", - "radiogroup", - "readonly", - "referrerpolicy", - "rel", - "required", - "reversed", - "rows", - "rowspan", - "sandbox", - "spellcheck", - "scope", - "scoped", - "seamless", - "selected", - "shape", - "size", - "sizes", - "sortable", - "sorted", - "slot", - "span", - "spellcheck", - "src", - "srcdoc", - "srclang", - "srcset", - "start", - "step", - 
"style", - "tabindex", - "target", - "title", - "translate", - "type", - "typemustmatch", - "updateviacache", - "usemap", - "value", - "width", - "workertype", - "wrap", -} - -// "onautocomplete", "onautocompleteerror", "onmousewheel", -// "onshow" and "onsort" have been removed from the spec, -// but are kept here for backwards compatibility. -var eventHandlers = []string{ - "onabort", - "onautocomplete", - "onautocompleteerror", - "onauxclick", - "onafterprint", - "onbeforeprint", - "onbeforeunload", - "onblur", - "oncancel", - "oncanplay", - "oncanplaythrough", - "onchange", - "onclick", - "onclose", - "oncontextmenu", - "oncopy", - "oncuechange", - "oncut", - "ondblclick", - "ondrag", - "ondragend", - "ondragenter", - "ondragexit", - "ondragleave", - "ondragover", - "ondragstart", - "ondrop", - "ondurationchange", - "onemptied", - "onended", - "onerror", - "onfocus", - "onhashchange", - "oninput", - "oninvalid", - "onkeydown", - "onkeypress", - "onkeyup", - "onlanguagechange", - "onload", - "onloadeddata", - "onloadedmetadata", - "onloadend", - "onloadstart", - "onmessage", - "onmessageerror", - "onmousedown", - "onmouseenter", - "onmouseleave", - "onmousemove", - "onmouseout", - "onmouseover", - "onmouseup", - "onmousewheel", - "onwheel", - "onoffline", - "ononline", - "onpagehide", - "onpageshow", - "onpaste", - "onpause", - "onplay", - "onplaying", - "onpopstate", - "onprogress", - "onratechange", - "onreset", - "onresize", - "onrejectionhandled", - "onscroll", - "onsecuritypolicyviolation", - "onseeked", - "onseeking", - "onselect", - "onshow", - "onsort", - "onstalled", - "onstorage", - "onsubmit", - "onsuspend", - "ontimeupdate", - "ontoggle", - "onunhandledrejection", - "onunload", - "onvolumechange", - "onwaiting", -} - -// extra are ad-hoc values not covered by any of the lists above. 
-var extra = []string{ - "acronym", - "align", - "annotation", - "annotation-xml", - "applet", - "basefont", - "bgsound", - "big", - "blink", - "center", - "color", - "desc", - "face", - "font", - "foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive. - "foreignobject", - "frame", - "frameset", - "image", - "isindex", - "listing", - "malignmark", - "marquee", - "math", - "mglyph", - "mi", - "mn", - "mo", - "ms", - "mtext", - "nobr", - "noembed", - "noframes", - "plaintext", - "prompt", - "public", - "rb", - "rtc", - "spacer", - "strike", - "svg", - "system", - "tt", - "xmp", -} diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go deleted file mode 100644 index 2a938864c..000000000 --- a/vendor/golang.org/x/net/html/atom/table.go +++ /dev/null @@ -1,783 +0,0 @@ -// Code generated by go generate gen.go; DO NOT EDIT. - -//go:generate go run gen.go - -package atom - -const ( - A Atom = 0x1 - Abbr Atom = 0x4 - Accept Atom = 0x1a06 - AcceptCharset Atom = 0x1a0e - Accesskey Atom = 0x2c09 - Acronym Atom = 0xaa07 - Action Atom = 0x27206 - Address Atom = 0x6f307 - Align Atom = 0xb105 - Allowfullscreen Atom = 0x2080f - Allowpaymentrequest Atom = 0xc113 - Allowusermedia Atom = 0xdd0e - Alt Atom = 0xf303 - Annotation Atom = 0x1c90a - AnnotationXml Atom = 0x1c90e - Applet Atom = 0x31906 - Area Atom = 0x35604 - Article Atom = 0x3fc07 - As Atom = 0x3c02 - Aside Atom = 0x10705 - Async Atom = 0xff05 - Audio Atom = 0x11505 - Autocomplete Atom = 0x2780c - Autofocus Atom = 0x12109 - Autoplay Atom = 0x13c08 - B Atom = 0x101 - Base Atom = 0x3b04 - Basefont Atom = 0x3b08 - Bdi Atom = 0xba03 - Bdo Atom = 0x14b03 - Bgsound Atom = 0x15e07 - Big Atom = 0x17003 - Blink Atom = 0x17305 - Blockquote Atom = 0x1870a - Body Atom = 0x2804 - Br Atom = 0x202 - Button Atom = 0x19106 - Canvas Atom = 0x10306 - Caption Atom = 0x23107 - Center Atom = 0x22006 - Challenge Atom = 0x29b09 - Charset Atom = 0x2107 - Checked Atom = 
0x47907 - Cite Atom = 0x19c04 - Class Atom = 0x56405 - Code Atom = 0x5c504 - Col Atom = 0x1ab03 - Colgroup Atom = 0x1ab08 - Color Atom = 0x1bf05 - Cols Atom = 0x1c404 - Colspan Atom = 0x1c407 - Command Atom = 0x1d707 - Content Atom = 0x58b07 - Contenteditable Atom = 0x58b0f - Contextmenu Atom = 0x3800b - Controls Atom = 0x1de08 - Coords Atom = 0x1ea06 - Crossorigin Atom = 0x1fb0b - Data Atom = 0x4a504 - Datalist Atom = 0x4a508 - Datetime Atom = 0x2b808 - Dd Atom = 0x2d702 - Default Atom = 0x10a07 - Defer Atom = 0x5c705 - Del Atom = 0x45203 - Desc Atom = 0x56104 - Details Atom = 0x7207 - Dfn Atom = 0x8703 - Dialog Atom = 0xbb06 - Dir Atom = 0x9303 - Dirname Atom = 0x9307 - Disabled Atom = 0x16408 - Div Atom = 0x16b03 - Dl Atom = 0x5e602 - Download Atom = 0x46308 - Draggable Atom = 0x17a09 - Dropzone Atom = 0x40508 - Dt Atom = 0x64b02 - Em Atom = 0x6e02 - Embed Atom = 0x6e05 - Enctype Atom = 0x28d07 - Face Atom = 0x21e04 - Fieldset Atom = 0x22608 - Figcaption Atom = 0x22e0a - Figure Atom = 0x24806 - Font Atom = 0x3f04 - Footer Atom = 0xf606 - For Atom = 0x25403 - ForeignObject Atom = 0x2540d - Foreignobject Atom = 0x2610d - Form Atom = 0x26e04 - Formaction Atom = 0x26e0a - Formenctype Atom = 0x2890b - Formmethod Atom = 0x2a40a - Formnovalidate Atom = 0x2ae0e - Formtarget Atom = 0x2c00a - Frame Atom = 0x8b05 - Frameset Atom = 0x8b08 - H1 Atom = 0x15c02 - H2 Atom = 0x2de02 - H3 Atom = 0x30d02 - H4 Atom = 0x34502 - H5 Atom = 0x34f02 - H6 Atom = 0x64d02 - Head Atom = 0x33104 - Header Atom = 0x33106 - Headers Atom = 0x33107 - Height Atom = 0x5206 - Hgroup Atom = 0x2ca06 - Hidden Atom = 0x2d506 - High Atom = 0x2db04 - Hr Atom = 0x15702 - Href Atom = 0x2e004 - Hreflang Atom = 0x2e008 - Html Atom = 0x5604 - HttpEquiv Atom = 0x2e80a - I Atom = 0x601 - Icon Atom = 0x58a04 - Id Atom = 0x10902 - Iframe Atom = 0x2fc06 - Image Atom = 0x30205 - Img Atom = 0x30703 - Input Atom = 0x44b05 - Inputmode Atom = 0x44b09 - Ins Atom = 0x20403 - Integrity Atom = 0x23f09 - Is Atom = 0x16502 - 
Isindex Atom = 0x30f07 - Ismap Atom = 0x31605 - Itemid Atom = 0x38b06 - Itemprop Atom = 0x19d08 - Itemref Atom = 0x3cd07 - Itemscope Atom = 0x67109 - Itemtype Atom = 0x31f08 - Kbd Atom = 0xb903 - Keygen Atom = 0x3206 - Keytype Atom = 0xd607 - Kind Atom = 0x17704 - Label Atom = 0x5905 - Lang Atom = 0x2e404 - Legend Atom = 0x18106 - Li Atom = 0xb202 - Link Atom = 0x17404 - List Atom = 0x4a904 - Listing Atom = 0x4a907 - Loop Atom = 0x5d04 - Low Atom = 0xc303 - Main Atom = 0x1004 - Malignmark Atom = 0xb00a - Manifest Atom = 0x6d708 - Map Atom = 0x31803 - Mark Atom = 0xb604 - Marquee Atom = 0x32707 - Math Atom = 0x32e04 - Max Atom = 0x33d03 - Maxlength Atom = 0x33d09 - Media Atom = 0xe605 - Mediagroup Atom = 0xe60a - Menu Atom = 0x38704 - Menuitem Atom = 0x38708 - Meta Atom = 0x4b804 - Meter Atom = 0x9805 - Method Atom = 0x2a806 - Mglyph Atom = 0x30806 - Mi Atom = 0x34702 - Min Atom = 0x34703 - Minlength Atom = 0x34709 - Mn Atom = 0x2b102 - Mo Atom = 0xa402 - Ms Atom = 0x67402 - Mtext Atom = 0x35105 - Multiple Atom = 0x35f08 - Muted Atom = 0x36705 - Name Atom = 0x9604 - Nav Atom = 0x1303 - Nobr Atom = 0x3704 - Noembed Atom = 0x6c07 - Noframes Atom = 0x8908 - Nomodule Atom = 0xa208 - Nonce Atom = 0x1a605 - Noscript Atom = 0x21608 - Novalidate Atom = 0x2b20a - Object Atom = 0x26806 - Ol Atom = 0x13702 - Onabort Atom = 0x19507 - Onafterprint Atom = 0x2360c - Onautocomplete Atom = 0x2760e - Onautocompleteerror Atom = 0x27613 - Onauxclick Atom = 0x61f0a - Onbeforeprint Atom = 0x69e0d - Onbeforeunload Atom = 0x6e70e - Onblur Atom = 0x56d06 - Oncancel Atom = 0x11908 - Oncanplay Atom = 0x14d09 - Oncanplaythrough Atom = 0x14d10 - Onchange Atom = 0x41b08 - Onclick Atom = 0x2f507 - Onclose Atom = 0x36c07 - Oncontextmenu Atom = 0x37e0d - Oncopy Atom = 0x39106 - Oncuechange Atom = 0x3970b - Oncut Atom = 0x3a205 - Ondblclick Atom = 0x3a70a - Ondrag Atom = 0x3b106 - Ondragend Atom = 0x3b109 - Ondragenter Atom = 0x3ba0b - Ondragexit Atom = 0x3c50a - Ondragleave Atom = 0x3df0b - 
Ondragover Atom = 0x3ea0a - Ondragstart Atom = 0x3f40b - Ondrop Atom = 0x40306 - Ondurationchange Atom = 0x41310 - Onemptied Atom = 0x40a09 - Onended Atom = 0x42307 - Onerror Atom = 0x42a07 - Onfocus Atom = 0x43107 - Onhashchange Atom = 0x43d0c - Oninput Atom = 0x44907 - Oninvalid Atom = 0x45509 - Onkeydown Atom = 0x45e09 - Onkeypress Atom = 0x46b0a - Onkeyup Atom = 0x48007 - Onlanguagechange Atom = 0x48d10 - Onload Atom = 0x49d06 - Onloadeddata Atom = 0x49d0c - Onloadedmetadata Atom = 0x4b010 - Onloadend Atom = 0x4c609 - Onloadstart Atom = 0x4cf0b - Onmessage Atom = 0x4da09 - Onmessageerror Atom = 0x4da0e - Onmousedown Atom = 0x4e80b - Onmouseenter Atom = 0x4f30c - Onmouseleave Atom = 0x4ff0c - Onmousemove Atom = 0x50b0b - Onmouseout Atom = 0x5160a - Onmouseover Atom = 0x5230b - Onmouseup Atom = 0x52e09 - Onmousewheel Atom = 0x53c0c - Onoffline Atom = 0x54809 - Ononline Atom = 0x55108 - Onpagehide Atom = 0x5590a - Onpageshow Atom = 0x5730a - Onpaste Atom = 0x57f07 - Onpause Atom = 0x59a07 - Onplay Atom = 0x5a406 - Onplaying Atom = 0x5a409 - Onpopstate Atom = 0x5ad0a - Onprogress Atom = 0x5b70a - Onratechange Atom = 0x5cc0c - Onrejectionhandled Atom = 0x5d812 - Onreset Atom = 0x5ea07 - Onresize Atom = 0x5f108 - Onscroll Atom = 0x60008 - Onsecuritypolicyviolation Atom = 0x60819 - Onseeked Atom = 0x62908 - Onseeking Atom = 0x63109 - Onselect Atom = 0x63a08 - Onshow Atom = 0x64406 - Onsort Atom = 0x64f06 - Onstalled Atom = 0x65909 - Onstorage Atom = 0x66209 - Onsubmit Atom = 0x66b08 - Onsuspend Atom = 0x67b09 - Ontimeupdate Atom = 0x400c - Ontoggle Atom = 0x68408 - Onunhandledrejection Atom = 0x68c14 - Onunload Atom = 0x6ab08 - Onvolumechange Atom = 0x6b30e - Onwaiting Atom = 0x6c109 - Onwheel Atom = 0x6ca07 - Open Atom = 0x1a304 - Optgroup Atom = 0x5f08 - Optimum Atom = 0x6d107 - Option Atom = 0x6e306 - Output Atom = 0x51d06 - P Atom = 0xc01 - Param Atom = 0xc05 - Pattern Atom = 0x6607 - Picture Atom = 0x7b07 - Ping Atom = 0xef04 - Placeholder Atom = 0x1310b - 
Plaintext Atom = 0x1b209 - Playsinline Atom = 0x1400b - Poster Atom = 0x2cf06 - Pre Atom = 0x47003 - Preload Atom = 0x48607 - Progress Atom = 0x5b908 - Prompt Atom = 0x53606 - Public Atom = 0x58606 - Q Atom = 0xcf01 - Radiogroup Atom = 0x30a - Rb Atom = 0x3a02 - Readonly Atom = 0x35708 - Referrerpolicy Atom = 0x3d10e - Rel Atom = 0x48703 - Required Atom = 0x24c08 - Reversed Atom = 0x8008 - Rows Atom = 0x9c04 - Rowspan Atom = 0x9c07 - Rp Atom = 0x23c02 - Rt Atom = 0x19a02 - Rtc Atom = 0x19a03 - Ruby Atom = 0xfb04 - S Atom = 0x2501 - Samp Atom = 0x7804 - Sandbox Atom = 0x12907 - Scope Atom = 0x67505 - Scoped Atom = 0x67506 - Script Atom = 0x21806 - Seamless Atom = 0x37108 - Section Atom = 0x56807 - Select Atom = 0x63c06 - Selected Atom = 0x63c08 - Shape Atom = 0x1e505 - Size Atom = 0x5f504 - Sizes Atom = 0x5f505 - Slot Atom = 0x1ef04 - Small Atom = 0x20605 - Sortable Atom = 0x65108 - Sorted Atom = 0x33706 - Source Atom = 0x37806 - Spacer Atom = 0x43706 - Span Atom = 0x9f04 - Spellcheck Atom = 0x4740a - Src Atom = 0x5c003 - Srcdoc Atom = 0x5c006 - Srclang Atom = 0x5f907 - Srcset Atom = 0x6f906 - Start Atom = 0x3fa05 - Step Atom = 0x58304 - Strike Atom = 0xd206 - Strong Atom = 0x6dd06 - Style Atom = 0x6ff05 - Sub Atom = 0x66d03 - Summary Atom = 0x70407 - Sup Atom = 0x70b03 - Svg Atom = 0x70e03 - System Atom = 0x71106 - Tabindex Atom = 0x4be08 - Table Atom = 0x59505 - Target Atom = 0x2c406 - Tbody Atom = 0x2705 - Td Atom = 0x9202 - Template Atom = 0x71408 - Textarea Atom = 0x35208 - Tfoot Atom = 0xf505 - Th Atom = 0x15602 - Thead Atom = 0x33005 - Time Atom = 0x4204 - Title Atom = 0x11005 - Tr Atom = 0xcc02 - Track Atom = 0x1ba05 - Translate Atom = 0x1f209 - Tt Atom = 0x6802 - Type Atom = 0xd904 - Typemustmatch Atom = 0x2900d - U Atom = 0xb01 - Ul Atom = 0xa702 - Updateviacache Atom = 0x460e - Usemap Atom = 0x59e06 - Value Atom = 0x1505 - Var Atom = 0x16d03 - Video Atom = 0x2f105 - Wbr Atom = 0x57c03 - Width Atom = 0x64905 - Workertype Atom = 0x71c0a - Wrap Atom = 
0x72604 - Xmp Atom = 0x12f03 -) - -const hash0 = 0x81cdf10e - -const maxAtomLen = 25 - -var table = [1 << 9]Atom{ - 0x1: 0xe60a, // mediagroup - 0x2: 0x2e404, // lang - 0x4: 0x2c09, // accesskey - 0x5: 0x8b08, // frameset - 0x7: 0x63a08, // onselect - 0x8: 0x71106, // system - 0xa: 0x64905, // width - 0xc: 0x2890b, // formenctype - 0xd: 0x13702, // ol - 0xe: 0x3970b, // oncuechange - 0x10: 0x14b03, // bdo - 0x11: 0x11505, // audio - 0x12: 0x17a09, // draggable - 0x14: 0x2f105, // video - 0x15: 0x2b102, // mn - 0x16: 0x38704, // menu - 0x17: 0x2cf06, // poster - 0x19: 0xf606, // footer - 0x1a: 0x2a806, // method - 0x1b: 0x2b808, // datetime - 0x1c: 0x19507, // onabort - 0x1d: 0x460e, // updateviacache - 0x1e: 0xff05, // async - 0x1f: 0x49d06, // onload - 0x21: 0x11908, // oncancel - 0x22: 0x62908, // onseeked - 0x23: 0x30205, // image - 0x24: 0x5d812, // onrejectionhandled - 0x26: 0x17404, // link - 0x27: 0x51d06, // output - 0x28: 0x33104, // head - 0x29: 0x4ff0c, // onmouseleave - 0x2a: 0x57f07, // onpaste - 0x2b: 0x5a409, // onplaying - 0x2c: 0x1c407, // colspan - 0x2f: 0x1bf05, // color - 0x30: 0x5f504, // size - 0x31: 0x2e80a, // http-equiv - 0x33: 0x601, // i - 0x34: 0x5590a, // onpagehide - 0x35: 0x68c14, // onunhandledrejection - 0x37: 0x42a07, // onerror - 0x3a: 0x3b08, // basefont - 0x3f: 0x1303, // nav - 0x40: 0x17704, // kind - 0x41: 0x35708, // readonly - 0x42: 0x30806, // mglyph - 0x44: 0xb202, // li - 0x46: 0x2d506, // hidden - 0x47: 0x70e03, // svg - 0x48: 0x58304, // step - 0x49: 0x23f09, // integrity - 0x4a: 0x58606, // public - 0x4c: 0x1ab03, // col - 0x4d: 0x1870a, // blockquote - 0x4e: 0x34f02, // h5 - 0x50: 0x5b908, // progress - 0x51: 0x5f505, // sizes - 0x52: 0x34502, // h4 - 0x56: 0x33005, // thead - 0x57: 0xd607, // keytype - 0x58: 0x5b70a, // onprogress - 0x59: 0x44b09, // inputmode - 0x5a: 0x3b109, // ondragend - 0x5d: 0x3a205, // oncut - 0x5e: 0x43706, // spacer - 0x5f: 0x1ab08, // colgroup - 0x62: 0x16502, // is - 0x65: 0x3c02, // as - 
0x66: 0x54809, // onoffline - 0x67: 0x33706, // sorted - 0x69: 0x48d10, // onlanguagechange - 0x6c: 0x43d0c, // onhashchange - 0x6d: 0x9604, // name - 0x6e: 0xf505, // tfoot - 0x6f: 0x56104, // desc - 0x70: 0x33d03, // max - 0x72: 0x1ea06, // coords - 0x73: 0x30d02, // h3 - 0x74: 0x6e70e, // onbeforeunload - 0x75: 0x9c04, // rows - 0x76: 0x63c06, // select - 0x77: 0x9805, // meter - 0x78: 0x38b06, // itemid - 0x79: 0x53c0c, // onmousewheel - 0x7a: 0x5c006, // srcdoc - 0x7d: 0x1ba05, // track - 0x7f: 0x31f08, // itemtype - 0x82: 0xa402, // mo - 0x83: 0x41b08, // onchange - 0x84: 0x33107, // headers - 0x85: 0x5cc0c, // onratechange - 0x86: 0x60819, // onsecuritypolicyviolation - 0x88: 0x4a508, // datalist - 0x89: 0x4e80b, // onmousedown - 0x8a: 0x1ef04, // slot - 0x8b: 0x4b010, // onloadedmetadata - 0x8c: 0x1a06, // accept - 0x8d: 0x26806, // object - 0x91: 0x6b30e, // onvolumechange - 0x92: 0x2107, // charset - 0x93: 0x27613, // onautocompleteerror - 0x94: 0xc113, // allowpaymentrequest - 0x95: 0x2804, // body - 0x96: 0x10a07, // default - 0x97: 0x63c08, // selected - 0x98: 0x21e04, // face - 0x99: 0x1e505, // shape - 0x9b: 0x68408, // ontoggle - 0x9e: 0x64b02, // dt - 0x9f: 0xb604, // mark - 0xa1: 0xb01, // u - 0xa4: 0x6ab08, // onunload - 0xa5: 0x5d04, // loop - 0xa6: 0x16408, // disabled - 0xaa: 0x42307, // onended - 0xab: 0xb00a, // malignmark - 0xad: 0x67b09, // onsuspend - 0xae: 0x35105, // mtext - 0xaf: 0x64f06, // onsort - 0xb0: 0x19d08, // itemprop - 0xb3: 0x67109, // itemscope - 0xb4: 0x17305, // blink - 0xb6: 0x3b106, // ondrag - 0xb7: 0xa702, // ul - 0xb8: 0x26e04, // form - 0xb9: 0x12907, // sandbox - 0xba: 0x8b05, // frame - 0xbb: 0x1505, // value - 0xbc: 0x66209, // onstorage - 0xbf: 0xaa07, // acronym - 0xc0: 0x19a02, // rt - 0xc2: 0x202, // br - 0xc3: 0x22608, // fieldset - 0xc4: 0x2900d, // typemustmatch - 0xc5: 0xa208, // nomodule - 0xc6: 0x6c07, // noembed - 0xc7: 0x69e0d, // onbeforeprint - 0xc8: 0x19106, // button - 0xc9: 0x2f507, // onclick - 
0xca: 0x70407, // summary - 0xcd: 0xfb04, // ruby - 0xce: 0x56405, // class - 0xcf: 0x3f40b, // ondragstart - 0xd0: 0x23107, // caption - 0xd4: 0xdd0e, // allowusermedia - 0xd5: 0x4cf0b, // onloadstart - 0xd9: 0x16b03, // div - 0xda: 0x4a904, // list - 0xdb: 0x32e04, // math - 0xdc: 0x44b05, // input - 0xdf: 0x3ea0a, // ondragover - 0xe0: 0x2de02, // h2 - 0xe2: 0x1b209, // plaintext - 0xe4: 0x4f30c, // onmouseenter - 0xe7: 0x47907, // checked - 0xe8: 0x47003, // pre - 0xea: 0x35f08, // multiple - 0xeb: 0xba03, // bdi - 0xec: 0x33d09, // maxlength - 0xed: 0xcf01, // q - 0xee: 0x61f0a, // onauxclick - 0xf0: 0x57c03, // wbr - 0xf2: 0x3b04, // base - 0xf3: 0x6e306, // option - 0xf5: 0x41310, // ondurationchange - 0xf7: 0x8908, // noframes - 0xf9: 0x40508, // dropzone - 0xfb: 0x67505, // scope - 0xfc: 0x8008, // reversed - 0xfd: 0x3ba0b, // ondragenter - 0xfe: 0x3fa05, // start - 0xff: 0x12f03, // xmp - 0x100: 0x5f907, // srclang - 0x101: 0x30703, // img - 0x104: 0x101, // b - 0x105: 0x25403, // for - 0x106: 0x10705, // aside - 0x107: 0x44907, // oninput - 0x108: 0x35604, // area - 0x109: 0x2a40a, // formmethod - 0x10a: 0x72604, // wrap - 0x10c: 0x23c02, // rp - 0x10d: 0x46b0a, // onkeypress - 0x10e: 0x6802, // tt - 0x110: 0x34702, // mi - 0x111: 0x36705, // muted - 0x112: 0xf303, // alt - 0x113: 0x5c504, // code - 0x114: 0x6e02, // em - 0x115: 0x3c50a, // ondragexit - 0x117: 0x9f04, // span - 0x119: 0x6d708, // manifest - 0x11a: 0x38708, // menuitem - 0x11b: 0x58b07, // content - 0x11d: 0x6c109, // onwaiting - 0x11f: 0x4c609, // onloadend - 0x121: 0x37e0d, // oncontextmenu - 0x123: 0x56d06, // onblur - 0x124: 0x3fc07, // article - 0x125: 0x9303, // dir - 0x126: 0xef04, // ping - 0x127: 0x24c08, // required - 0x128: 0x45509, // oninvalid - 0x129: 0xb105, // align - 0x12b: 0x58a04, // icon - 0x12c: 0x64d02, // h6 - 0x12d: 0x1c404, // cols - 0x12e: 0x22e0a, // figcaption - 0x12f: 0x45e09, // onkeydown - 0x130: 0x66b08, // onsubmit - 0x131: 0x14d09, // oncanplay - 0x132: 
0x70b03, // sup - 0x133: 0xc01, // p - 0x135: 0x40a09, // onemptied - 0x136: 0x39106, // oncopy - 0x137: 0x19c04, // cite - 0x138: 0x3a70a, // ondblclick - 0x13a: 0x50b0b, // onmousemove - 0x13c: 0x66d03, // sub - 0x13d: 0x48703, // rel - 0x13e: 0x5f08, // optgroup - 0x142: 0x9c07, // rowspan - 0x143: 0x37806, // source - 0x144: 0x21608, // noscript - 0x145: 0x1a304, // open - 0x146: 0x20403, // ins - 0x147: 0x2540d, // foreignObject - 0x148: 0x5ad0a, // onpopstate - 0x14a: 0x28d07, // enctype - 0x14b: 0x2760e, // onautocomplete - 0x14c: 0x35208, // textarea - 0x14e: 0x2780c, // autocomplete - 0x14f: 0x15702, // hr - 0x150: 0x1de08, // controls - 0x151: 0x10902, // id - 0x153: 0x2360c, // onafterprint - 0x155: 0x2610d, // foreignobject - 0x156: 0x32707, // marquee - 0x157: 0x59a07, // onpause - 0x158: 0x5e602, // dl - 0x159: 0x5206, // height - 0x15a: 0x34703, // min - 0x15b: 0x9307, // dirname - 0x15c: 0x1f209, // translate - 0x15d: 0x5604, // html - 0x15e: 0x34709, // minlength - 0x15f: 0x48607, // preload - 0x160: 0x71408, // template - 0x161: 0x3df0b, // ondragleave - 0x162: 0x3a02, // rb - 0x164: 0x5c003, // src - 0x165: 0x6dd06, // strong - 0x167: 0x7804, // samp - 0x168: 0x6f307, // address - 0x169: 0x55108, // ononline - 0x16b: 0x1310b, // placeholder - 0x16c: 0x2c406, // target - 0x16d: 0x20605, // small - 0x16e: 0x6ca07, // onwheel - 0x16f: 0x1c90a, // annotation - 0x170: 0x4740a, // spellcheck - 0x171: 0x7207, // details - 0x172: 0x10306, // canvas - 0x173: 0x12109, // autofocus - 0x174: 0xc05, // param - 0x176: 0x46308, // download - 0x177: 0x45203, // del - 0x178: 0x36c07, // onclose - 0x179: 0xb903, // kbd - 0x17a: 0x31906, // applet - 0x17b: 0x2e004, // href - 0x17c: 0x5f108, // onresize - 0x17e: 0x49d0c, // onloadeddata - 0x180: 0xcc02, // tr - 0x181: 0x2c00a, // formtarget - 0x182: 0x11005, // title - 0x183: 0x6ff05, // style - 0x184: 0xd206, // strike - 0x185: 0x59e06, // usemap - 0x186: 0x2fc06, // iframe - 0x187: 0x1004, // main - 0x189: 0x7b07, 
// picture - 0x18c: 0x31605, // ismap - 0x18e: 0x4a504, // data - 0x18f: 0x5905, // label - 0x191: 0x3d10e, // referrerpolicy - 0x192: 0x15602, // th - 0x194: 0x53606, // prompt - 0x195: 0x56807, // section - 0x197: 0x6d107, // optimum - 0x198: 0x2db04, // high - 0x199: 0x15c02, // h1 - 0x19a: 0x65909, // onstalled - 0x19b: 0x16d03, // var - 0x19c: 0x4204, // time - 0x19e: 0x67402, // ms - 0x19f: 0x33106, // header - 0x1a0: 0x4da09, // onmessage - 0x1a1: 0x1a605, // nonce - 0x1a2: 0x26e0a, // formaction - 0x1a3: 0x22006, // center - 0x1a4: 0x3704, // nobr - 0x1a5: 0x59505, // table - 0x1a6: 0x4a907, // listing - 0x1a7: 0x18106, // legend - 0x1a9: 0x29b09, // challenge - 0x1aa: 0x24806, // figure - 0x1ab: 0xe605, // media - 0x1ae: 0xd904, // type - 0x1af: 0x3f04, // font - 0x1b0: 0x4da0e, // onmessageerror - 0x1b1: 0x37108, // seamless - 0x1b2: 0x8703, // dfn - 0x1b3: 0x5c705, // defer - 0x1b4: 0xc303, // low - 0x1b5: 0x19a03, // rtc - 0x1b6: 0x5230b, // onmouseover - 0x1b7: 0x2b20a, // novalidate - 0x1b8: 0x71c0a, // workertype - 0x1ba: 0x3cd07, // itemref - 0x1bd: 0x1, // a - 0x1be: 0x31803, // map - 0x1bf: 0x400c, // ontimeupdate - 0x1c0: 0x15e07, // bgsound - 0x1c1: 0x3206, // keygen - 0x1c2: 0x2705, // tbody - 0x1c5: 0x64406, // onshow - 0x1c7: 0x2501, // s - 0x1c8: 0x6607, // pattern - 0x1cc: 0x14d10, // oncanplaythrough - 0x1ce: 0x2d702, // dd - 0x1cf: 0x6f906, // srcset - 0x1d0: 0x17003, // big - 0x1d2: 0x65108, // sortable - 0x1d3: 0x48007, // onkeyup - 0x1d5: 0x5a406, // onplay - 0x1d7: 0x4b804, // meta - 0x1d8: 0x40306, // ondrop - 0x1da: 0x60008, // onscroll - 0x1db: 0x1fb0b, // crossorigin - 0x1dc: 0x5730a, // onpageshow - 0x1dd: 0x4, // abbr - 0x1de: 0x9202, // td - 0x1df: 0x58b0f, // contenteditable - 0x1e0: 0x27206, // action - 0x1e1: 0x1400b, // playsinline - 0x1e2: 0x43107, // onfocus - 0x1e3: 0x2e008, // hreflang - 0x1e5: 0x5160a, // onmouseout - 0x1e6: 0x5ea07, // onreset - 0x1e7: 0x13c08, // autoplay - 0x1e8: 0x63109, // onseeking - 0x1ea: 
0x67506, // scoped - 0x1ec: 0x30a, // radiogroup - 0x1ee: 0x3800b, // contextmenu - 0x1ef: 0x52e09, // onmouseup - 0x1f1: 0x2ca06, // hgroup - 0x1f2: 0x2080f, // allowfullscreen - 0x1f3: 0x4be08, // tabindex - 0x1f6: 0x30f07, // isindex - 0x1f7: 0x1a0e, // accept-charset - 0x1f8: 0x2ae0e, // formnovalidate - 0x1fb: 0x1c90e, // annotation-xml - 0x1fc: 0x6e05, // embed - 0x1fd: 0x21806, // script - 0x1fe: 0xbb06, // dialog - 0x1ff: 0x1d707, // command -} - -const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" + - "asefontimeupdateviacacheightmlabelooptgroupatternoembedetail" + - "sampictureversedfnoframesetdirnameterowspanomoduleacronymali" + - "gnmarkbdialogallowpaymentrequestrikeytypeallowusermediagroup" + - "ingaltfooterubyasyncanvasidefaultitleaudioncancelautofocusan" + - "dboxmplaceholderautoplaysinlinebdoncanplaythrough1bgsoundisa" + - "bledivarbigblinkindraggablegendblockquotebuttonabortcitempro" + - "penoncecolgrouplaintextrackcolorcolspannotation-xmlcommandco" + - "ntrolshapecoordslotranslatecrossoriginsmallowfullscreenoscri" + - "ptfacenterfieldsetfigcaptionafterprintegrityfigurequiredfore" + - "ignObjectforeignobjectformactionautocompleteerrorformenctype" + - "mustmatchallengeformmethodformnovalidatetimeformtargethgroup" + - "osterhiddenhigh2hreflanghttp-equivideonclickiframeimageimgly" + - "ph3isindexismappletitemtypemarqueematheadersortedmaxlength4m" + - "inlength5mtextareadonlymultiplemutedoncloseamlessourceoncont" + - "extmenuitemidoncopyoncuechangeoncutondblclickondragendondrag" + - "enterondragexitemreferrerpolicyondragleaveondragoverondragst" + - "articleondropzonemptiedondurationchangeonendedonerroronfocus" + - "paceronhashchangeoninputmodeloninvalidonkeydownloadonkeypres" + - "spellcheckedonkeyupreloadonlanguagechangeonloadeddatalisting" + - "onloadedmetadatabindexonloadendonloadstartonmessageerroronmo" + - "usedownonmouseenteronmouseleaveonmousemoveonmouseoutputonmou" + - 
"seoveronmouseupromptonmousewheelonofflineononlineonpagehides" + - "classectionbluronpageshowbronpastepublicontenteditableonpaus" + - "emaponplayingonpopstateonprogressrcdocodeferonratechangeonre" + - "jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" + - "violationauxclickonseekedonseekingonselectedonshowidth6onsor" + - "tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" + - "handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" + - "wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" + - "arysupsvgsystemplateworkertypewrap" diff --git a/vendor/golang.org/x/net/html/const.go b/vendor/golang.org/x/net/html/const.go deleted file mode 100644 index a3a918f0b..000000000 --- a/vendor/golang.org/x/net/html/const.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -// Section 12.2.4.2 of the HTML5 specification says "The following elements -// have varying levels of special parsing rules". 
-// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements -var isSpecialElementMap = map[string]bool{ - "address": true, - "applet": true, - "area": true, - "article": true, - "aside": true, - "base": true, - "basefont": true, - "bgsound": true, - "blockquote": true, - "body": true, - "br": true, - "button": true, - "caption": true, - "center": true, - "col": true, - "colgroup": true, - "dd": true, - "details": true, - "dir": true, - "div": true, - "dl": true, - "dt": true, - "embed": true, - "fieldset": true, - "figcaption": true, - "figure": true, - "footer": true, - "form": true, - "frame": true, - "frameset": true, - "h1": true, - "h2": true, - "h3": true, - "h4": true, - "h5": true, - "h6": true, - "head": true, - "header": true, - "hgroup": true, - "hr": true, - "html": true, - "iframe": true, - "img": true, - "input": true, - "isindex": true, // The 'isindex' element has been removed, but keep it for backwards compatibility. - "keygen": true, - "li": true, - "link": true, - "listing": true, - "main": true, - "marquee": true, - "menu": true, - "meta": true, - "nav": true, - "noembed": true, - "noframes": true, - "noscript": true, - "object": true, - "ol": true, - "p": true, - "param": true, - "plaintext": true, - "pre": true, - "script": true, - "section": true, - "select": true, - "source": true, - "style": true, - "summary": true, - "table": true, - "tbody": true, - "td": true, - "template": true, - "textarea": true, - "tfoot": true, - "th": true, - "thead": true, - "title": true, - "tr": true, - "track": true, - "ul": true, - "wbr": true, - "xmp": true, -} - -func isSpecialElement(element *Node) bool { - switch element.Namespace { - case "", "html": - return isSpecialElementMap[element.Data] - case "math": - switch element.Data { - case "mi", "mo", "mn", "ms", "mtext", "annotation-xml": - return true - } - case "svg": - switch element.Data { - case "foreignObject", "desc", "title": - return true - } - } - return false -} diff --git 
a/vendor/golang.org/x/net/html/doc.go b/vendor/golang.org/x/net/html/doc.go deleted file mode 100644 index 822ed42a0..000000000 --- a/vendor/golang.org/x/net/html/doc.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package html implements an HTML5-compliant tokenizer and parser. - -Tokenization is done by creating a Tokenizer for an io.Reader r. It is the -caller's responsibility to ensure that r provides UTF-8 encoded HTML. - - z := html.NewTokenizer(r) - -Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(), -which parses the next token and returns its type, or an error: - - for { - tt := z.Next() - if tt == html.ErrorToken { - // ... - return ... - } - // Process the current token. - } - -There are two APIs for retrieving the current token. The high-level API is to -call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs -allow optionally calling Raw after Next but before Token, Text, TagName, or -TagAttr. In EBNF notation, the valid call sequence per token is: - - Next {Raw} [ Token | Text | TagName {TagAttr} ] - -Token returns an independent data structure that completely describes a token. -Entities (such as "<") are unescaped, tag names and attribute keys are -lower-cased, and attributes are collected into a []Attribute. For example: - - for { - if z.Next() == html.ErrorToken { - // Returning io.EOF indicates success. - return z.Err() - } - emitToken(z.Token()) - } - -The low-level API performs fewer allocations and copies, but the contents of -the []byte values returned by Text, TagName and TagAttr may change on the next -call to Next. 
For example, to extract an HTML page's anchor text: - - depth := 0 - for { - tt := z.Next() - switch tt { - case html.ErrorToken: - return z.Err() - case html.TextToken: - if depth > 0 { - // emitBytes should copy the []byte it receives, - // if it doesn't process it immediately. - emitBytes(z.Text()) - } - case html.StartTagToken, html.EndTagToken: - tn, _ := z.TagName() - if len(tn) == 1 && tn[0] == 'a' { - if tt == html.StartTagToken { - depth++ - } else { - depth-- - } - } - } - } - -Parsing is done by calling Parse with an io.Reader, which returns the root of -the parse tree (the document element) as a *Node. It is the caller's -responsibility to ensure that the Reader provides UTF-8 encoded HTML. For -example, to process each anchor node in depth-first order: - - doc, err := html.Parse(r) - if err != nil { - // ... - } - var f func(*html.Node) - f = func(n *html.Node) { - if n.Type == html.ElementNode && n.Data == "a" { - // Do something with n... - } - for c := n.FirstChild; c != nil; c = c.NextSibling { - f(c) - } - } - f(doc) - -The relevant specifications include: -https://html.spec.whatwg.org/multipage/syntax.html and -https://html.spec.whatwg.org/multipage/syntax.html#tokenization -*/ -package html // import "golang.org/x/net/html" - -// The tokenization algorithm implemented by this package is not a line-by-line -// transliteration of the relatively verbose state-machine in the WHATWG -// specification. A more direct approach is used instead, where the program -// counter implies the state, such as whether it is tokenizing a tag or a text -// node. Specification compliance is verified by checking expected and actual -// outputs over a test suite rather than aiming for algorithmic fidelity. - -// TODO(nigeltao): Does a DOM API belong in this package or a separate one? -// TODO(nigeltao): How does parsing interact with a JavaScript engine? 
diff --git a/vendor/golang.org/x/net/html/doctype.go b/vendor/golang.org/x/net/html/doctype.go deleted file mode 100644 index c484e5a94..000000000 --- a/vendor/golang.org/x/net/html/doctype.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -import ( - "strings" -) - -// parseDoctype parses the data from a DoctypeToken into a name, -// public identifier, and system identifier. It returns a Node whose Type -// is DoctypeNode, whose Data is the name, and which has attributes -// named "system" and "public" for the two identifiers if they were present. -// quirks is whether the document should be parsed in "quirks mode". -func parseDoctype(s string) (n *Node, quirks bool) { - n = &Node{Type: DoctypeNode} - - // Find the name. - space := strings.IndexAny(s, whitespace) - if space == -1 { - space = len(s) - } - n.Data = s[:space] - // The comparison to "html" is case-sensitive. - if n.Data != "html" { - quirks = true - } - n.Data = strings.ToLower(n.Data) - s = strings.TrimLeft(s[space:], whitespace) - - if len(s) < 6 { - // It can't start with "PUBLIC" or "SYSTEM". - // Ignore the rest of the string. 
- return n, quirks || s != "" - } - - key := strings.ToLower(s[:6]) - s = s[6:] - for key == "public" || key == "system" { - s = strings.TrimLeft(s, whitespace) - if s == "" { - break - } - quote := s[0] - if quote != '"' && quote != '\'' { - break - } - s = s[1:] - q := strings.IndexRune(s, rune(quote)) - var id string - if q == -1 { - id = s - s = "" - } else { - id = s[:q] - s = s[q+1:] - } - n.Attr = append(n.Attr, Attribute{Key: key, Val: id}) - if key == "public" { - key = "system" - } else { - key = "" - } - } - - if key != "" || s != "" { - quirks = true - } else if len(n.Attr) > 0 { - if n.Attr[0].Key == "public" { - public := strings.ToLower(n.Attr[0].Val) - switch public { - case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html": - quirks = true - default: - for _, q := range quirkyIDs { - if strings.HasPrefix(public, q) { - quirks = true - break - } - } - } - // The following two public IDs only cause quirks mode if there is no system ID. - if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") || - strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) { - quirks = true - } - } - if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" && - strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" { - quirks = true - } - } - - return n, quirks -} - -// quirkyIDs is a list of public doctype identifiers that cause a document -// to be interpreted in quirks mode. The identifiers should be in lower case. 
-var quirkyIDs = []string{ - "+//silmaril//dtd html pro v0r11 19970101//", - "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", - "-//as//dtd html 3.0 aswedit + extensions//", - "-//ietf//dtd html 2.0 level 1//", - "-//ietf//dtd html 2.0 level 2//", - "-//ietf//dtd html 2.0 strict level 1//", - "-//ietf//dtd html 2.0 strict level 2//", - "-//ietf//dtd html 2.0 strict//", - "-//ietf//dtd html 2.0//", - "-//ietf//dtd html 2.1e//", - "-//ietf//dtd html 3.0//", - "-//ietf//dtd html 3.2 final//", - "-//ietf//dtd html 3.2//", - "-//ietf//dtd html 3//", - "-//ietf//dtd html level 0//", - "-//ietf//dtd html level 1//", - "-//ietf//dtd html level 2//", - "-//ietf//dtd html level 3//", - "-//ietf//dtd html strict level 0//", - "-//ietf//dtd html strict level 1//", - "-//ietf//dtd html strict level 2//", - "-//ietf//dtd html strict level 3//", - "-//ietf//dtd html strict//", - "-//ietf//dtd html//", - "-//metrius//dtd metrius presentational//", - "-//microsoft//dtd internet explorer 2.0 html strict//", - "-//microsoft//dtd internet explorer 2.0 html//", - "-//microsoft//dtd internet explorer 2.0 tables//", - "-//microsoft//dtd internet explorer 3.0 html strict//", - "-//microsoft//dtd internet explorer 3.0 html//", - "-//microsoft//dtd internet explorer 3.0 tables//", - "-//netscape comm. corp.//dtd html//", - "-//netscape comm. 
corp.//dtd strict html//", - "-//o'reilly and associates//dtd html 2.0//", - "-//o'reilly and associates//dtd html extended 1.0//", - "-//o'reilly and associates//dtd html extended relaxed 1.0//", - "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", - "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", - "-//spyglass//dtd html 2.0 extended//", - "-//sq//dtd html 2.0 hotmetal + extensions//", - "-//sun microsystems corp.//dtd hotjava html//", - "-//sun microsystems corp.//dtd hotjava strict html//", - "-//w3c//dtd html 3 1995-03-24//", - "-//w3c//dtd html 3.2 draft//", - "-//w3c//dtd html 3.2 final//", - "-//w3c//dtd html 3.2//", - "-//w3c//dtd html 3.2s draft//", - "-//w3c//dtd html 4.0 frameset//", - "-//w3c//dtd html 4.0 transitional//", - "-//w3c//dtd html experimental 19960712//", - "-//w3c//dtd html experimental 970421//", - "-//w3c//dtd w3 html//", - "-//w3o//dtd w3 html 3.0//", - "-//webtechs//dtd mozilla html 2.0//", - "-//webtechs//dtd mozilla html//", -} diff --git a/vendor/golang.org/x/net/html/entity.go b/vendor/golang.org/x/net/html/entity.go deleted file mode 100644 index b628880a0..000000000 --- a/vendor/golang.org/x/net/html/entity.go +++ /dev/null @@ -1,2253 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -// All entities that do not end with ';' are 6 or fewer bytes long. -const longestEntityWithoutSemicolon = 6 - -// entity is a map from HTML entity names to their values. The semicolon matters: -// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references -// lists both "amp" and "amp;" as two separate entries. 
-// -// Note that the HTML5 list is larger than the HTML4 list at -// http://www.w3.org/TR/html4/sgml/entities.html -var entity = map[string]rune{ - "AElig;": '\U000000C6', - "AMP;": '\U00000026', - "Aacute;": '\U000000C1', - "Abreve;": '\U00000102', - "Acirc;": '\U000000C2', - "Acy;": '\U00000410', - "Afr;": '\U0001D504', - "Agrave;": '\U000000C0', - "Alpha;": '\U00000391', - "Amacr;": '\U00000100', - "And;": '\U00002A53', - "Aogon;": '\U00000104', - "Aopf;": '\U0001D538', - "ApplyFunction;": '\U00002061', - "Aring;": '\U000000C5', - "Ascr;": '\U0001D49C', - "Assign;": '\U00002254', - "Atilde;": '\U000000C3', - "Auml;": '\U000000C4', - "Backslash;": '\U00002216', - "Barv;": '\U00002AE7', - "Barwed;": '\U00002306', - "Bcy;": '\U00000411', - "Because;": '\U00002235', - "Bernoullis;": '\U0000212C', - "Beta;": '\U00000392', - "Bfr;": '\U0001D505', - "Bopf;": '\U0001D539', - "Breve;": '\U000002D8', - "Bscr;": '\U0000212C', - "Bumpeq;": '\U0000224E', - "CHcy;": '\U00000427', - "COPY;": '\U000000A9', - "Cacute;": '\U00000106', - "Cap;": '\U000022D2', - "CapitalDifferentialD;": '\U00002145', - "Cayleys;": '\U0000212D', - "Ccaron;": '\U0000010C', - "Ccedil;": '\U000000C7', - "Ccirc;": '\U00000108', - "Cconint;": '\U00002230', - "Cdot;": '\U0000010A', - "Cedilla;": '\U000000B8', - "CenterDot;": '\U000000B7', - "Cfr;": '\U0000212D', - "Chi;": '\U000003A7', - "CircleDot;": '\U00002299', - "CircleMinus;": '\U00002296', - "CirclePlus;": '\U00002295', - "CircleTimes;": '\U00002297', - "ClockwiseContourIntegral;": '\U00002232', - "CloseCurlyDoubleQuote;": '\U0000201D', - "CloseCurlyQuote;": '\U00002019', - "Colon;": '\U00002237', - "Colone;": '\U00002A74', - "Congruent;": '\U00002261', - "Conint;": '\U0000222F', - "ContourIntegral;": '\U0000222E', - "Copf;": '\U00002102', - "Coproduct;": '\U00002210', - "CounterClockwiseContourIntegral;": '\U00002233', - "Cross;": '\U00002A2F', - "Cscr;": '\U0001D49E', - "Cup;": '\U000022D3', - "CupCap;": '\U0000224D', - "DD;": '\U00002145', - 
"DDotrahd;": '\U00002911', - "DJcy;": '\U00000402', - "DScy;": '\U00000405', - "DZcy;": '\U0000040F', - "Dagger;": '\U00002021', - "Darr;": '\U000021A1', - "Dashv;": '\U00002AE4', - "Dcaron;": '\U0000010E', - "Dcy;": '\U00000414', - "Del;": '\U00002207', - "Delta;": '\U00000394', - "Dfr;": '\U0001D507', - "DiacriticalAcute;": '\U000000B4', - "DiacriticalDot;": '\U000002D9', - "DiacriticalDoubleAcute;": '\U000002DD', - "DiacriticalGrave;": '\U00000060', - "DiacriticalTilde;": '\U000002DC', - "Diamond;": '\U000022C4', - "DifferentialD;": '\U00002146', - "Dopf;": '\U0001D53B', - "Dot;": '\U000000A8', - "DotDot;": '\U000020DC', - "DotEqual;": '\U00002250', - "DoubleContourIntegral;": '\U0000222F', - "DoubleDot;": '\U000000A8', - "DoubleDownArrow;": '\U000021D3', - "DoubleLeftArrow;": '\U000021D0', - "DoubleLeftRightArrow;": '\U000021D4', - "DoubleLeftTee;": '\U00002AE4', - "DoubleLongLeftArrow;": '\U000027F8', - "DoubleLongLeftRightArrow;": '\U000027FA', - "DoubleLongRightArrow;": '\U000027F9', - "DoubleRightArrow;": '\U000021D2', - "DoubleRightTee;": '\U000022A8', - "DoubleUpArrow;": '\U000021D1', - "DoubleUpDownArrow;": '\U000021D5', - "DoubleVerticalBar;": '\U00002225', - "DownArrow;": '\U00002193', - "DownArrowBar;": '\U00002913', - "DownArrowUpArrow;": '\U000021F5', - "DownBreve;": '\U00000311', - "DownLeftRightVector;": '\U00002950', - "DownLeftTeeVector;": '\U0000295E', - "DownLeftVector;": '\U000021BD', - "DownLeftVectorBar;": '\U00002956', - "DownRightTeeVector;": '\U0000295F', - "DownRightVector;": '\U000021C1', - "DownRightVectorBar;": '\U00002957', - "DownTee;": '\U000022A4', - "DownTeeArrow;": '\U000021A7', - "Downarrow;": '\U000021D3', - "Dscr;": '\U0001D49F', - "Dstrok;": '\U00000110', - "ENG;": '\U0000014A', - "ETH;": '\U000000D0', - "Eacute;": '\U000000C9', - "Ecaron;": '\U0000011A', - "Ecirc;": '\U000000CA', - "Ecy;": '\U0000042D', - "Edot;": '\U00000116', - "Efr;": '\U0001D508', - "Egrave;": '\U000000C8', - "Element;": '\U00002208', - "Emacr;": 
'\U00000112', - "EmptySmallSquare;": '\U000025FB', - "EmptyVerySmallSquare;": '\U000025AB', - "Eogon;": '\U00000118', - "Eopf;": '\U0001D53C', - "Epsilon;": '\U00000395', - "Equal;": '\U00002A75', - "EqualTilde;": '\U00002242', - "Equilibrium;": '\U000021CC', - "Escr;": '\U00002130', - "Esim;": '\U00002A73', - "Eta;": '\U00000397', - "Euml;": '\U000000CB', - "Exists;": '\U00002203', - "ExponentialE;": '\U00002147', - "Fcy;": '\U00000424', - "Ffr;": '\U0001D509', - "FilledSmallSquare;": '\U000025FC', - "FilledVerySmallSquare;": '\U000025AA', - "Fopf;": '\U0001D53D', - "ForAll;": '\U00002200', - "Fouriertrf;": '\U00002131', - "Fscr;": '\U00002131', - "GJcy;": '\U00000403', - "GT;": '\U0000003E', - "Gamma;": '\U00000393', - "Gammad;": '\U000003DC', - "Gbreve;": '\U0000011E', - "Gcedil;": '\U00000122', - "Gcirc;": '\U0000011C', - "Gcy;": '\U00000413', - "Gdot;": '\U00000120', - "Gfr;": '\U0001D50A', - "Gg;": '\U000022D9', - "Gopf;": '\U0001D53E', - "GreaterEqual;": '\U00002265', - "GreaterEqualLess;": '\U000022DB', - "GreaterFullEqual;": '\U00002267', - "GreaterGreater;": '\U00002AA2', - "GreaterLess;": '\U00002277', - "GreaterSlantEqual;": '\U00002A7E', - "GreaterTilde;": '\U00002273', - "Gscr;": '\U0001D4A2', - "Gt;": '\U0000226B', - "HARDcy;": '\U0000042A', - "Hacek;": '\U000002C7', - "Hat;": '\U0000005E', - "Hcirc;": '\U00000124', - "Hfr;": '\U0000210C', - "HilbertSpace;": '\U0000210B', - "Hopf;": '\U0000210D', - "HorizontalLine;": '\U00002500', - "Hscr;": '\U0000210B', - "Hstrok;": '\U00000126', - "HumpDownHump;": '\U0000224E', - "HumpEqual;": '\U0000224F', - "IEcy;": '\U00000415', - "IJlig;": '\U00000132', - "IOcy;": '\U00000401', - "Iacute;": '\U000000CD', - "Icirc;": '\U000000CE', - "Icy;": '\U00000418', - "Idot;": '\U00000130', - "Ifr;": '\U00002111', - "Igrave;": '\U000000CC', - "Im;": '\U00002111', - "Imacr;": '\U0000012A', - "ImaginaryI;": '\U00002148', - "Implies;": '\U000021D2', - "Int;": '\U0000222C', - "Integral;": '\U0000222B', - "Intersection;": 
'\U000022C2', - "InvisibleComma;": '\U00002063', - "InvisibleTimes;": '\U00002062', - "Iogon;": '\U0000012E', - "Iopf;": '\U0001D540', - "Iota;": '\U00000399', - "Iscr;": '\U00002110', - "Itilde;": '\U00000128', - "Iukcy;": '\U00000406', - "Iuml;": '\U000000CF', - "Jcirc;": '\U00000134', - "Jcy;": '\U00000419', - "Jfr;": '\U0001D50D', - "Jopf;": '\U0001D541', - "Jscr;": '\U0001D4A5', - "Jsercy;": '\U00000408', - "Jukcy;": '\U00000404', - "KHcy;": '\U00000425', - "KJcy;": '\U0000040C', - "Kappa;": '\U0000039A', - "Kcedil;": '\U00000136', - "Kcy;": '\U0000041A', - "Kfr;": '\U0001D50E', - "Kopf;": '\U0001D542', - "Kscr;": '\U0001D4A6', - "LJcy;": '\U00000409', - "LT;": '\U0000003C', - "Lacute;": '\U00000139', - "Lambda;": '\U0000039B', - "Lang;": '\U000027EA', - "Laplacetrf;": '\U00002112', - "Larr;": '\U0000219E', - "Lcaron;": '\U0000013D', - "Lcedil;": '\U0000013B', - "Lcy;": '\U0000041B', - "LeftAngleBracket;": '\U000027E8', - "LeftArrow;": '\U00002190', - "LeftArrowBar;": '\U000021E4', - "LeftArrowRightArrow;": '\U000021C6', - "LeftCeiling;": '\U00002308', - "LeftDoubleBracket;": '\U000027E6', - "LeftDownTeeVector;": '\U00002961', - "LeftDownVector;": '\U000021C3', - "LeftDownVectorBar;": '\U00002959', - "LeftFloor;": '\U0000230A', - "LeftRightArrow;": '\U00002194', - "LeftRightVector;": '\U0000294E', - "LeftTee;": '\U000022A3', - "LeftTeeArrow;": '\U000021A4', - "LeftTeeVector;": '\U0000295A', - "LeftTriangle;": '\U000022B2', - "LeftTriangleBar;": '\U000029CF', - "LeftTriangleEqual;": '\U000022B4', - "LeftUpDownVector;": '\U00002951', - "LeftUpTeeVector;": '\U00002960', - "LeftUpVector;": '\U000021BF', - "LeftUpVectorBar;": '\U00002958', - "LeftVector;": '\U000021BC', - "LeftVectorBar;": '\U00002952', - "Leftarrow;": '\U000021D0', - "Leftrightarrow;": '\U000021D4', - "LessEqualGreater;": '\U000022DA', - "LessFullEqual;": '\U00002266', - "LessGreater;": '\U00002276', - "LessLess;": '\U00002AA1', - "LessSlantEqual;": '\U00002A7D', - "LessTilde;": '\U00002272', - 
"Lfr;": '\U0001D50F', - "Ll;": '\U000022D8', - "Lleftarrow;": '\U000021DA', - "Lmidot;": '\U0000013F', - "LongLeftArrow;": '\U000027F5', - "LongLeftRightArrow;": '\U000027F7', - "LongRightArrow;": '\U000027F6', - "Longleftarrow;": '\U000027F8', - "Longleftrightarrow;": '\U000027FA', - "Longrightarrow;": '\U000027F9', - "Lopf;": '\U0001D543', - "LowerLeftArrow;": '\U00002199', - "LowerRightArrow;": '\U00002198', - "Lscr;": '\U00002112', - "Lsh;": '\U000021B0', - "Lstrok;": '\U00000141', - "Lt;": '\U0000226A', - "Map;": '\U00002905', - "Mcy;": '\U0000041C', - "MediumSpace;": '\U0000205F', - "Mellintrf;": '\U00002133', - "Mfr;": '\U0001D510', - "MinusPlus;": '\U00002213', - "Mopf;": '\U0001D544', - "Mscr;": '\U00002133', - "Mu;": '\U0000039C', - "NJcy;": '\U0000040A', - "Nacute;": '\U00000143', - "Ncaron;": '\U00000147', - "Ncedil;": '\U00000145', - "Ncy;": '\U0000041D', - "NegativeMediumSpace;": '\U0000200B', - "NegativeThickSpace;": '\U0000200B', - "NegativeThinSpace;": '\U0000200B', - "NegativeVeryThinSpace;": '\U0000200B', - "NestedGreaterGreater;": '\U0000226B', - "NestedLessLess;": '\U0000226A', - "NewLine;": '\U0000000A', - "Nfr;": '\U0001D511', - "NoBreak;": '\U00002060', - "NonBreakingSpace;": '\U000000A0', - "Nopf;": '\U00002115', - "Not;": '\U00002AEC', - "NotCongruent;": '\U00002262', - "NotCupCap;": '\U0000226D', - "NotDoubleVerticalBar;": '\U00002226', - "NotElement;": '\U00002209', - "NotEqual;": '\U00002260', - "NotExists;": '\U00002204', - "NotGreater;": '\U0000226F', - "NotGreaterEqual;": '\U00002271', - "NotGreaterLess;": '\U00002279', - "NotGreaterTilde;": '\U00002275', - "NotLeftTriangle;": '\U000022EA', - "NotLeftTriangleEqual;": '\U000022EC', - "NotLess;": '\U0000226E', - "NotLessEqual;": '\U00002270', - "NotLessGreater;": '\U00002278', - "NotLessTilde;": '\U00002274', - "NotPrecedes;": '\U00002280', - "NotPrecedesSlantEqual;": '\U000022E0', - "NotReverseElement;": '\U0000220C', - "NotRightTriangle;": '\U000022EB', - "NotRightTriangleEqual;": 
'\U000022ED', - "NotSquareSubsetEqual;": '\U000022E2', - "NotSquareSupersetEqual;": '\U000022E3', - "NotSubsetEqual;": '\U00002288', - "NotSucceeds;": '\U00002281', - "NotSucceedsSlantEqual;": '\U000022E1', - "NotSupersetEqual;": '\U00002289', - "NotTilde;": '\U00002241', - "NotTildeEqual;": '\U00002244', - "NotTildeFullEqual;": '\U00002247', - "NotTildeTilde;": '\U00002249', - "NotVerticalBar;": '\U00002224', - "Nscr;": '\U0001D4A9', - "Ntilde;": '\U000000D1', - "Nu;": '\U0000039D', - "OElig;": '\U00000152', - "Oacute;": '\U000000D3', - "Ocirc;": '\U000000D4', - "Ocy;": '\U0000041E', - "Odblac;": '\U00000150', - "Ofr;": '\U0001D512', - "Ograve;": '\U000000D2', - "Omacr;": '\U0000014C', - "Omega;": '\U000003A9', - "Omicron;": '\U0000039F', - "Oopf;": '\U0001D546', - "OpenCurlyDoubleQuote;": '\U0000201C', - "OpenCurlyQuote;": '\U00002018', - "Or;": '\U00002A54', - "Oscr;": '\U0001D4AA', - "Oslash;": '\U000000D8', - "Otilde;": '\U000000D5', - "Otimes;": '\U00002A37', - "Ouml;": '\U000000D6', - "OverBar;": '\U0000203E', - "OverBrace;": '\U000023DE', - "OverBracket;": '\U000023B4', - "OverParenthesis;": '\U000023DC', - "PartialD;": '\U00002202', - "Pcy;": '\U0000041F', - "Pfr;": '\U0001D513', - "Phi;": '\U000003A6', - "Pi;": '\U000003A0', - "PlusMinus;": '\U000000B1', - "Poincareplane;": '\U0000210C', - "Popf;": '\U00002119', - "Pr;": '\U00002ABB', - "Precedes;": '\U0000227A', - "PrecedesEqual;": '\U00002AAF', - "PrecedesSlantEqual;": '\U0000227C', - "PrecedesTilde;": '\U0000227E', - "Prime;": '\U00002033', - "Product;": '\U0000220F', - "Proportion;": '\U00002237', - "Proportional;": '\U0000221D', - "Pscr;": '\U0001D4AB', - "Psi;": '\U000003A8', - "QUOT;": '\U00000022', - "Qfr;": '\U0001D514', - "Qopf;": '\U0000211A', - "Qscr;": '\U0001D4AC', - "RBarr;": '\U00002910', - "REG;": '\U000000AE', - "Racute;": '\U00000154', - "Rang;": '\U000027EB', - "Rarr;": '\U000021A0', - "Rarrtl;": '\U00002916', - "Rcaron;": '\U00000158', - "Rcedil;": '\U00000156', - "Rcy;": 
'\U00000420', - "Re;": '\U0000211C', - "ReverseElement;": '\U0000220B', - "ReverseEquilibrium;": '\U000021CB', - "ReverseUpEquilibrium;": '\U0000296F', - "Rfr;": '\U0000211C', - "Rho;": '\U000003A1', - "RightAngleBracket;": '\U000027E9', - "RightArrow;": '\U00002192', - "RightArrowBar;": '\U000021E5', - "RightArrowLeftArrow;": '\U000021C4', - "RightCeiling;": '\U00002309', - "RightDoubleBracket;": '\U000027E7', - "RightDownTeeVector;": '\U0000295D', - "RightDownVector;": '\U000021C2', - "RightDownVectorBar;": '\U00002955', - "RightFloor;": '\U0000230B', - "RightTee;": '\U000022A2', - "RightTeeArrow;": '\U000021A6', - "RightTeeVector;": '\U0000295B', - "RightTriangle;": '\U000022B3', - "RightTriangleBar;": '\U000029D0', - "RightTriangleEqual;": '\U000022B5', - "RightUpDownVector;": '\U0000294F', - "RightUpTeeVector;": '\U0000295C', - "RightUpVector;": '\U000021BE', - "RightUpVectorBar;": '\U00002954', - "RightVector;": '\U000021C0', - "RightVectorBar;": '\U00002953', - "Rightarrow;": '\U000021D2', - "Ropf;": '\U0000211D', - "RoundImplies;": '\U00002970', - "Rrightarrow;": '\U000021DB', - "Rscr;": '\U0000211B', - "Rsh;": '\U000021B1', - "RuleDelayed;": '\U000029F4', - "SHCHcy;": '\U00000429', - "SHcy;": '\U00000428', - "SOFTcy;": '\U0000042C', - "Sacute;": '\U0000015A', - "Sc;": '\U00002ABC', - "Scaron;": '\U00000160', - "Scedil;": '\U0000015E', - "Scirc;": '\U0000015C', - "Scy;": '\U00000421', - "Sfr;": '\U0001D516', - "ShortDownArrow;": '\U00002193', - "ShortLeftArrow;": '\U00002190', - "ShortRightArrow;": '\U00002192', - "ShortUpArrow;": '\U00002191', - "Sigma;": '\U000003A3', - "SmallCircle;": '\U00002218', - "Sopf;": '\U0001D54A', - "Sqrt;": '\U0000221A', - "Square;": '\U000025A1', - "SquareIntersection;": '\U00002293', - "SquareSubset;": '\U0000228F', - "SquareSubsetEqual;": '\U00002291', - "SquareSuperset;": '\U00002290', - "SquareSupersetEqual;": '\U00002292', - "SquareUnion;": '\U00002294', - "Sscr;": '\U0001D4AE', - "Star;": '\U000022C6', - "Sub;": 
'\U000022D0', - "Subset;": '\U000022D0', - "SubsetEqual;": '\U00002286', - "Succeeds;": '\U0000227B', - "SucceedsEqual;": '\U00002AB0', - "SucceedsSlantEqual;": '\U0000227D', - "SucceedsTilde;": '\U0000227F', - "SuchThat;": '\U0000220B', - "Sum;": '\U00002211', - "Sup;": '\U000022D1', - "Superset;": '\U00002283', - "SupersetEqual;": '\U00002287', - "Supset;": '\U000022D1', - "THORN;": '\U000000DE', - "TRADE;": '\U00002122', - "TSHcy;": '\U0000040B', - "TScy;": '\U00000426', - "Tab;": '\U00000009', - "Tau;": '\U000003A4', - "Tcaron;": '\U00000164', - "Tcedil;": '\U00000162', - "Tcy;": '\U00000422', - "Tfr;": '\U0001D517', - "Therefore;": '\U00002234', - "Theta;": '\U00000398', - "ThinSpace;": '\U00002009', - "Tilde;": '\U0000223C', - "TildeEqual;": '\U00002243', - "TildeFullEqual;": '\U00002245', - "TildeTilde;": '\U00002248', - "Topf;": '\U0001D54B', - "TripleDot;": '\U000020DB', - "Tscr;": '\U0001D4AF', - "Tstrok;": '\U00000166', - "Uacute;": '\U000000DA', - "Uarr;": '\U0000219F', - "Uarrocir;": '\U00002949', - "Ubrcy;": '\U0000040E', - "Ubreve;": '\U0000016C', - "Ucirc;": '\U000000DB', - "Ucy;": '\U00000423', - "Udblac;": '\U00000170', - "Ufr;": '\U0001D518', - "Ugrave;": '\U000000D9', - "Umacr;": '\U0000016A', - "UnderBar;": '\U0000005F', - "UnderBrace;": '\U000023DF', - "UnderBracket;": '\U000023B5', - "UnderParenthesis;": '\U000023DD', - "Union;": '\U000022C3', - "UnionPlus;": '\U0000228E', - "Uogon;": '\U00000172', - "Uopf;": '\U0001D54C', - "UpArrow;": '\U00002191', - "UpArrowBar;": '\U00002912', - "UpArrowDownArrow;": '\U000021C5', - "UpDownArrow;": '\U00002195', - "UpEquilibrium;": '\U0000296E', - "UpTee;": '\U000022A5', - "UpTeeArrow;": '\U000021A5', - "Uparrow;": '\U000021D1', - "Updownarrow;": '\U000021D5', - "UpperLeftArrow;": '\U00002196', - "UpperRightArrow;": '\U00002197', - "Upsi;": '\U000003D2', - "Upsilon;": '\U000003A5', - "Uring;": '\U0000016E', - "Uscr;": '\U0001D4B0', - "Utilde;": '\U00000168', - "Uuml;": '\U000000DC', - "VDash;": 
'\U000022AB', - "Vbar;": '\U00002AEB', - "Vcy;": '\U00000412', - "Vdash;": '\U000022A9', - "Vdashl;": '\U00002AE6', - "Vee;": '\U000022C1', - "Verbar;": '\U00002016', - "Vert;": '\U00002016', - "VerticalBar;": '\U00002223', - "VerticalLine;": '\U0000007C', - "VerticalSeparator;": '\U00002758', - "VerticalTilde;": '\U00002240', - "VeryThinSpace;": '\U0000200A', - "Vfr;": '\U0001D519', - "Vopf;": '\U0001D54D', - "Vscr;": '\U0001D4B1', - "Vvdash;": '\U000022AA', - "Wcirc;": '\U00000174', - "Wedge;": '\U000022C0', - "Wfr;": '\U0001D51A', - "Wopf;": '\U0001D54E', - "Wscr;": '\U0001D4B2', - "Xfr;": '\U0001D51B', - "Xi;": '\U0000039E', - "Xopf;": '\U0001D54F', - "Xscr;": '\U0001D4B3', - "YAcy;": '\U0000042F', - "YIcy;": '\U00000407', - "YUcy;": '\U0000042E', - "Yacute;": '\U000000DD', - "Ycirc;": '\U00000176', - "Ycy;": '\U0000042B', - "Yfr;": '\U0001D51C', - "Yopf;": '\U0001D550', - "Yscr;": '\U0001D4B4', - "Yuml;": '\U00000178', - "ZHcy;": '\U00000416', - "Zacute;": '\U00000179', - "Zcaron;": '\U0000017D', - "Zcy;": '\U00000417', - "Zdot;": '\U0000017B', - "ZeroWidthSpace;": '\U0000200B', - "Zeta;": '\U00000396', - "Zfr;": '\U00002128', - "Zopf;": '\U00002124', - "Zscr;": '\U0001D4B5', - "aacute;": '\U000000E1', - "abreve;": '\U00000103', - "ac;": '\U0000223E', - "acd;": '\U0000223F', - "acirc;": '\U000000E2', - "acute;": '\U000000B4', - "acy;": '\U00000430', - "aelig;": '\U000000E6', - "af;": '\U00002061', - "afr;": '\U0001D51E', - "agrave;": '\U000000E0', - "alefsym;": '\U00002135', - "aleph;": '\U00002135', - "alpha;": '\U000003B1', - "amacr;": '\U00000101', - "amalg;": '\U00002A3F', - "amp;": '\U00000026', - "and;": '\U00002227', - "andand;": '\U00002A55', - "andd;": '\U00002A5C', - "andslope;": '\U00002A58', - "andv;": '\U00002A5A', - "ang;": '\U00002220', - "ange;": '\U000029A4', - "angle;": '\U00002220', - "angmsd;": '\U00002221', - "angmsdaa;": '\U000029A8', - "angmsdab;": '\U000029A9', - "angmsdac;": '\U000029AA', - "angmsdad;": '\U000029AB', - "angmsdae;": 
'\U000029AC', - "angmsdaf;": '\U000029AD', - "angmsdag;": '\U000029AE', - "angmsdah;": '\U000029AF', - "angrt;": '\U0000221F', - "angrtvb;": '\U000022BE', - "angrtvbd;": '\U0000299D', - "angsph;": '\U00002222', - "angst;": '\U000000C5', - "angzarr;": '\U0000237C', - "aogon;": '\U00000105', - "aopf;": '\U0001D552', - "ap;": '\U00002248', - "apE;": '\U00002A70', - "apacir;": '\U00002A6F', - "ape;": '\U0000224A', - "apid;": '\U0000224B', - "apos;": '\U00000027', - "approx;": '\U00002248', - "approxeq;": '\U0000224A', - "aring;": '\U000000E5', - "ascr;": '\U0001D4B6', - "ast;": '\U0000002A', - "asymp;": '\U00002248', - "asympeq;": '\U0000224D', - "atilde;": '\U000000E3', - "auml;": '\U000000E4', - "awconint;": '\U00002233', - "awint;": '\U00002A11', - "bNot;": '\U00002AED', - "backcong;": '\U0000224C', - "backepsilon;": '\U000003F6', - "backprime;": '\U00002035', - "backsim;": '\U0000223D', - "backsimeq;": '\U000022CD', - "barvee;": '\U000022BD', - "barwed;": '\U00002305', - "barwedge;": '\U00002305', - "bbrk;": '\U000023B5', - "bbrktbrk;": '\U000023B6', - "bcong;": '\U0000224C', - "bcy;": '\U00000431', - "bdquo;": '\U0000201E', - "becaus;": '\U00002235', - "because;": '\U00002235', - "bemptyv;": '\U000029B0', - "bepsi;": '\U000003F6', - "bernou;": '\U0000212C', - "beta;": '\U000003B2', - "beth;": '\U00002136', - "between;": '\U0000226C', - "bfr;": '\U0001D51F', - "bigcap;": '\U000022C2', - "bigcirc;": '\U000025EF', - "bigcup;": '\U000022C3', - "bigodot;": '\U00002A00', - "bigoplus;": '\U00002A01', - "bigotimes;": '\U00002A02', - "bigsqcup;": '\U00002A06', - "bigstar;": '\U00002605', - "bigtriangledown;": '\U000025BD', - "bigtriangleup;": '\U000025B3', - "biguplus;": '\U00002A04', - "bigvee;": '\U000022C1', - "bigwedge;": '\U000022C0', - "bkarow;": '\U0000290D', - "blacklozenge;": '\U000029EB', - "blacksquare;": '\U000025AA', - "blacktriangle;": '\U000025B4', - "blacktriangledown;": '\U000025BE', - "blacktriangleleft;": '\U000025C2', - "blacktriangleright;": 
'\U000025B8', - "blank;": '\U00002423', - "blk12;": '\U00002592', - "blk14;": '\U00002591', - "blk34;": '\U00002593', - "block;": '\U00002588', - "bnot;": '\U00002310', - "bopf;": '\U0001D553', - "bot;": '\U000022A5', - "bottom;": '\U000022A5', - "bowtie;": '\U000022C8', - "boxDL;": '\U00002557', - "boxDR;": '\U00002554', - "boxDl;": '\U00002556', - "boxDr;": '\U00002553', - "boxH;": '\U00002550', - "boxHD;": '\U00002566', - "boxHU;": '\U00002569', - "boxHd;": '\U00002564', - "boxHu;": '\U00002567', - "boxUL;": '\U0000255D', - "boxUR;": '\U0000255A', - "boxUl;": '\U0000255C', - "boxUr;": '\U00002559', - "boxV;": '\U00002551', - "boxVH;": '\U0000256C', - "boxVL;": '\U00002563', - "boxVR;": '\U00002560', - "boxVh;": '\U0000256B', - "boxVl;": '\U00002562', - "boxVr;": '\U0000255F', - "boxbox;": '\U000029C9', - "boxdL;": '\U00002555', - "boxdR;": '\U00002552', - "boxdl;": '\U00002510', - "boxdr;": '\U0000250C', - "boxh;": '\U00002500', - "boxhD;": '\U00002565', - "boxhU;": '\U00002568', - "boxhd;": '\U0000252C', - "boxhu;": '\U00002534', - "boxminus;": '\U0000229F', - "boxplus;": '\U0000229E', - "boxtimes;": '\U000022A0', - "boxuL;": '\U0000255B', - "boxuR;": '\U00002558', - "boxul;": '\U00002518', - "boxur;": '\U00002514', - "boxv;": '\U00002502', - "boxvH;": '\U0000256A', - "boxvL;": '\U00002561', - "boxvR;": '\U0000255E', - "boxvh;": '\U0000253C', - "boxvl;": '\U00002524', - "boxvr;": '\U0000251C', - "bprime;": '\U00002035', - "breve;": '\U000002D8', - "brvbar;": '\U000000A6', - "bscr;": '\U0001D4B7', - "bsemi;": '\U0000204F', - "bsim;": '\U0000223D', - "bsime;": '\U000022CD', - "bsol;": '\U0000005C', - "bsolb;": '\U000029C5', - "bsolhsub;": '\U000027C8', - "bull;": '\U00002022', - "bullet;": '\U00002022', - "bump;": '\U0000224E', - "bumpE;": '\U00002AAE', - "bumpe;": '\U0000224F', - "bumpeq;": '\U0000224F', - "cacute;": '\U00000107', - "cap;": '\U00002229', - "capand;": '\U00002A44', - "capbrcup;": '\U00002A49', - "capcap;": '\U00002A4B', - "capcup;": '\U00002A47', 
- "capdot;": '\U00002A40', - "caret;": '\U00002041', - "caron;": '\U000002C7', - "ccaps;": '\U00002A4D', - "ccaron;": '\U0000010D', - "ccedil;": '\U000000E7', - "ccirc;": '\U00000109', - "ccups;": '\U00002A4C', - "ccupssm;": '\U00002A50', - "cdot;": '\U0000010B', - "cedil;": '\U000000B8', - "cemptyv;": '\U000029B2', - "cent;": '\U000000A2', - "centerdot;": '\U000000B7', - "cfr;": '\U0001D520', - "chcy;": '\U00000447', - "check;": '\U00002713', - "checkmark;": '\U00002713', - "chi;": '\U000003C7', - "cir;": '\U000025CB', - "cirE;": '\U000029C3', - "circ;": '\U000002C6', - "circeq;": '\U00002257', - "circlearrowleft;": '\U000021BA', - "circlearrowright;": '\U000021BB', - "circledR;": '\U000000AE', - "circledS;": '\U000024C8', - "circledast;": '\U0000229B', - "circledcirc;": '\U0000229A', - "circleddash;": '\U0000229D', - "cire;": '\U00002257', - "cirfnint;": '\U00002A10', - "cirmid;": '\U00002AEF', - "cirscir;": '\U000029C2', - "clubs;": '\U00002663', - "clubsuit;": '\U00002663', - "colon;": '\U0000003A', - "colone;": '\U00002254', - "coloneq;": '\U00002254', - "comma;": '\U0000002C', - "commat;": '\U00000040', - "comp;": '\U00002201', - "compfn;": '\U00002218', - "complement;": '\U00002201', - "complexes;": '\U00002102', - "cong;": '\U00002245', - "congdot;": '\U00002A6D', - "conint;": '\U0000222E', - "copf;": '\U0001D554', - "coprod;": '\U00002210', - "copy;": '\U000000A9', - "copysr;": '\U00002117', - "crarr;": '\U000021B5', - "cross;": '\U00002717', - "cscr;": '\U0001D4B8', - "csub;": '\U00002ACF', - "csube;": '\U00002AD1', - "csup;": '\U00002AD0', - "csupe;": '\U00002AD2', - "ctdot;": '\U000022EF', - "cudarrl;": '\U00002938', - "cudarrr;": '\U00002935', - "cuepr;": '\U000022DE', - "cuesc;": '\U000022DF', - "cularr;": '\U000021B6', - "cularrp;": '\U0000293D', - "cup;": '\U0000222A', - "cupbrcap;": '\U00002A48', - "cupcap;": '\U00002A46', - "cupcup;": '\U00002A4A', - "cupdot;": '\U0000228D', - "cupor;": '\U00002A45', - "curarr;": '\U000021B7', - "curarrm;": 
'\U0000293C', - "curlyeqprec;": '\U000022DE', - "curlyeqsucc;": '\U000022DF', - "curlyvee;": '\U000022CE', - "curlywedge;": '\U000022CF', - "curren;": '\U000000A4', - "curvearrowleft;": '\U000021B6', - "curvearrowright;": '\U000021B7', - "cuvee;": '\U000022CE', - "cuwed;": '\U000022CF', - "cwconint;": '\U00002232', - "cwint;": '\U00002231', - "cylcty;": '\U0000232D', - "dArr;": '\U000021D3', - "dHar;": '\U00002965', - "dagger;": '\U00002020', - "daleth;": '\U00002138', - "darr;": '\U00002193', - "dash;": '\U00002010', - "dashv;": '\U000022A3', - "dbkarow;": '\U0000290F', - "dblac;": '\U000002DD', - "dcaron;": '\U0000010F', - "dcy;": '\U00000434', - "dd;": '\U00002146', - "ddagger;": '\U00002021', - "ddarr;": '\U000021CA', - "ddotseq;": '\U00002A77', - "deg;": '\U000000B0', - "delta;": '\U000003B4', - "demptyv;": '\U000029B1', - "dfisht;": '\U0000297F', - "dfr;": '\U0001D521', - "dharl;": '\U000021C3', - "dharr;": '\U000021C2', - "diam;": '\U000022C4', - "diamond;": '\U000022C4', - "diamondsuit;": '\U00002666', - "diams;": '\U00002666', - "die;": '\U000000A8', - "digamma;": '\U000003DD', - "disin;": '\U000022F2', - "div;": '\U000000F7', - "divide;": '\U000000F7', - "divideontimes;": '\U000022C7', - "divonx;": '\U000022C7', - "djcy;": '\U00000452', - "dlcorn;": '\U0000231E', - "dlcrop;": '\U0000230D', - "dollar;": '\U00000024', - "dopf;": '\U0001D555', - "dot;": '\U000002D9', - "doteq;": '\U00002250', - "doteqdot;": '\U00002251', - "dotminus;": '\U00002238', - "dotplus;": '\U00002214', - "dotsquare;": '\U000022A1', - "doublebarwedge;": '\U00002306', - "downarrow;": '\U00002193', - "downdownarrows;": '\U000021CA', - "downharpoonleft;": '\U000021C3', - "downharpoonright;": '\U000021C2', - "drbkarow;": '\U00002910', - "drcorn;": '\U0000231F', - "drcrop;": '\U0000230C', - "dscr;": '\U0001D4B9', - "dscy;": '\U00000455', - "dsol;": '\U000029F6', - "dstrok;": '\U00000111', - "dtdot;": '\U000022F1', - "dtri;": '\U000025BF', - "dtrif;": '\U000025BE', - "duarr;": '\U000021F5', 
- "duhar;": '\U0000296F', - "dwangle;": '\U000029A6', - "dzcy;": '\U0000045F', - "dzigrarr;": '\U000027FF', - "eDDot;": '\U00002A77', - "eDot;": '\U00002251', - "eacute;": '\U000000E9', - "easter;": '\U00002A6E', - "ecaron;": '\U0000011B', - "ecir;": '\U00002256', - "ecirc;": '\U000000EA', - "ecolon;": '\U00002255', - "ecy;": '\U0000044D', - "edot;": '\U00000117', - "ee;": '\U00002147', - "efDot;": '\U00002252', - "efr;": '\U0001D522', - "eg;": '\U00002A9A', - "egrave;": '\U000000E8', - "egs;": '\U00002A96', - "egsdot;": '\U00002A98', - "el;": '\U00002A99', - "elinters;": '\U000023E7', - "ell;": '\U00002113', - "els;": '\U00002A95', - "elsdot;": '\U00002A97', - "emacr;": '\U00000113', - "empty;": '\U00002205', - "emptyset;": '\U00002205', - "emptyv;": '\U00002205', - "emsp;": '\U00002003', - "emsp13;": '\U00002004', - "emsp14;": '\U00002005', - "eng;": '\U0000014B', - "ensp;": '\U00002002', - "eogon;": '\U00000119', - "eopf;": '\U0001D556', - "epar;": '\U000022D5', - "eparsl;": '\U000029E3', - "eplus;": '\U00002A71', - "epsi;": '\U000003B5', - "epsilon;": '\U000003B5', - "epsiv;": '\U000003F5', - "eqcirc;": '\U00002256', - "eqcolon;": '\U00002255', - "eqsim;": '\U00002242', - "eqslantgtr;": '\U00002A96', - "eqslantless;": '\U00002A95', - "equals;": '\U0000003D', - "equest;": '\U0000225F', - "equiv;": '\U00002261', - "equivDD;": '\U00002A78', - "eqvparsl;": '\U000029E5', - "erDot;": '\U00002253', - "erarr;": '\U00002971', - "escr;": '\U0000212F', - "esdot;": '\U00002250', - "esim;": '\U00002242', - "eta;": '\U000003B7', - "eth;": '\U000000F0', - "euml;": '\U000000EB', - "euro;": '\U000020AC', - "excl;": '\U00000021', - "exist;": '\U00002203', - "expectation;": '\U00002130', - "exponentiale;": '\U00002147', - "fallingdotseq;": '\U00002252', - "fcy;": '\U00000444', - "female;": '\U00002640', - "ffilig;": '\U0000FB03', - "fflig;": '\U0000FB00', - "ffllig;": '\U0000FB04', - "ffr;": '\U0001D523', - "filig;": '\U0000FB01', - "flat;": '\U0000266D', - "fllig;": 
'\U0000FB02', - "fltns;": '\U000025B1', - "fnof;": '\U00000192', - "fopf;": '\U0001D557', - "forall;": '\U00002200', - "fork;": '\U000022D4', - "forkv;": '\U00002AD9', - "fpartint;": '\U00002A0D', - "frac12;": '\U000000BD', - "frac13;": '\U00002153', - "frac14;": '\U000000BC', - "frac15;": '\U00002155', - "frac16;": '\U00002159', - "frac18;": '\U0000215B', - "frac23;": '\U00002154', - "frac25;": '\U00002156', - "frac34;": '\U000000BE', - "frac35;": '\U00002157', - "frac38;": '\U0000215C', - "frac45;": '\U00002158', - "frac56;": '\U0000215A', - "frac58;": '\U0000215D', - "frac78;": '\U0000215E', - "frasl;": '\U00002044', - "frown;": '\U00002322', - "fscr;": '\U0001D4BB', - "gE;": '\U00002267', - "gEl;": '\U00002A8C', - "gacute;": '\U000001F5', - "gamma;": '\U000003B3', - "gammad;": '\U000003DD', - "gap;": '\U00002A86', - "gbreve;": '\U0000011F', - "gcirc;": '\U0000011D', - "gcy;": '\U00000433', - "gdot;": '\U00000121', - "ge;": '\U00002265', - "gel;": '\U000022DB', - "geq;": '\U00002265', - "geqq;": '\U00002267', - "geqslant;": '\U00002A7E', - "ges;": '\U00002A7E', - "gescc;": '\U00002AA9', - "gesdot;": '\U00002A80', - "gesdoto;": '\U00002A82', - "gesdotol;": '\U00002A84', - "gesles;": '\U00002A94', - "gfr;": '\U0001D524', - "gg;": '\U0000226B', - "ggg;": '\U000022D9', - "gimel;": '\U00002137', - "gjcy;": '\U00000453', - "gl;": '\U00002277', - "glE;": '\U00002A92', - "gla;": '\U00002AA5', - "glj;": '\U00002AA4', - "gnE;": '\U00002269', - "gnap;": '\U00002A8A', - "gnapprox;": '\U00002A8A', - "gne;": '\U00002A88', - "gneq;": '\U00002A88', - "gneqq;": '\U00002269', - "gnsim;": '\U000022E7', - "gopf;": '\U0001D558', - "grave;": '\U00000060', - "gscr;": '\U0000210A', - "gsim;": '\U00002273', - "gsime;": '\U00002A8E', - "gsiml;": '\U00002A90', - "gt;": '\U0000003E', - "gtcc;": '\U00002AA7', - "gtcir;": '\U00002A7A', - "gtdot;": '\U000022D7', - "gtlPar;": '\U00002995', - "gtquest;": '\U00002A7C', - "gtrapprox;": '\U00002A86', - "gtrarr;": '\U00002978', - "gtrdot;": 
'\U000022D7', - "gtreqless;": '\U000022DB', - "gtreqqless;": '\U00002A8C', - "gtrless;": '\U00002277', - "gtrsim;": '\U00002273', - "hArr;": '\U000021D4', - "hairsp;": '\U0000200A', - "half;": '\U000000BD', - "hamilt;": '\U0000210B', - "hardcy;": '\U0000044A', - "harr;": '\U00002194', - "harrcir;": '\U00002948', - "harrw;": '\U000021AD', - "hbar;": '\U0000210F', - "hcirc;": '\U00000125', - "hearts;": '\U00002665', - "heartsuit;": '\U00002665', - "hellip;": '\U00002026', - "hercon;": '\U000022B9', - "hfr;": '\U0001D525', - "hksearow;": '\U00002925', - "hkswarow;": '\U00002926', - "hoarr;": '\U000021FF', - "homtht;": '\U0000223B', - "hookleftarrow;": '\U000021A9', - "hookrightarrow;": '\U000021AA', - "hopf;": '\U0001D559', - "horbar;": '\U00002015', - "hscr;": '\U0001D4BD', - "hslash;": '\U0000210F', - "hstrok;": '\U00000127', - "hybull;": '\U00002043', - "hyphen;": '\U00002010', - "iacute;": '\U000000ED', - "ic;": '\U00002063', - "icirc;": '\U000000EE', - "icy;": '\U00000438', - "iecy;": '\U00000435', - "iexcl;": '\U000000A1', - "iff;": '\U000021D4', - "ifr;": '\U0001D526', - "igrave;": '\U000000EC', - "ii;": '\U00002148', - "iiiint;": '\U00002A0C', - "iiint;": '\U0000222D', - "iinfin;": '\U000029DC', - "iiota;": '\U00002129', - "ijlig;": '\U00000133', - "imacr;": '\U0000012B', - "image;": '\U00002111', - "imagline;": '\U00002110', - "imagpart;": '\U00002111', - "imath;": '\U00000131', - "imof;": '\U000022B7', - "imped;": '\U000001B5', - "in;": '\U00002208', - "incare;": '\U00002105', - "infin;": '\U0000221E', - "infintie;": '\U000029DD', - "inodot;": '\U00000131', - "int;": '\U0000222B', - "intcal;": '\U000022BA', - "integers;": '\U00002124', - "intercal;": '\U000022BA', - "intlarhk;": '\U00002A17', - "intprod;": '\U00002A3C', - "iocy;": '\U00000451', - "iogon;": '\U0000012F', - "iopf;": '\U0001D55A', - "iota;": '\U000003B9', - "iprod;": '\U00002A3C', - "iquest;": '\U000000BF', - "iscr;": '\U0001D4BE', - "isin;": '\U00002208', - "isinE;": '\U000022F9', - 
"isindot;": '\U000022F5', - "isins;": '\U000022F4', - "isinsv;": '\U000022F3', - "isinv;": '\U00002208', - "it;": '\U00002062', - "itilde;": '\U00000129', - "iukcy;": '\U00000456', - "iuml;": '\U000000EF', - "jcirc;": '\U00000135', - "jcy;": '\U00000439', - "jfr;": '\U0001D527', - "jmath;": '\U00000237', - "jopf;": '\U0001D55B', - "jscr;": '\U0001D4BF', - "jsercy;": '\U00000458', - "jukcy;": '\U00000454', - "kappa;": '\U000003BA', - "kappav;": '\U000003F0', - "kcedil;": '\U00000137', - "kcy;": '\U0000043A', - "kfr;": '\U0001D528', - "kgreen;": '\U00000138', - "khcy;": '\U00000445', - "kjcy;": '\U0000045C', - "kopf;": '\U0001D55C', - "kscr;": '\U0001D4C0', - "lAarr;": '\U000021DA', - "lArr;": '\U000021D0', - "lAtail;": '\U0000291B', - "lBarr;": '\U0000290E', - "lE;": '\U00002266', - "lEg;": '\U00002A8B', - "lHar;": '\U00002962', - "lacute;": '\U0000013A', - "laemptyv;": '\U000029B4', - "lagran;": '\U00002112', - "lambda;": '\U000003BB', - "lang;": '\U000027E8', - "langd;": '\U00002991', - "langle;": '\U000027E8', - "lap;": '\U00002A85', - "laquo;": '\U000000AB', - "larr;": '\U00002190', - "larrb;": '\U000021E4', - "larrbfs;": '\U0000291F', - "larrfs;": '\U0000291D', - "larrhk;": '\U000021A9', - "larrlp;": '\U000021AB', - "larrpl;": '\U00002939', - "larrsim;": '\U00002973', - "larrtl;": '\U000021A2', - "lat;": '\U00002AAB', - "latail;": '\U00002919', - "late;": '\U00002AAD', - "lbarr;": '\U0000290C', - "lbbrk;": '\U00002772', - "lbrace;": '\U0000007B', - "lbrack;": '\U0000005B', - "lbrke;": '\U0000298B', - "lbrksld;": '\U0000298F', - "lbrkslu;": '\U0000298D', - "lcaron;": '\U0000013E', - "lcedil;": '\U0000013C', - "lceil;": '\U00002308', - "lcub;": '\U0000007B', - "lcy;": '\U0000043B', - "ldca;": '\U00002936', - "ldquo;": '\U0000201C', - "ldquor;": '\U0000201E', - "ldrdhar;": '\U00002967', - "ldrushar;": '\U0000294B', - "ldsh;": '\U000021B2', - "le;": '\U00002264', - "leftarrow;": '\U00002190', - "leftarrowtail;": '\U000021A2', - "leftharpoondown;": '\U000021BD', - 
"leftharpoonup;": '\U000021BC', - "leftleftarrows;": '\U000021C7', - "leftrightarrow;": '\U00002194', - "leftrightarrows;": '\U000021C6', - "leftrightharpoons;": '\U000021CB', - "leftrightsquigarrow;": '\U000021AD', - "leftthreetimes;": '\U000022CB', - "leg;": '\U000022DA', - "leq;": '\U00002264', - "leqq;": '\U00002266', - "leqslant;": '\U00002A7D', - "les;": '\U00002A7D', - "lescc;": '\U00002AA8', - "lesdot;": '\U00002A7F', - "lesdoto;": '\U00002A81', - "lesdotor;": '\U00002A83', - "lesges;": '\U00002A93', - "lessapprox;": '\U00002A85', - "lessdot;": '\U000022D6', - "lesseqgtr;": '\U000022DA', - "lesseqqgtr;": '\U00002A8B', - "lessgtr;": '\U00002276', - "lesssim;": '\U00002272', - "lfisht;": '\U0000297C', - "lfloor;": '\U0000230A', - "lfr;": '\U0001D529', - "lg;": '\U00002276', - "lgE;": '\U00002A91', - "lhard;": '\U000021BD', - "lharu;": '\U000021BC', - "lharul;": '\U0000296A', - "lhblk;": '\U00002584', - "ljcy;": '\U00000459', - "ll;": '\U0000226A', - "llarr;": '\U000021C7', - "llcorner;": '\U0000231E', - "llhard;": '\U0000296B', - "lltri;": '\U000025FA', - "lmidot;": '\U00000140', - "lmoust;": '\U000023B0', - "lmoustache;": '\U000023B0', - "lnE;": '\U00002268', - "lnap;": '\U00002A89', - "lnapprox;": '\U00002A89', - "lne;": '\U00002A87', - "lneq;": '\U00002A87', - "lneqq;": '\U00002268', - "lnsim;": '\U000022E6', - "loang;": '\U000027EC', - "loarr;": '\U000021FD', - "lobrk;": '\U000027E6', - "longleftarrow;": '\U000027F5', - "longleftrightarrow;": '\U000027F7', - "longmapsto;": '\U000027FC', - "longrightarrow;": '\U000027F6', - "looparrowleft;": '\U000021AB', - "looparrowright;": '\U000021AC', - "lopar;": '\U00002985', - "lopf;": '\U0001D55D', - "loplus;": '\U00002A2D', - "lotimes;": '\U00002A34', - "lowast;": '\U00002217', - "lowbar;": '\U0000005F', - "loz;": '\U000025CA', - "lozenge;": '\U000025CA', - "lozf;": '\U000029EB', - "lpar;": '\U00000028', - "lparlt;": '\U00002993', - "lrarr;": '\U000021C6', - "lrcorner;": '\U0000231F', - "lrhar;": '\U000021CB', - 
"lrhard;": '\U0000296D', - "lrm;": '\U0000200E', - "lrtri;": '\U000022BF', - "lsaquo;": '\U00002039', - "lscr;": '\U0001D4C1', - "lsh;": '\U000021B0', - "lsim;": '\U00002272', - "lsime;": '\U00002A8D', - "lsimg;": '\U00002A8F', - "lsqb;": '\U0000005B', - "lsquo;": '\U00002018', - "lsquor;": '\U0000201A', - "lstrok;": '\U00000142', - "lt;": '\U0000003C', - "ltcc;": '\U00002AA6', - "ltcir;": '\U00002A79', - "ltdot;": '\U000022D6', - "lthree;": '\U000022CB', - "ltimes;": '\U000022C9', - "ltlarr;": '\U00002976', - "ltquest;": '\U00002A7B', - "ltrPar;": '\U00002996', - "ltri;": '\U000025C3', - "ltrie;": '\U000022B4', - "ltrif;": '\U000025C2', - "lurdshar;": '\U0000294A', - "luruhar;": '\U00002966', - "mDDot;": '\U0000223A', - "macr;": '\U000000AF', - "male;": '\U00002642', - "malt;": '\U00002720', - "maltese;": '\U00002720', - "map;": '\U000021A6', - "mapsto;": '\U000021A6', - "mapstodown;": '\U000021A7', - "mapstoleft;": '\U000021A4', - "mapstoup;": '\U000021A5', - "marker;": '\U000025AE', - "mcomma;": '\U00002A29', - "mcy;": '\U0000043C', - "mdash;": '\U00002014', - "measuredangle;": '\U00002221', - "mfr;": '\U0001D52A', - "mho;": '\U00002127', - "micro;": '\U000000B5', - "mid;": '\U00002223', - "midast;": '\U0000002A', - "midcir;": '\U00002AF0', - "middot;": '\U000000B7', - "minus;": '\U00002212', - "minusb;": '\U0000229F', - "minusd;": '\U00002238', - "minusdu;": '\U00002A2A', - "mlcp;": '\U00002ADB', - "mldr;": '\U00002026', - "mnplus;": '\U00002213', - "models;": '\U000022A7', - "mopf;": '\U0001D55E', - "mp;": '\U00002213', - "mscr;": '\U0001D4C2', - "mstpos;": '\U0000223E', - "mu;": '\U000003BC', - "multimap;": '\U000022B8', - "mumap;": '\U000022B8', - "nLeftarrow;": '\U000021CD', - "nLeftrightarrow;": '\U000021CE', - "nRightarrow;": '\U000021CF', - "nVDash;": '\U000022AF', - "nVdash;": '\U000022AE', - "nabla;": '\U00002207', - "nacute;": '\U00000144', - "nap;": '\U00002249', - "napos;": '\U00000149', - "napprox;": '\U00002249', - "natur;": '\U0000266E', - 
"natural;": '\U0000266E', - "naturals;": '\U00002115', - "nbsp;": '\U000000A0', - "ncap;": '\U00002A43', - "ncaron;": '\U00000148', - "ncedil;": '\U00000146', - "ncong;": '\U00002247', - "ncup;": '\U00002A42', - "ncy;": '\U0000043D', - "ndash;": '\U00002013', - "ne;": '\U00002260', - "neArr;": '\U000021D7', - "nearhk;": '\U00002924', - "nearr;": '\U00002197', - "nearrow;": '\U00002197', - "nequiv;": '\U00002262', - "nesear;": '\U00002928', - "nexist;": '\U00002204', - "nexists;": '\U00002204', - "nfr;": '\U0001D52B', - "nge;": '\U00002271', - "ngeq;": '\U00002271', - "ngsim;": '\U00002275', - "ngt;": '\U0000226F', - "ngtr;": '\U0000226F', - "nhArr;": '\U000021CE', - "nharr;": '\U000021AE', - "nhpar;": '\U00002AF2', - "ni;": '\U0000220B', - "nis;": '\U000022FC', - "nisd;": '\U000022FA', - "niv;": '\U0000220B', - "njcy;": '\U0000045A', - "nlArr;": '\U000021CD', - "nlarr;": '\U0000219A', - "nldr;": '\U00002025', - "nle;": '\U00002270', - "nleftarrow;": '\U0000219A', - "nleftrightarrow;": '\U000021AE', - "nleq;": '\U00002270', - "nless;": '\U0000226E', - "nlsim;": '\U00002274', - "nlt;": '\U0000226E', - "nltri;": '\U000022EA', - "nltrie;": '\U000022EC', - "nmid;": '\U00002224', - "nopf;": '\U0001D55F', - "not;": '\U000000AC', - "notin;": '\U00002209', - "notinva;": '\U00002209', - "notinvb;": '\U000022F7', - "notinvc;": '\U000022F6', - "notni;": '\U0000220C', - "notniva;": '\U0000220C', - "notnivb;": '\U000022FE', - "notnivc;": '\U000022FD', - "npar;": '\U00002226', - "nparallel;": '\U00002226', - "npolint;": '\U00002A14', - "npr;": '\U00002280', - "nprcue;": '\U000022E0', - "nprec;": '\U00002280', - "nrArr;": '\U000021CF', - "nrarr;": '\U0000219B', - "nrightarrow;": '\U0000219B', - "nrtri;": '\U000022EB', - "nrtrie;": '\U000022ED', - "nsc;": '\U00002281', - "nsccue;": '\U000022E1', - "nscr;": '\U0001D4C3', - "nshortmid;": '\U00002224', - "nshortparallel;": '\U00002226', - "nsim;": '\U00002241', - "nsime;": '\U00002244', - "nsimeq;": '\U00002244', - "nsmid;": 
'\U00002224', - "nspar;": '\U00002226', - "nsqsube;": '\U000022E2', - "nsqsupe;": '\U000022E3', - "nsub;": '\U00002284', - "nsube;": '\U00002288', - "nsubseteq;": '\U00002288', - "nsucc;": '\U00002281', - "nsup;": '\U00002285', - "nsupe;": '\U00002289', - "nsupseteq;": '\U00002289', - "ntgl;": '\U00002279', - "ntilde;": '\U000000F1', - "ntlg;": '\U00002278', - "ntriangleleft;": '\U000022EA', - "ntrianglelefteq;": '\U000022EC', - "ntriangleright;": '\U000022EB', - "ntrianglerighteq;": '\U000022ED', - "nu;": '\U000003BD', - "num;": '\U00000023', - "numero;": '\U00002116', - "numsp;": '\U00002007', - "nvDash;": '\U000022AD', - "nvHarr;": '\U00002904', - "nvdash;": '\U000022AC', - "nvinfin;": '\U000029DE', - "nvlArr;": '\U00002902', - "nvrArr;": '\U00002903', - "nwArr;": '\U000021D6', - "nwarhk;": '\U00002923', - "nwarr;": '\U00002196', - "nwarrow;": '\U00002196', - "nwnear;": '\U00002927', - "oS;": '\U000024C8', - "oacute;": '\U000000F3', - "oast;": '\U0000229B', - "ocir;": '\U0000229A', - "ocirc;": '\U000000F4', - "ocy;": '\U0000043E', - "odash;": '\U0000229D', - "odblac;": '\U00000151', - "odiv;": '\U00002A38', - "odot;": '\U00002299', - "odsold;": '\U000029BC', - "oelig;": '\U00000153', - "ofcir;": '\U000029BF', - "ofr;": '\U0001D52C', - "ogon;": '\U000002DB', - "ograve;": '\U000000F2', - "ogt;": '\U000029C1', - "ohbar;": '\U000029B5', - "ohm;": '\U000003A9', - "oint;": '\U0000222E', - "olarr;": '\U000021BA', - "olcir;": '\U000029BE', - "olcross;": '\U000029BB', - "oline;": '\U0000203E', - "olt;": '\U000029C0', - "omacr;": '\U0000014D', - "omega;": '\U000003C9', - "omicron;": '\U000003BF', - "omid;": '\U000029B6', - "ominus;": '\U00002296', - "oopf;": '\U0001D560', - "opar;": '\U000029B7', - "operp;": '\U000029B9', - "oplus;": '\U00002295', - "or;": '\U00002228', - "orarr;": '\U000021BB', - "ord;": '\U00002A5D', - "order;": '\U00002134', - "orderof;": '\U00002134', - "ordf;": '\U000000AA', - "ordm;": '\U000000BA', - "origof;": '\U000022B6', - "oror;": '\U00002A56', 
- "orslope;": '\U00002A57', - "orv;": '\U00002A5B', - "oscr;": '\U00002134', - "oslash;": '\U000000F8', - "osol;": '\U00002298', - "otilde;": '\U000000F5', - "otimes;": '\U00002297', - "otimesas;": '\U00002A36', - "ouml;": '\U000000F6', - "ovbar;": '\U0000233D', - "par;": '\U00002225', - "para;": '\U000000B6', - "parallel;": '\U00002225', - "parsim;": '\U00002AF3', - "parsl;": '\U00002AFD', - "part;": '\U00002202', - "pcy;": '\U0000043F', - "percnt;": '\U00000025', - "period;": '\U0000002E', - "permil;": '\U00002030', - "perp;": '\U000022A5', - "pertenk;": '\U00002031', - "pfr;": '\U0001D52D', - "phi;": '\U000003C6', - "phiv;": '\U000003D5', - "phmmat;": '\U00002133', - "phone;": '\U0000260E', - "pi;": '\U000003C0', - "pitchfork;": '\U000022D4', - "piv;": '\U000003D6', - "planck;": '\U0000210F', - "planckh;": '\U0000210E', - "plankv;": '\U0000210F', - "plus;": '\U0000002B', - "plusacir;": '\U00002A23', - "plusb;": '\U0000229E', - "pluscir;": '\U00002A22', - "plusdo;": '\U00002214', - "plusdu;": '\U00002A25', - "pluse;": '\U00002A72', - "plusmn;": '\U000000B1', - "plussim;": '\U00002A26', - "plustwo;": '\U00002A27', - "pm;": '\U000000B1', - "pointint;": '\U00002A15', - "popf;": '\U0001D561', - "pound;": '\U000000A3', - "pr;": '\U0000227A', - "prE;": '\U00002AB3', - "prap;": '\U00002AB7', - "prcue;": '\U0000227C', - "pre;": '\U00002AAF', - "prec;": '\U0000227A', - "precapprox;": '\U00002AB7', - "preccurlyeq;": '\U0000227C', - "preceq;": '\U00002AAF', - "precnapprox;": '\U00002AB9', - "precneqq;": '\U00002AB5', - "precnsim;": '\U000022E8', - "precsim;": '\U0000227E', - "prime;": '\U00002032', - "primes;": '\U00002119', - "prnE;": '\U00002AB5', - "prnap;": '\U00002AB9', - "prnsim;": '\U000022E8', - "prod;": '\U0000220F', - "profalar;": '\U0000232E', - "profline;": '\U00002312', - "profsurf;": '\U00002313', - "prop;": '\U0000221D', - "propto;": '\U0000221D', - "prsim;": '\U0000227E', - "prurel;": '\U000022B0', - "pscr;": '\U0001D4C5', - "psi;": '\U000003C8', - 
"puncsp;": '\U00002008', - "qfr;": '\U0001D52E', - "qint;": '\U00002A0C', - "qopf;": '\U0001D562', - "qprime;": '\U00002057', - "qscr;": '\U0001D4C6', - "quaternions;": '\U0000210D', - "quatint;": '\U00002A16', - "quest;": '\U0000003F', - "questeq;": '\U0000225F', - "quot;": '\U00000022', - "rAarr;": '\U000021DB', - "rArr;": '\U000021D2', - "rAtail;": '\U0000291C', - "rBarr;": '\U0000290F', - "rHar;": '\U00002964', - "racute;": '\U00000155', - "radic;": '\U0000221A', - "raemptyv;": '\U000029B3', - "rang;": '\U000027E9', - "rangd;": '\U00002992', - "range;": '\U000029A5', - "rangle;": '\U000027E9', - "raquo;": '\U000000BB', - "rarr;": '\U00002192', - "rarrap;": '\U00002975', - "rarrb;": '\U000021E5', - "rarrbfs;": '\U00002920', - "rarrc;": '\U00002933', - "rarrfs;": '\U0000291E', - "rarrhk;": '\U000021AA', - "rarrlp;": '\U000021AC', - "rarrpl;": '\U00002945', - "rarrsim;": '\U00002974', - "rarrtl;": '\U000021A3', - "rarrw;": '\U0000219D', - "ratail;": '\U0000291A', - "ratio;": '\U00002236', - "rationals;": '\U0000211A', - "rbarr;": '\U0000290D', - "rbbrk;": '\U00002773', - "rbrace;": '\U0000007D', - "rbrack;": '\U0000005D', - "rbrke;": '\U0000298C', - "rbrksld;": '\U0000298E', - "rbrkslu;": '\U00002990', - "rcaron;": '\U00000159', - "rcedil;": '\U00000157', - "rceil;": '\U00002309', - "rcub;": '\U0000007D', - "rcy;": '\U00000440', - "rdca;": '\U00002937', - "rdldhar;": '\U00002969', - "rdquo;": '\U0000201D', - "rdquor;": '\U0000201D', - "rdsh;": '\U000021B3', - "real;": '\U0000211C', - "realine;": '\U0000211B', - "realpart;": '\U0000211C', - "reals;": '\U0000211D', - "rect;": '\U000025AD', - "reg;": '\U000000AE', - "rfisht;": '\U0000297D', - "rfloor;": '\U0000230B', - "rfr;": '\U0001D52F', - "rhard;": '\U000021C1', - "rharu;": '\U000021C0', - "rharul;": '\U0000296C', - "rho;": '\U000003C1', - "rhov;": '\U000003F1', - "rightarrow;": '\U00002192', - "rightarrowtail;": '\U000021A3', - "rightharpoondown;": '\U000021C1', - "rightharpoonup;": '\U000021C0', - 
"rightleftarrows;": '\U000021C4', - "rightleftharpoons;": '\U000021CC', - "rightrightarrows;": '\U000021C9', - "rightsquigarrow;": '\U0000219D', - "rightthreetimes;": '\U000022CC', - "ring;": '\U000002DA', - "risingdotseq;": '\U00002253', - "rlarr;": '\U000021C4', - "rlhar;": '\U000021CC', - "rlm;": '\U0000200F', - "rmoust;": '\U000023B1', - "rmoustache;": '\U000023B1', - "rnmid;": '\U00002AEE', - "roang;": '\U000027ED', - "roarr;": '\U000021FE', - "robrk;": '\U000027E7', - "ropar;": '\U00002986', - "ropf;": '\U0001D563', - "roplus;": '\U00002A2E', - "rotimes;": '\U00002A35', - "rpar;": '\U00000029', - "rpargt;": '\U00002994', - "rppolint;": '\U00002A12', - "rrarr;": '\U000021C9', - "rsaquo;": '\U0000203A', - "rscr;": '\U0001D4C7', - "rsh;": '\U000021B1', - "rsqb;": '\U0000005D', - "rsquo;": '\U00002019', - "rsquor;": '\U00002019', - "rthree;": '\U000022CC', - "rtimes;": '\U000022CA', - "rtri;": '\U000025B9', - "rtrie;": '\U000022B5', - "rtrif;": '\U000025B8', - "rtriltri;": '\U000029CE', - "ruluhar;": '\U00002968', - "rx;": '\U0000211E', - "sacute;": '\U0000015B', - "sbquo;": '\U0000201A', - "sc;": '\U0000227B', - "scE;": '\U00002AB4', - "scap;": '\U00002AB8', - "scaron;": '\U00000161', - "sccue;": '\U0000227D', - "sce;": '\U00002AB0', - "scedil;": '\U0000015F', - "scirc;": '\U0000015D', - "scnE;": '\U00002AB6', - "scnap;": '\U00002ABA', - "scnsim;": '\U000022E9', - "scpolint;": '\U00002A13', - "scsim;": '\U0000227F', - "scy;": '\U00000441', - "sdot;": '\U000022C5', - "sdotb;": '\U000022A1', - "sdote;": '\U00002A66', - "seArr;": '\U000021D8', - "searhk;": '\U00002925', - "searr;": '\U00002198', - "searrow;": '\U00002198', - "sect;": '\U000000A7', - "semi;": '\U0000003B', - "seswar;": '\U00002929', - "setminus;": '\U00002216', - "setmn;": '\U00002216', - "sext;": '\U00002736', - "sfr;": '\U0001D530', - "sfrown;": '\U00002322', - "sharp;": '\U0000266F', - "shchcy;": '\U00000449', - "shcy;": '\U00000448', - "shortmid;": '\U00002223', - "shortparallel;": '\U00002225', 
- "shy;": '\U000000AD', - "sigma;": '\U000003C3', - "sigmaf;": '\U000003C2', - "sigmav;": '\U000003C2', - "sim;": '\U0000223C', - "simdot;": '\U00002A6A', - "sime;": '\U00002243', - "simeq;": '\U00002243', - "simg;": '\U00002A9E', - "simgE;": '\U00002AA0', - "siml;": '\U00002A9D', - "simlE;": '\U00002A9F', - "simne;": '\U00002246', - "simplus;": '\U00002A24', - "simrarr;": '\U00002972', - "slarr;": '\U00002190', - "smallsetminus;": '\U00002216', - "smashp;": '\U00002A33', - "smeparsl;": '\U000029E4', - "smid;": '\U00002223', - "smile;": '\U00002323', - "smt;": '\U00002AAA', - "smte;": '\U00002AAC', - "softcy;": '\U0000044C', - "sol;": '\U0000002F', - "solb;": '\U000029C4', - "solbar;": '\U0000233F', - "sopf;": '\U0001D564', - "spades;": '\U00002660', - "spadesuit;": '\U00002660', - "spar;": '\U00002225', - "sqcap;": '\U00002293', - "sqcup;": '\U00002294', - "sqsub;": '\U0000228F', - "sqsube;": '\U00002291', - "sqsubset;": '\U0000228F', - "sqsubseteq;": '\U00002291', - "sqsup;": '\U00002290', - "sqsupe;": '\U00002292', - "sqsupset;": '\U00002290', - "sqsupseteq;": '\U00002292', - "squ;": '\U000025A1', - "square;": '\U000025A1', - "squarf;": '\U000025AA', - "squf;": '\U000025AA', - "srarr;": '\U00002192', - "sscr;": '\U0001D4C8', - "ssetmn;": '\U00002216', - "ssmile;": '\U00002323', - "sstarf;": '\U000022C6', - "star;": '\U00002606', - "starf;": '\U00002605', - "straightepsilon;": '\U000003F5', - "straightphi;": '\U000003D5', - "strns;": '\U000000AF', - "sub;": '\U00002282', - "subE;": '\U00002AC5', - "subdot;": '\U00002ABD', - "sube;": '\U00002286', - "subedot;": '\U00002AC3', - "submult;": '\U00002AC1', - "subnE;": '\U00002ACB', - "subne;": '\U0000228A', - "subplus;": '\U00002ABF', - "subrarr;": '\U00002979', - "subset;": '\U00002282', - "subseteq;": '\U00002286', - "subseteqq;": '\U00002AC5', - "subsetneq;": '\U0000228A', - "subsetneqq;": '\U00002ACB', - "subsim;": '\U00002AC7', - "subsub;": '\U00002AD5', - "subsup;": '\U00002AD3', - "succ;": '\U0000227B', - 
"succapprox;": '\U00002AB8', - "succcurlyeq;": '\U0000227D', - "succeq;": '\U00002AB0', - "succnapprox;": '\U00002ABA', - "succneqq;": '\U00002AB6', - "succnsim;": '\U000022E9', - "succsim;": '\U0000227F', - "sum;": '\U00002211', - "sung;": '\U0000266A', - "sup;": '\U00002283', - "sup1;": '\U000000B9', - "sup2;": '\U000000B2', - "sup3;": '\U000000B3', - "supE;": '\U00002AC6', - "supdot;": '\U00002ABE', - "supdsub;": '\U00002AD8', - "supe;": '\U00002287', - "supedot;": '\U00002AC4', - "suphsol;": '\U000027C9', - "suphsub;": '\U00002AD7', - "suplarr;": '\U0000297B', - "supmult;": '\U00002AC2', - "supnE;": '\U00002ACC', - "supne;": '\U0000228B', - "supplus;": '\U00002AC0', - "supset;": '\U00002283', - "supseteq;": '\U00002287', - "supseteqq;": '\U00002AC6', - "supsetneq;": '\U0000228B', - "supsetneqq;": '\U00002ACC', - "supsim;": '\U00002AC8', - "supsub;": '\U00002AD4', - "supsup;": '\U00002AD6', - "swArr;": '\U000021D9', - "swarhk;": '\U00002926', - "swarr;": '\U00002199', - "swarrow;": '\U00002199', - "swnwar;": '\U0000292A', - "szlig;": '\U000000DF', - "target;": '\U00002316', - "tau;": '\U000003C4', - "tbrk;": '\U000023B4', - "tcaron;": '\U00000165', - "tcedil;": '\U00000163', - "tcy;": '\U00000442', - "tdot;": '\U000020DB', - "telrec;": '\U00002315', - "tfr;": '\U0001D531', - "there4;": '\U00002234', - "therefore;": '\U00002234', - "theta;": '\U000003B8', - "thetasym;": '\U000003D1', - "thetav;": '\U000003D1', - "thickapprox;": '\U00002248', - "thicksim;": '\U0000223C', - "thinsp;": '\U00002009', - "thkap;": '\U00002248', - "thksim;": '\U0000223C', - "thorn;": '\U000000FE', - "tilde;": '\U000002DC', - "times;": '\U000000D7', - "timesb;": '\U000022A0', - "timesbar;": '\U00002A31', - "timesd;": '\U00002A30', - "tint;": '\U0000222D', - "toea;": '\U00002928', - "top;": '\U000022A4', - "topbot;": '\U00002336', - "topcir;": '\U00002AF1', - "topf;": '\U0001D565', - "topfork;": '\U00002ADA', - "tosa;": '\U00002929', - "tprime;": '\U00002034', - "trade;": '\U00002122', - 
"triangle;": '\U000025B5', - "triangledown;": '\U000025BF', - "triangleleft;": '\U000025C3', - "trianglelefteq;": '\U000022B4', - "triangleq;": '\U0000225C', - "triangleright;": '\U000025B9', - "trianglerighteq;": '\U000022B5', - "tridot;": '\U000025EC', - "trie;": '\U0000225C', - "triminus;": '\U00002A3A', - "triplus;": '\U00002A39', - "trisb;": '\U000029CD', - "tritime;": '\U00002A3B', - "trpezium;": '\U000023E2', - "tscr;": '\U0001D4C9', - "tscy;": '\U00000446', - "tshcy;": '\U0000045B', - "tstrok;": '\U00000167', - "twixt;": '\U0000226C', - "twoheadleftarrow;": '\U0000219E', - "twoheadrightarrow;": '\U000021A0', - "uArr;": '\U000021D1', - "uHar;": '\U00002963', - "uacute;": '\U000000FA', - "uarr;": '\U00002191', - "ubrcy;": '\U0000045E', - "ubreve;": '\U0000016D', - "ucirc;": '\U000000FB', - "ucy;": '\U00000443', - "udarr;": '\U000021C5', - "udblac;": '\U00000171', - "udhar;": '\U0000296E', - "ufisht;": '\U0000297E', - "ufr;": '\U0001D532', - "ugrave;": '\U000000F9', - "uharl;": '\U000021BF', - "uharr;": '\U000021BE', - "uhblk;": '\U00002580', - "ulcorn;": '\U0000231C', - "ulcorner;": '\U0000231C', - "ulcrop;": '\U0000230F', - "ultri;": '\U000025F8', - "umacr;": '\U0000016B', - "uml;": '\U000000A8', - "uogon;": '\U00000173', - "uopf;": '\U0001D566', - "uparrow;": '\U00002191', - "updownarrow;": '\U00002195', - "upharpoonleft;": '\U000021BF', - "upharpoonright;": '\U000021BE', - "uplus;": '\U0000228E', - "upsi;": '\U000003C5', - "upsih;": '\U000003D2', - "upsilon;": '\U000003C5', - "upuparrows;": '\U000021C8', - "urcorn;": '\U0000231D', - "urcorner;": '\U0000231D', - "urcrop;": '\U0000230E', - "uring;": '\U0000016F', - "urtri;": '\U000025F9', - "uscr;": '\U0001D4CA', - "utdot;": '\U000022F0', - "utilde;": '\U00000169', - "utri;": '\U000025B5', - "utrif;": '\U000025B4', - "uuarr;": '\U000021C8', - "uuml;": '\U000000FC', - "uwangle;": '\U000029A7', - "vArr;": '\U000021D5', - "vBar;": '\U00002AE8', - "vBarv;": '\U00002AE9', - "vDash;": '\U000022A8', - "vangrt;": 
'\U0000299C', - "varepsilon;": '\U000003F5', - "varkappa;": '\U000003F0', - "varnothing;": '\U00002205', - "varphi;": '\U000003D5', - "varpi;": '\U000003D6', - "varpropto;": '\U0000221D', - "varr;": '\U00002195', - "varrho;": '\U000003F1', - "varsigma;": '\U000003C2', - "vartheta;": '\U000003D1', - "vartriangleleft;": '\U000022B2', - "vartriangleright;": '\U000022B3', - "vcy;": '\U00000432', - "vdash;": '\U000022A2', - "vee;": '\U00002228', - "veebar;": '\U000022BB', - "veeeq;": '\U0000225A', - "vellip;": '\U000022EE', - "verbar;": '\U0000007C', - "vert;": '\U0000007C', - "vfr;": '\U0001D533', - "vltri;": '\U000022B2', - "vopf;": '\U0001D567', - "vprop;": '\U0000221D', - "vrtri;": '\U000022B3', - "vscr;": '\U0001D4CB', - "vzigzag;": '\U0000299A', - "wcirc;": '\U00000175', - "wedbar;": '\U00002A5F', - "wedge;": '\U00002227', - "wedgeq;": '\U00002259', - "weierp;": '\U00002118', - "wfr;": '\U0001D534', - "wopf;": '\U0001D568', - "wp;": '\U00002118', - "wr;": '\U00002240', - "wreath;": '\U00002240', - "wscr;": '\U0001D4CC', - "xcap;": '\U000022C2', - "xcirc;": '\U000025EF', - "xcup;": '\U000022C3', - "xdtri;": '\U000025BD', - "xfr;": '\U0001D535', - "xhArr;": '\U000027FA', - "xharr;": '\U000027F7', - "xi;": '\U000003BE', - "xlArr;": '\U000027F8', - "xlarr;": '\U000027F5', - "xmap;": '\U000027FC', - "xnis;": '\U000022FB', - "xodot;": '\U00002A00', - "xopf;": '\U0001D569', - "xoplus;": '\U00002A01', - "xotime;": '\U00002A02', - "xrArr;": '\U000027F9', - "xrarr;": '\U000027F6', - "xscr;": '\U0001D4CD', - "xsqcup;": '\U00002A06', - "xuplus;": '\U00002A04', - "xutri;": '\U000025B3', - "xvee;": '\U000022C1', - "xwedge;": '\U000022C0', - "yacute;": '\U000000FD', - "yacy;": '\U0000044F', - "ycirc;": '\U00000177', - "ycy;": '\U0000044B', - "yen;": '\U000000A5', - "yfr;": '\U0001D536', - "yicy;": '\U00000457', - "yopf;": '\U0001D56A', - "yscr;": '\U0001D4CE', - "yucy;": '\U0000044E', - "yuml;": '\U000000FF', - "zacute;": '\U0000017A', - "zcaron;": '\U0000017E', - "zcy;": 
'\U00000437', - "zdot;": '\U0000017C', - "zeetrf;": '\U00002128', - "zeta;": '\U000003B6', - "zfr;": '\U0001D537', - "zhcy;": '\U00000436', - "zigrarr;": '\U000021DD', - "zopf;": '\U0001D56B', - "zscr;": '\U0001D4CF', - "zwj;": '\U0000200D', - "zwnj;": '\U0000200C', - "AElig": '\U000000C6', - "AMP": '\U00000026', - "Aacute": '\U000000C1', - "Acirc": '\U000000C2', - "Agrave": '\U000000C0', - "Aring": '\U000000C5', - "Atilde": '\U000000C3', - "Auml": '\U000000C4', - "COPY": '\U000000A9', - "Ccedil": '\U000000C7', - "ETH": '\U000000D0', - "Eacute": '\U000000C9', - "Ecirc": '\U000000CA', - "Egrave": '\U000000C8', - "Euml": '\U000000CB', - "GT": '\U0000003E', - "Iacute": '\U000000CD', - "Icirc": '\U000000CE', - "Igrave": '\U000000CC', - "Iuml": '\U000000CF', - "LT": '\U0000003C', - "Ntilde": '\U000000D1', - "Oacute": '\U000000D3', - "Ocirc": '\U000000D4', - "Ograve": '\U000000D2', - "Oslash": '\U000000D8', - "Otilde": '\U000000D5', - "Ouml": '\U000000D6', - "QUOT": '\U00000022', - "REG": '\U000000AE', - "THORN": '\U000000DE', - "Uacute": '\U000000DA', - "Ucirc": '\U000000DB', - "Ugrave": '\U000000D9', - "Uuml": '\U000000DC', - "Yacute": '\U000000DD', - "aacute": '\U000000E1', - "acirc": '\U000000E2', - "acute": '\U000000B4', - "aelig": '\U000000E6', - "agrave": '\U000000E0', - "amp": '\U00000026', - "aring": '\U000000E5', - "atilde": '\U000000E3', - "auml": '\U000000E4', - "brvbar": '\U000000A6', - "ccedil": '\U000000E7', - "cedil": '\U000000B8', - "cent": '\U000000A2', - "copy": '\U000000A9', - "curren": '\U000000A4', - "deg": '\U000000B0', - "divide": '\U000000F7', - "eacute": '\U000000E9', - "ecirc": '\U000000EA', - "egrave": '\U000000E8', - "eth": '\U000000F0', - "euml": '\U000000EB', - "frac12": '\U000000BD', - "frac14": '\U000000BC', - "frac34": '\U000000BE', - "gt": '\U0000003E', - "iacute": '\U000000ED', - "icirc": '\U000000EE', - "iexcl": '\U000000A1', - "igrave": '\U000000EC', - "iquest": '\U000000BF', - "iuml": '\U000000EF', - "laquo": '\U000000AB', - "lt": 
'\U0000003C', - "macr": '\U000000AF', - "micro": '\U000000B5', - "middot": '\U000000B7', - "nbsp": '\U000000A0', - "not": '\U000000AC', - "ntilde": '\U000000F1', - "oacute": '\U000000F3', - "ocirc": '\U000000F4', - "ograve": '\U000000F2', - "ordf": '\U000000AA', - "ordm": '\U000000BA', - "oslash": '\U000000F8', - "otilde": '\U000000F5', - "ouml": '\U000000F6', - "para": '\U000000B6', - "plusmn": '\U000000B1', - "pound": '\U000000A3', - "quot": '\U00000022', - "raquo": '\U000000BB', - "reg": '\U000000AE', - "sect": '\U000000A7', - "shy": '\U000000AD', - "sup1": '\U000000B9', - "sup2": '\U000000B2', - "sup3": '\U000000B3', - "szlig": '\U000000DF', - "thorn": '\U000000FE', - "times": '\U000000D7', - "uacute": '\U000000FA', - "ucirc": '\U000000FB', - "ugrave": '\U000000F9', - "uml": '\U000000A8', - "uuml": '\U000000FC', - "yacute": '\U000000FD', - "yen": '\U000000A5', - "yuml": '\U000000FF', -} - -// HTML entities that are two unicode codepoints. -var entity2 = map[string][2]rune{ - // TODO(nigeltao): Handle replacements that are wider than their names. 
- // "nLt;": {'\u226A', '\u20D2'}, - // "nGt;": {'\u226B', '\u20D2'}, - "NotEqualTilde;": {'\u2242', '\u0338'}, - "NotGreaterFullEqual;": {'\u2267', '\u0338'}, - "NotGreaterGreater;": {'\u226B', '\u0338'}, - "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'}, - "NotHumpDownHump;": {'\u224E', '\u0338'}, - "NotHumpEqual;": {'\u224F', '\u0338'}, - "NotLeftTriangleBar;": {'\u29CF', '\u0338'}, - "NotLessLess;": {'\u226A', '\u0338'}, - "NotLessSlantEqual;": {'\u2A7D', '\u0338'}, - "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'}, - "NotNestedLessLess;": {'\u2AA1', '\u0338'}, - "NotPrecedesEqual;": {'\u2AAF', '\u0338'}, - "NotRightTriangleBar;": {'\u29D0', '\u0338'}, - "NotSquareSubset;": {'\u228F', '\u0338'}, - "NotSquareSuperset;": {'\u2290', '\u0338'}, - "NotSubset;": {'\u2282', '\u20D2'}, - "NotSucceedsEqual;": {'\u2AB0', '\u0338'}, - "NotSucceedsTilde;": {'\u227F', '\u0338'}, - "NotSuperset;": {'\u2283', '\u20D2'}, - "ThickSpace;": {'\u205F', '\u200A'}, - "acE;": {'\u223E', '\u0333'}, - "bne;": {'\u003D', '\u20E5'}, - "bnequiv;": {'\u2261', '\u20E5'}, - "caps;": {'\u2229', '\uFE00'}, - "cups;": {'\u222A', '\uFE00'}, - "fjlig;": {'\u0066', '\u006A'}, - "gesl;": {'\u22DB', '\uFE00'}, - "gvertneqq;": {'\u2269', '\uFE00'}, - "gvnE;": {'\u2269', '\uFE00'}, - "lates;": {'\u2AAD', '\uFE00'}, - "lesg;": {'\u22DA', '\uFE00'}, - "lvertneqq;": {'\u2268', '\uFE00'}, - "lvnE;": {'\u2268', '\uFE00'}, - "nGg;": {'\u22D9', '\u0338'}, - "nGtv;": {'\u226B', '\u0338'}, - "nLl;": {'\u22D8', '\u0338'}, - "nLtv;": {'\u226A', '\u0338'}, - "nang;": {'\u2220', '\u20D2'}, - "napE;": {'\u2A70', '\u0338'}, - "napid;": {'\u224B', '\u0338'}, - "nbump;": {'\u224E', '\u0338'}, - "nbumpe;": {'\u224F', '\u0338'}, - "ncongdot;": {'\u2A6D', '\u0338'}, - "nedot;": {'\u2250', '\u0338'}, - "nesim;": {'\u2242', '\u0338'}, - "ngE;": {'\u2267', '\u0338'}, - "ngeqq;": {'\u2267', '\u0338'}, - "ngeqslant;": {'\u2A7E', '\u0338'}, - "nges;": {'\u2A7E', '\u0338'}, - "nlE;": {'\u2266', '\u0338'}, - "nleqq;": {'\u2266', 
'\u0338'}, - "nleqslant;": {'\u2A7D', '\u0338'}, - "nles;": {'\u2A7D', '\u0338'}, - "notinE;": {'\u22F9', '\u0338'}, - "notindot;": {'\u22F5', '\u0338'}, - "nparsl;": {'\u2AFD', '\u20E5'}, - "npart;": {'\u2202', '\u0338'}, - "npre;": {'\u2AAF', '\u0338'}, - "npreceq;": {'\u2AAF', '\u0338'}, - "nrarrc;": {'\u2933', '\u0338'}, - "nrarrw;": {'\u219D', '\u0338'}, - "nsce;": {'\u2AB0', '\u0338'}, - "nsubE;": {'\u2AC5', '\u0338'}, - "nsubset;": {'\u2282', '\u20D2'}, - "nsubseteqq;": {'\u2AC5', '\u0338'}, - "nsucceq;": {'\u2AB0', '\u0338'}, - "nsupE;": {'\u2AC6', '\u0338'}, - "nsupset;": {'\u2283', '\u20D2'}, - "nsupseteqq;": {'\u2AC6', '\u0338'}, - "nvap;": {'\u224D', '\u20D2'}, - "nvge;": {'\u2265', '\u20D2'}, - "nvgt;": {'\u003E', '\u20D2'}, - "nvle;": {'\u2264', '\u20D2'}, - "nvlt;": {'\u003C', '\u20D2'}, - "nvltrie;": {'\u22B4', '\u20D2'}, - "nvrtrie;": {'\u22B5', '\u20D2'}, - "nvsim;": {'\u223C', '\u20D2'}, - "race;": {'\u223D', '\u0331'}, - "smtes;": {'\u2AAC', '\uFE00'}, - "sqcaps;": {'\u2293', '\uFE00'}, - "sqcups;": {'\u2294', '\uFE00'}, - "varsubsetneq;": {'\u228A', '\uFE00'}, - "varsubsetneqq;": {'\u2ACB', '\uFE00'}, - "varsupsetneq;": {'\u228B', '\uFE00'}, - "varsupsetneqq;": {'\u2ACC', '\uFE00'}, - "vnsub;": {'\u2282', '\u20D2'}, - "vnsup;": {'\u2283', '\u20D2'}, - "vsubnE;": {'\u2ACB', '\uFE00'}, - "vsubne;": {'\u228A', '\uFE00'}, - "vsupnE;": {'\u2ACC', '\uFE00'}, - "vsupne;": {'\u228B', '\uFE00'}, -} diff --git a/vendor/golang.org/x/net/html/escape.go b/vendor/golang.org/x/net/html/escape.go deleted file mode 100644 index d85613962..000000000 --- a/vendor/golang.org/x/net/html/escape.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -import ( - "bytes" - "strings" - "unicode/utf8" -) - -// These replacements permit compatibility with old numeric entities that -// assumed Windows-1252 encoding. 
-// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference -var replacementTable = [...]rune{ - '\u20AC', // First entry is what 0x80 should be replaced with. - '\u0081', - '\u201A', - '\u0192', - '\u201E', - '\u2026', - '\u2020', - '\u2021', - '\u02C6', - '\u2030', - '\u0160', - '\u2039', - '\u0152', - '\u008D', - '\u017D', - '\u008F', - '\u0090', - '\u2018', - '\u2019', - '\u201C', - '\u201D', - '\u2022', - '\u2013', - '\u2014', - '\u02DC', - '\u2122', - '\u0161', - '\u203A', - '\u0153', - '\u009D', - '\u017E', - '\u0178', // Last entry is 0x9F. - // 0x00->'\uFFFD' is handled programmatically. - // 0x0D->'\u000D' is a no-op. -} - -// unescapeEntity reads an entity like "<" from b[src:] and writes the -// corresponding "<" to b[dst:], returning the incremented dst and src cursors. -// Precondition: b[src] == '&' && dst <= src. -// attribute should be true if parsing an attribute value. -func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) { - // https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference - - // i starts at 1 because we already know that s[0] == '&'. - i, s := 1, b[src:] - - if len(s) <= 1 { - b[dst] = b[src] - return dst + 1, src + 1 - } - - if s[i] == '#' { - if len(s) <= 3 { // We need to have at least "&#.". - b[dst] = b[src] - return dst + 1, src + 1 - } - i++ - c := s[i] - hex := false - if c == 'x' || c == 'X' { - hex = true - i++ - } - - x := '\x00' - for i < len(s) { - c = s[i] - i++ - if hex { - if '0' <= c && c <= '9' { - x = 16*x + rune(c) - '0' - continue - } else if 'a' <= c && c <= 'f' { - x = 16*x + rune(c) - 'a' + 10 - continue - } else if 'A' <= c && c <= 'F' { - x = 16*x + rune(c) - 'A' + 10 - continue - } - } else if '0' <= c && c <= '9' { - x = 10*x + rune(c) - '0' - continue - } - if c != ';' { - i-- - } - break - } - - if i <= 3 { // No characters matched. 
- b[dst] = b[src] - return dst + 1, src + 1 - } - - if 0x80 <= x && x <= 0x9F { - // Replace characters from Windows-1252 with UTF-8 equivalents. - x = replacementTable[x-0x80] - } else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF { - // Replace invalid characters with the replacement character. - x = '\uFFFD' - } - - return dst + utf8.EncodeRune(b[dst:], x), src + i - } - - // Consume the maximum number of characters possible, with the - // consumed characters matching one of the named references. - - for i < len(s) { - c := s[i] - i++ - // Lower-cased characters are more common in entities, so we check for them first. - if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { - continue - } - if c != ';' { - i-- - } - break - } - - entityName := string(s[1:i]) - if entityName == "" { - // No-op. - } else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' { - // No-op. - } else if x := entity[entityName]; x != 0 { - return dst + utf8.EncodeRune(b[dst:], x), src + i - } else if x := entity2[entityName]; x[0] != 0 { - dst1 := dst + utf8.EncodeRune(b[dst:], x[0]) - return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i - } else if !attribute { - maxLen := len(entityName) - 1 - if maxLen > longestEntityWithoutSemicolon { - maxLen = longestEntityWithoutSemicolon - } - for j := maxLen; j > 1; j-- { - if x := entity[entityName[:j]]; x != 0 { - return dst + utf8.EncodeRune(b[dst:], x), src + j + 1 - } - } - } - - dst1, src1 = dst+i, src+i - copy(b[dst:dst1], b[src:src1]) - return dst1, src1 -} - -// unescape unescapes b's entities in-place, so that "a<b" becomes "a': - esc = ">" - case '"': - // """ is shorter than """. 
- esc = """ - case '\r': - esc = " " - default: - panic("unrecognized escape character") - } - s = s[i+1:] - if _, err := w.WriteString(esc); err != nil { - return err - } - i = strings.IndexAny(s, escapedChars) - } - _, err := w.WriteString(s) - return err -} - -// EscapeString escapes special characters like "<" to become "<". It -// escapes only five such characters: <, >, &, ' and ". -// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't -// always true. -func EscapeString(s string) string { - if strings.IndexAny(s, escapedChars) == -1 { - return s - } - var buf bytes.Buffer - escape(&buf, s) - return buf.String() -} - -// UnescapeString unescapes entities like "<" to become "<". It unescapes a -// larger range of entities than EscapeString escapes. For example, "á" -// unescapes to "á", as does "á" and "&xE1;". -// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't -// always true. -func UnescapeString(s string) string { - for _, c := range s { - if c == '&' { - return string(unescape([]byte(s), false)) - } - } - return s -} diff --git a/vendor/golang.org/x/net/html/foreign.go b/vendor/golang.org/x/net/html/foreign.go deleted file mode 100644 index 01477a963..000000000 --- a/vendor/golang.org/x/net/html/foreign.go +++ /dev/null @@ -1,226 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package html - -import ( - "strings" -) - -func adjustAttributeNames(aa []Attribute, nameMap map[string]string) { - for i := range aa { - if newName, ok := nameMap[aa[i].Key]; ok { - aa[i].Key = newName - } - } -} - -func adjustForeignAttributes(aa []Attribute) { - for i, a := range aa { - if a.Key == "" || a.Key[0] != 'x' { - continue - } - switch a.Key { - case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show", - "xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink": - j := strings.Index(a.Key, ":") - aa[i].Namespace = a.Key[:j] - aa[i].Key = a.Key[j+1:] - } - } -} - -func htmlIntegrationPoint(n *Node) bool { - if n.Type != ElementNode { - return false - } - switch n.Namespace { - case "math": - if n.Data == "annotation-xml" { - for _, a := range n.Attr { - if a.Key == "encoding" { - val := strings.ToLower(a.Val) - if val == "text/html" || val == "application/xhtml+xml" { - return true - } - } - } - } - case "svg": - switch n.Data { - case "desc", "foreignObject", "title": - return true - } - } - return false -} - -func mathMLTextIntegrationPoint(n *Node) bool { - if n.Namespace != "math" { - return false - } - switch n.Data { - case "mi", "mo", "mn", "ms", "mtext": - return true - } - return false -} - -// Section 12.2.6.5. 
-var breakout = map[string]bool{ - "b": true, - "big": true, - "blockquote": true, - "body": true, - "br": true, - "center": true, - "code": true, - "dd": true, - "div": true, - "dl": true, - "dt": true, - "em": true, - "embed": true, - "h1": true, - "h2": true, - "h3": true, - "h4": true, - "h5": true, - "h6": true, - "head": true, - "hr": true, - "i": true, - "img": true, - "li": true, - "listing": true, - "menu": true, - "meta": true, - "nobr": true, - "ol": true, - "p": true, - "pre": true, - "ruby": true, - "s": true, - "small": true, - "span": true, - "strong": true, - "strike": true, - "sub": true, - "sup": true, - "table": true, - "tt": true, - "u": true, - "ul": true, - "var": true, -} - -// Section 12.2.6.5. -var svgTagNameAdjustments = map[string]string{ - "altglyph": "altGlyph", - "altglyphdef": "altGlyphDef", - "altglyphitem": "altGlyphItem", - "animatecolor": "animateColor", - "animatemotion": "animateMotion", - "animatetransform": "animateTransform", - "clippath": "clipPath", - "feblend": "feBlend", - "fecolormatrix": "feColorMatrix", - "fecomponenttransfer": "feComponentTransfer", - "fecomposite": "feComposite", - "feconvolvematrix": "feConvolveMatrix", - "fediffuselighting": "feDiffuseLighting", - "fedisplacementmap": "feDisplacementMap", - "fedistantlight": "feDistantLight", - "feflood": "feFlood", - "fefunca": "feFuncA", - "fefuncb": "feFuncB", - "fefuncg": "feFuncG", - "fefuncr": "feFuncR", - "fegaussianblur": "feGaussianBlur", - "feimage": "feImage", - "femerge": "feMerge", - "femergenode": "feMergeNode", - "femorphology": "feMorphology", - "feoffset": "feOffset", - "fepointlight": "fePointLight", - "fespecularlighting": "feSpecularLighting", - "fespotlight": "feSpotLight", - "fetile": "feTile", - "feturbulence": "feTurbulence", - "foreignobject": "foreignObject", - "glyphref": "glyphRef", - "lineargradient": "linearGradient", - "radialgradient": "radialGradient", - "textpath": "textPath", -} - -// Section 12.2.6.1 -var 
mathMLAttributeAdjustments = map[string]string{ - "definitionurl": "definitionURL", -} - -var svgAttributeAdjustments = map[string]string{ - "attributename": "attributeName", - "attributetype": "attributeType", - "basefrequency": "baseFrequency", - "baseprofile": "baseProfile", - "calcmode": "calcMode", - "clippathunits": "clipPathUnits", - "contentscripttype": "contentScriptType", - "contentstyletype": "contentStyleType", - "diffuseconstant": "diffuseConstant", - "edgemode": "edgeMode", - "externalresourcesrequired": "externalResourcesRequired", - "filterres": "filterRes", - "filterunits": "filterUnits", - "glyphref": "glyphRef", - "gradienttransform": "gradientTransform", - "gradientunits": "gradientUnits", - "kernelmatrix": "kernelMatrix", - "kernelunitlength": "kernelUnitLength", - "keypoints": "keyPoints", - "keysplines": "keySplines", - "keytimes": "keyTimes", - "lengthadjust": "lengthAdjust", - "limitingconeangle": "limitingConeAngle", - "markerheight": "markerHeight", - "markerunits": "markerUnits", - "markerwidth": "markerWidth", - "maskcontentunits": "maskContentUnits", - "maskunits": "maskUnits", - "numoctaves": "numOctaves", - "pathlength": "pathLength", - "patterncontentunits": "patternContentUnits", - "patterntransform": "patternTransform", - "patternunits": "patternUnits", - "pointsatx": "pointsAtX", - "pointsaty": "pointsAtY", - "pointsatz": "pointsAtZ", - "preservealpha": "preserveAlpha", - "preserveaspectratio": "preserveAspectRatio", - "primitiveunits": "primitiveUnits", - "refx": "refX", - "refy": "refY", - "repeatcount": "repeatCount", - "repeatdur": "repeatDur", - "requiredextensions": "requiredExtensions", - "requiredfeatures": "requiredFeatures", - "specularconstant": "specularConstant", - "specularexponent": "specularExponent", - "spreadmethod": "spreadMethod", - "startoffset": "startOffset", - "stddeviation": "stdDeviation", - "stitchtiles": "stitchTiles", - "surfacescale": "surfaceScale", - "systemlanguage": "systemLanguage", - 
"tablevalues": "tableValues", - "targetx": "targetX", - "targety": "targetY", - "textlength": "textLength", - "viewbox": "viewBox", - "viewtarget": "viewTarget", - "xchannelselector": "xChannelSelector", - "ychannelselector": "yChannelSelector", - "zoomandpan": "zoomAndPan", -} diff --git a/vendor/golang.org/x/net/html/node.go b/vendor/golang.org/x/net/html/node.go deleted file mode 100644 index 2c1cade60..000000000 --- a/vendor/golang.org/x/net/html/node.go +++ /dev/null @@ -1,220 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -import ( - "golang.org/x/net/html/atom" -) - -// A NodeType is the type of a Node. -type NodeType uint32 - -const ( - ErrorNode NodeType = iota - TextNode - DocumentNode - ElementNode - CommentNode - DoctypeNode - scopeMarkerNode -) - -// Section 12.2.4.3 says "The markers are inserted when entering applet, -// object, marquee, template, td, th, and caption elements, and are used -// to prevent formatting from "leaking" into applet, object, marquee, -// template, td, th, and caption elements". -var scopeMarker = Node{Type: scopeMarkerNode} - -// A Node consists of a NodeType and some Data (tag name for element nodes, -// content for text) and are part of a tree of Nodes. Element nodes may also -// have a Namespace and contain a slice of Attributes. Data is unescaped, so -// that it looks like "a 0 { - return (*s)[i-1] - } - return nil -} - -// index returns the index of the top-most occurrence of n in the stack, or -1 -// if n is not present. -func (s *nodeStack) index(n *Node) int { - for i := len(*s) - 1; i >= 0; i-- { - if (*s)[i] == n { - return i - } - } - return -1 -} - -// contains returns whether a is within s. -func (s *nodeStack) contains(a atom.Atom) bool { - for _, n := range *s { - if n.DataAtom == a { - return true - } - } - return false -} - -// insert inserts a node at the given index. 
-func (s *nodeStack) insert(i int, n *Node) { - (*s) = append(*s, nil) - copy((*s)[i+1:], (*s)[i:]) - (*s)[i] = n -} - -// remove removes a node from the stack. It is a no-op if n is not present. -func (s *nodeStack) remove(n *Node) { - i := s.index(n) - if i == -1 { - return - } - copy((*s)[i:], (*s)[i+1:]) - j := len(*s) - 1 - (*s)[j] = nil - *s = (*s)[:j] -} - -type insertionModeStack []insertionMode - -func (s *insertionModeStack) pop() (im insertionMode) { - i := len(*s) - im = (*s)[i-1] - *s = (*s)[:i-1] - return im -} - -func (s *insertionModeStack) top() insertionMode { - if i := len(*s); i > 0 { - return (*s)[i-1] - } - return nil -} diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go deleted file mode 100644 index 64a579372..000000000 --- a/vendor/golang.org/x/net/html/parse.go +++ /dev/null @@ -1,2311 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package html - -import ( - "errors" - "fmt" - "io" - "strings" - - a "golang.org/x/net/html/atom" -) - -// A parser implements the HTML5 parsing algorithm: -// https://html.spec.whatwg.org/multipage/syntax.html#tree-construction -type parser struct { - // tokenizer provides the tokens for the parser. - tokenizer *Tokenizer - // tok is the most recently read token. - tok Token - // Self-closing tags like
are treated as start tags, except that - // hasSelfClosingToken is set while they are being processed. - hasSelfClosingToken bool - // doc is the document root element. - doc *Node - // The stack of open elements (section 12.2.4.2) and active formatting - // elements (section 12.2.4.3). - oe, afe nodeStack - // Element pointers (section 12.2.4.4). - head, form *Node - // Other parsing state flags (section 12.2.4.5). - scripting, framesetOK bool - // The stack of template insertion modes - templateStack insertionModeStack - // im is the current insertion mode. - im insertionMode - // originalIM is the insertion mode to go back to after completing a text - // or inTableText insertion mode. - originalIM insertionMode - // fosterParenting is whether new elements should be inserted according to - // the foster parenting rules (section 12.2.6.1). - fosterParenting bool - // quirks is whether the parser is operating in "quirks mode." - quirks bool - // fragment is whether the parser is parsing an HTML fragment. - fragment bool - // context is the context element when parsing an HTML fragment - // (section 12.4). - context *Node -} - -func (p *parser) top() *Node { - if n := p.oe.top(); n != nil { - return n - } - return p.doc -} - -// Stop tags for use in popUntil. These come from section 12.2.4.2. -var ( - defaultScopeStopTags = map[string][]a.Atom{ - "": {a.Applet, a.Caption, a.Html, a.Table, a.Td, a.Th, a.Marquee, a.Object, a.Template}, - "math": {a.AnnotationXml, a.Mi, a.Mn, a.Mo, a.Ms, a.Mtext}, - "svg": {a.Desc, a.ForeignObject, a.Title}, - } -) - -type scope int - -const ( - defaultScope scope = iota - listItemScope - buttonScope - tableScope - tableRowScope - tableBodyScope - selectScope -) - -// popUntil pops the stack of open elements at the highest element whose tag -// is in matchTags, provided there is no higher element in the scope's stop -// tags (as defined in section 12.2.4.2). It returns whether or not there was -// such an element. 
If there was not, popUntil leaves the stack unchanged. -// -// For example, the set of stop tags for table scope is: "html", "table". If -// the stack was: -// ["html", "body", "font", "table", "b", "i", "u"] -// then popUntil(tableScope, "font") would return false, but -// popUntil(tableScope, "i") would return true and the stack would become: -// ["html", "body", "font", "table", "b"] -// -// If an element's tag is in both the stop tags and matchTags, then the stack -// will be popped and the function returns true (provided, of course, there was -// no higher element in the stack that was also in the stop tags). For example, -// popUntil(tableScope, "table") returns true and leaves: -// ["html", "body", "font"] -func (p *parser) popUntil(s scope, matchTags ...a.Atom) bool { - if i := p.indexOfElementInScope(s, matchTags...); i != -1 { - p.oe = p.oe[:i] - return true - } - return false -} - -// indexOfElementInScope returns the index in p.oe of the highest element whose -// tag is in matchTags that is in scope. If no matching element is in scope, it -// returns -1. -func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int { - for i := len(p.oe) - 1; i >= 0; i-- { - tagAtom := p.oe[i].DataAtom - if p.oe[i].Namespace == "" { - for _, t := range matchTags { - if t == tagAtom { - return i - } - } - switch s { - case defaultScope: - // No-op. 
- case listItemScope: - if tagAtom == a.Ol || tagAtom == a.Ul { - return -1 - } - case buttonScope: - if tagAtom == a.Button { - return -1 - } - case tableScope: - if tagAtom == a.Html || tagAtom == a.Table || tagAtom == a.Template { - return -1 - } - case selectScope: - if tagAtom != a.Optgroup && tagAtom != a.Option { - return -1 - } - default: - panic("unreachable") - } - } - switch s { - case defaultScope, listItemScope, buttonScope: - for _, t := range defaultScopeStopTags[p.oe[i].Namespace] { - if t == tagAtom { - return -1 - } - } - } - } - return -1 -} - -// elementInScope is like popUntil, except that it doesn't modify the stack of -// open elements. -func (p *parser) elementInScope(s scope, matchTags ...a.Atom) bool { - return p.indexOfElementInScope(s, matchTags...) != -1 -} - -// clearStackToContext pops elements off the stack of open elements until a -// scope-defined element is found. -func (p *parser) clearStackToContext(s scope) { - for i := len(p.oe) - 1; i >= 0; i-- { - tagAtom := p.oe[i].DataAtom - switch s { - case tableScope: - if tagAtom == a.Html || tagAtom == a.Table || tagAtom == a.Template { - p.oe = p.oe[:i+1] - return - } - case tableRowScope: - if tagAtom == a.Html || tagAtom == a.Tr || tagAtom == a.Template { - p.oe = p.oe[:i+1] - return - } - case tableBodyScope: - if tagAtom == a.Html || tagAtom == a.Tbody || tagAtom == a.Tfoot || tagAtom == a.Thead || tagAtom == a.Template { - p.oe = p.oe[:i+1] - return - } - default: - panic("unreachable") - } - } -} - -// generateImpliedEndTags pops nodes off the stack of open elements as long as -// the top node has a tag name of dd, dt, li, optgroup, option, p, rb, rp, rt or rtc. -// If exceptions are specified, nodes with that name will not be popped off. 
-func (p *parser) generateImpliedEndTags(exceptions ...string) { - var i int -loop: - for i = len(p.oe) - 1; i >= 0; i-- { - n := p.oe[i] - if n.Type == ElementNode { - switch n.DataAtom { - case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc: - for _, except := range exceptions { - if n.Data == except { - break loop - } - } - continue - } - } - break - } - - p.oe = p.oe[:i+1] -} - -// addChild adds a child node n to the top element, and pushes n onto the stack -// of open elements if it is an element node. -func (p *parser) addChild(n *Node) { - if p.shouldFosterParent() { - p.fosterParent(n) - } else { - p.top().AppendChild(n) - } - - if n.Type == ElementNode { - p.oe = append(p.oe, n) - } -} - -// shouldFosterParent returns whether the next node to be added should be -// foster parented. -func (p *parser) shouldFosterParent() bool { - if p.fosterParenting { - switch p.top().DataAtom { - case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr: - return true - } - } - return false -} - -// fosterParent adds a child node according to the foster parenting rules. -// Section 12.2.6.1, "foster parenting". -func (p *parser) fosterParent(n *Node) { - var table, parent, prev, template *Node - var i int - for i = len(p.oe) - 1; i >= 0; i-- { - if p.oe[i].DataAtom == a.Table { - table = p.oe[i] - break - } - } - - var j int - for j = len(p.oe) - 1; j >= 0; j-- { - if p.oe[j].DataAtom == a.Template { - template = p.oe[j] - break - } - } - - if template != nil && (table == nil || j > i) { - template.AppendChild(n) - return - } - - if table == nil { - // The foster parent is the html element. 
- parent = p.oe[0] - } else { - parent = table.Parent - } - if parent == nil { - parent = p.oe[i-1] - } - - if table != nil { - prev = table.PrevSibling - } else { - prev = parent.LastChild - } - if prev != nil && prev.Type == TextNode && n.Type == TextNode { - prev.Data += n.Data - return - } - - parent.InsertBefore(n, table) -} - -// addText adds text to the preceding node if it is a text node, or else it -// calls addChild with a new text node. -func (p *parser) addText(text string) { - if text == "" { - return - } - - if p.shouldFosterParent() { - p.fosterParent(&Node{ - Type: TextNode, - Data: text, - }) - return - } - - t := p.top() - if n := t.LastChild; n != nil && n.Type == TextNode { - n.Data += text - return - } - p.addChild(&Node{ - Type: TextNode, - Data: text, - }) -} - -// addElement adds a child element based on the current token. -func (p *parser) addElement() { - p.addChild(&Node{ - Type: ElementNode, - DataAtom: p.tok.DataAtom, - Data: p.tok.Data, - Attr: p.tok.Attr, - }) -} - -// Section 12.2.4.3. -func (p *parser) addFormattingElement() { - tagAtom, attr := p.tok.DataAtom, p.tok.Attr - p.addElement() - - // Implement the Noah's Ark clause, but with three per family instead of two. - identicalElements := 0 -findIdenticalElements: - for i := len(p.afe) - 1; i >= 0; i-- { - n := p.afe[i] - if n.Type == scopeMarkerNode { - break - } - if n.Type != ElementNode { - continue - } - if n.Namespace != "" { - continue - } - if n.DataAtom != tagAtom { - continue - } - if len(n.Attr) != len(attr) { - continue - } - compareAttributes: - for _, t0 := range n.Attr { - for _, t1 := range attr { - if t0.Key == t1.Key && t0.Namespace == t1.Namespace && t0.Val == t1.Val { - // Found a match for this attribute, continue with the next attribute. - continue compareAttributes - } - } - // If we get here, there is no attribute that matches a. - // Therefore the element is not identical to the new one. 
- continue findIdenticalElements - } - - identicalElements++ - if identicalElements >= 3 { - p.afe.remove(n) - } - } - - p.afe = append(p.afe, p.top()) -} - -// Section 12.2.4.3. -func (p *parser) clearActiveFormattingElements() { - for { - n := p.afe.pop() - if len(p.afe) == 0 || n.Type == scopeMarkerNode { - return - } - } -} - -// Section 12.2.4.3. -func (p *parser) reconstructActiveFormattingElements() { - n := p.afe.top() - if n == nil { - return - } - if n.Type == scopeMarkerNode || p.oe.index(n) != -1 { - return - } - i := len(p.afe) - 1 - for n.Type != scopeMarkerNode && p.oe.index(n) == -1 { - if i == 0 { - i = -1 - break - } - i-- - n = p.afe[i] - } - for { - i++ - clone := p.afe[i].clone() - p.addChild(clone) - p.afe[i] = clone - if i == len(p.afe)-1 { - break - } - } -} - -// Section 12.2.5. -func (p *parser) acknowledgeSelfClosingTag() { - p.hasSelfClosingToken = false -} - -// An insertion mode (section 12.2.4.1) is the state transition function from -// a particular state in the HTML5 parser's state machine. It updates the -// parser's fields depending on parser.tok (where ErrorToken means EOF). -// It returns whether the token was consumed. -type insertionMode func(*parser) bool - -// setOriginalIM sets the insertion mode to return to after completing a text or -// inTableText insertion mode. -// Section 12.2.4.1, "using the rules for". -func (p *parser) setOriginalIM() { - if p.originalIM != nil { - panic("html: bad parser state: originalIM was set twice") - } - p.originalIM = p.im -} - -// Section 12.2.4.1, "reset the insertion mode". 
-func (p *parser) resetInsertionMode() { - for i := len(p.oe) - 1; i >= 0; i-- { - n := p.oe[i] - last := i == 0 - if last && p.context != nil { - n = p.context - } - - switch n.DataAtom { - case a.Select: - if !last { - for ancestor, first := n, p.oe[0]; ancestor != first; { - if ancestor == first { - break - } - ancestor = p.oe[p.oe.index(ancestor)-1] - switch ancestor.DataAtom { - case a.Template: - p.im = inSelectIM - return - case a.Table: - p.im = inSelectInTableIM - return - } - } - } - p.im = inSelectIM - case a.Td, a.Th: - // TODO: remove this divergence from the HTML5 spec. - // - // See https://bugs.chromium.org/p/chromium/issues/detail?id=829668 - p.im = inCellIM - case a.Tr: - p.im = inRowIM - case a.Tbody, a.Thead, a.Tfoot: - p.im = inTableBodyIM - case a.Caption: - p.im = inCaptionIM - case a.Colgroup: - p.im = inColumnGroupIM - case a.Table: - p.im = inTableIM - case a.Template: - // TODO: remove this divergence from the HTML5 spec. - if n.Namespace != "" { - continue - } - p.im = p.templateStack.top() - case a.Head: - // TODO: remove this divergence from the HTML5 spec. - // - // See https://bugs.chromium.org/p/chromium/issues/detail?id=829668 - p.im = inHeadIM - case a.Body: - p.im = inBodyIM - case a.Frameset: - p.im = inFramesetIM - case a.Html: - if p.head == nil { - p.im = beforeHeadIM - } else { - p.im = afterHeadIM - } - default: - if last { - p.im = inBodyIM - return - } - continue - } - return - } -} - -const whitespace = " \t\r\n\f" - -// Section 12.2.6.4.1. -func initialIM(p *parser) bool { - switch p.tok.Type { - case TextToken: - p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) - if len(p.tok.Data) == 0 { - // It was all whitespace, so ignore it. 
- return true - } - case CommentToken: - p.doc.AppendChild(&Node{ - Type: CommentNode, - Data: p.tok.Data, - }) - return true - case DoctypeToken: - n, quirks := parseDoctype(p.tok.Data) - p.doc.AppendChild(n) - p.quirks = quirks - p.im = beforeHTMLIM - return true - } - p.quirks = true - p.im = beforeHTMLIM - return false -} - -// Section 12.2.6.4.2. -func beforeHTMLIM(p *parser) bool { - switch p.tok.Type { - case DoctypeToken: - // Ignore the token. - return true - case TextToken: - p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) - if len(p.tok.Data) == 0 { - // It was all whitespace, so ignore it. - return true - } - case StartTagToken: - if p.tok.DataAtom == a.Html { - p.addElement() - p.im = beforeHeadIM - return true - } - case EndTagToken: - switch p.tok.DataAtom { - case a.Head, a.Body, a.Html, a.Br: - p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) - return false - default: - // Ignore the token. - return true - } - case CommentToken: - p.doc.AppendChild(&Node{ - Type: CommentNode, - Data: p.tok.Data, - }) - return true - } - p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) - return false -} - -// Section 12.2.6.4.3. -func beforeHeadIM(p *parser) bool { - switch p.tok.Type { - case TextToken: - p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) - if len(p.tok.Data) == 0 { - // It was all whitespace, so ignore it. - return true - } - case StartTagToken: - switch p.tok.DataAtom { - case a.Head: - p.addElement() - p.head = p.top() - p.im = inHeadIM - return true - case a.Html: - return inBodyIM(p) - } - case EndTagToken: - switch p.tok.DataAtom { - case a.Head, a.Body, a.Html, a.Br: - p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) - return false - default: - // Ignore the token. - return true - } - case CommentToken: - p.addChild(&Node{ - Type: CommentNode, - Data: p.tok.Data, - }) - return true - case DoctypeToken: - // Ignore the token. 
- return true - } - - p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) - return false -} - -// Section 12.2.6.4.4. -func inHeadIM(p *parser) bool { - switch p.tok.Type { - case TextToken: - s := strings.TrimLeft(p.tok.Data, whitespace) - if len(s) < len(p.tok.Data) { - // Add the initial whitespace to the current node. - p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) - if s == "" { - return true - } - p.tok.Data = s - } - case StartTagToken: - switch p.tok.DataAtom { - case a.Html: - return inBodyIM(p) - case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta: - p.addElement() - p.oe.pop() - p.acknowledgeSelfClosingTag() - return true - case a.Script, a.Title, a.Noscript, a.Noframes, a.Style: - p.addElement() - p.setOriginalIM() - p.im = textIM - return true - case a.Head: - // Ignore the token. - return true - case a.Template: - p.addElement() - p.afe = append(p.afe, &scopeMarker) - p.framesetOK = false - p.im = inTemplateIM - p.templateStack = append(p.templateStack, inTemplateIM) - return true - } - case EndTagToken: - switch p.tok.DataAtom { - case a.Head: - p.oe.pop() - p.im = afterHeadIM - return true - case a.Body, a.Html, a.Br: - p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) - return false - case a.Template: - if !p.oe.contains(a.Template) { - return true - } - // TODO: remove this divergence from the HTML5 spec. - // - // See https://bugs.chromium.org/p/chromium/issues/detail?id=829668 - p.generateImpliedEndTags() - for i := len(p.oe) - 1; i >= 0; i-- { - if n := p.oe[i]; n.Namespace == "" && n.DataAtom == a.Template { - p.oe = p.oe[:i] - break - } - } - p.clearActiveFormattingElements() - p.templateStack.pop() - p.resetInsertionMode() - return true - default: - // Ignore the token. - return true - } - case CommentToken: - p.addChild(&Node{ - Type: CommentNode, - Data: p.tok.Data, - }) - return true - case DoctypeToken: - // Ignore the token. 
- return true - } - - p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) - return false -} - -// Section 12.2.6.4.6. -func afterHeadIM(p *parser) bool { - switch p.tok.Type { - case TextToken: - s := strings.TrimLeft(p.tok.Data, whitespace) - if len(s) < len(p.tok.Data) { - // Add the initial whitespace to the current node. - p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) - if s == "" { - return true - } - p.tok.Data = s - } - case StartTagToken: - switch p.tok.DataAtom { - case a.Html: - return inBodyIM(p) - case a.Body: - p.addElement() - p.framesetOK = false - p.im = inBodyIM - return true - case a.Frameset: - p.addElement() - p.im = inFramesetIM - return true - case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title: - p.oe = append(p.oe, p.head) - defer p.oe.remove(p.head) - return inHeadIM(p) - case a.Head: - // Ignore the token. - return true - } - case EndTagToken: - switch p.tok.DataAtom { - case a.Body, a.Html, a.Br: - // Drop down to creating an implied tag. - case a.Template: - return inHeadIM(p) - default: - // Ignore the token. - return true - } - case CommentToken: - p.addChild(&Node{ - Type: CommentNode, - Data: p.tok.Data, - }) - return true - case DoctypeToken: - // Ignore the token. - return true - } - - p.parseImpliedToken(StartTagToken, a.Body, a.Body.String()) - p.framesetOK = true - return false -} - -// copyAttributes copies attributes of src not found on dst to dst. -func copyAttributes(dst *Node, src Token) { - if len(src.Attr) == 0 { - return - } - attr := map[string]string{} - for _, t := range dst.Attr { - attr[t.Key] = t.Val - } - for _, t := range src.Attr { - if _, ok := attr[t.Key]; !ok { - dst.Attr = append(dst.Attr, t) - attr[t.Key] = t.Val - } - } -} - -// Section 12.2.6.4.7. 
-func inBodyIM(p *parser) bool { - switch p.tok.Type { - case TextToken: - d := p.tok.Data - switch n := p.oe.top(); n.DataAtom { - case a.Pre, a.Listing: - if n.FirstChild == nil { - // Ignore a newline at the start of a
 block.
-				if d != "" && d[0] == '\r' {
-					d = d[1:]
-				}
-				if d != "" && d[0] == '\n' {
-					d = d[1:]
-				}
-			}
-		}
-		d = strings.Replace(d, "\x00", "", -1)
-		if d == "" {
-			return true
-		}
-		p.reconstructActiveFormattingElements()
-		p.addText(d)
-		if p.framesetOK && strings.TrimLeft(d, whitespace) != "" {
-			// There were non-whitespace characters inserted.
-			p.framesetOK = false
-		}
-	case StartTagToken:
-		switch p.tok.DataAtom {
-		case a.Html:
-			if p.oe.contains(a.Template) {
-				return true
-			}
-			copyAttributes(p.oe[0], p.tok)
-		case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Template, a.Title:
-			return inHeadIM(p)
-		case a.Body:
-			if p.oe.contains(a.Template) {
-				return true
-			}
-			if len(p.oe) >= 2 {
-				body := p.oe[1]
-				if body.Type == ElementNode && body.DataAtom == a.Body {
-					p.framesetOK = false
-					copyAttributes(body, p.tok)
-				}
-			}
-		case a.Frameset:
-			if !p.framesetOK || len(p.oe) < 2 || p.oe[1].DataAtom != a.Body {
-				// Ignore the token.
-				return true
-			}
-			body := p.oe[1]
-			if body.Parent != nil {
-				body.Parent.RemoveChild(body)
-			}
-			p.oe = p.oe[:1]
-			p.addElement()
-			p.im = inFramesetIM
-			return true
-		case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul:
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
-			p.popUntil(buttonScope, a.P)
-			switch n := p.top(); n.DataAtom {
-			case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
-				p.oe.pop()
-			}
-			p.addElement()
-		case a.Pre, a.Listing:
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-			// The newline, if any, will be dealt with by the TextToken case.
-			p.framesetOK = false
-		case a.Form:
-			if p.form != nil && !p.oe.contains(a.Template) {
-				// Ignore the token
-				return true
-			}
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-			if !p.oe.contains(a.Template) {
-				p.form = p.top()
-			}
-		case a.Li:
-			p.framesetOK = false
-			for i := len(p.oe) - 1; i >= 0; i-- {
-				node := p.oe[i]
-				switch node.DataAtom {
-				case a.Li:
-					p.oe = p.oe[:i]
-				case a.Address, a.Div, a.P:
-					continue
-				default:
-					if !isSpecialElement(node) {
-						continue
-					}
-				}
-				break
-			}
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-		case a.Dd, a.Dt:
-			p.framesetOK = false
-			for i := len(p.oe) - 1; i >= 0; i-- {
-				node := p.oe[i]
-				switch node.DataAtom {
-				case a.Dd, a.Dt:
-					p.oe = p.oe[:i]
-				case a.Address, a.Div, a.P:
-					continue
-				default:
-					if !isSpecialElement(node) {
-						continue
-					}
-				}
-				break
-			}
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-		case a.Plaintext:
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-		case a.Button:
-			p.popUntil(defaultScope, a.Button)
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			p.framesetOK = false
-		case a.A:
-			for i := len(p.afe) - 1; i >= 0 && p.afe[i].Type != scopeMarkerNode; i-- {
-				if n := p.afe[i]; n.Type == ElementNode && n.DataAtom == a.A {
-					p.inBodyEndTagFormatting(a.A)
-					p.oe.remove(n)
-					p.afe.remove(n)
-					break
-				}
-			}
-			p.reconstructActiveFormattingElements()
-			p.addFormattingElement()
-		case a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
-			p.reconstructActiveFormattingElements()
-			p.addFormattingElement()
-		case a.Nobr:
-			p.reconstructActiveFormattingElements()
-			if p.elementInScope(defaultScope, a.Nobr) {
-				p.inBodyEndTagFormatting(a.Nobr)
-				p.reconstructActiveFormattingElements()
-			}
-			p.addFormattingElement()
-		case a.Applet, a.Marquee, a.Object:
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			p.afe = append(p.afe, &scopeMarker)
-			p.framesetOK = false
-		case a.Table:
-			if !p.quirks {
-				p.popUntil(buttonScope, a.P)
-			}
-			p.addElement()
-			p.framesetOK = false
-			p.im = inTableIM
-			return true
-		case a.Area, a.Br, a.Embed, a.Img, a.Input, a.Keygen, a.Wbr:
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			p.oe.pop()
-			p.acknowledgeSelfClosingTag()
-			if p.tok.DataAtom == a.Input {
-				for _, t := range p.tok.Attr {
-					if t.Key == "type" {
-						if strings.ToLower(t.Val) == "hidden" {
-							// Skip setting framesetOK = false
-							return true
-						}
-					}
-				}
-			}
-			p.framesetOK = false
-		case a.Param, a.Source, a.Track:
-			p.addElement()
-			p.oe.pop()
-			p.acknowledgeSelfClosingTag()
-		case a.Hr:
-			p.popUntil(buttonScope, a.P)
-			p.addElement()
-			p.oe.pop()
-			p.acknowledgeSelfClosingTag()
-			p.framesetOK = false
-		case a.Image:
-			p.tok.DataAtom = a.Img
-			p.tok.Data = a.Img.String()
-			return false
-		case a.Isindex:
-			if p.form != nil {
-				// Ignore the token.
-				return true
-			}
-			action := ""
-			prompt := "This is a searchable index. Enter search keywords: "
-			attr := []Attribute{{Key: "name", Val: "isindex"}}
-			for _, t := range p.tok.Attr {
-				switch t.Key {
-				case "action":
-					action = t.Val
-				case "name":
-					// Ignore the attribute.
-				case "prompt":
-					prompt = t.Val
-				default:
-					attr = append(attr, t)
-				}
-			}
-			p.acknowledgeSelfClosingTag()
-			p.popUntil(buttonScope, a.P)
-			p.parseImpliedToken(StartTagToken, a.Form, a.Form.String())
-			if p.form == nil {
-				// NOTE: The 'isindex' element has been removed,
-				// and the 'template' element has not been designed to be
-				// collaborative with the index element.
-				//
-				// Ignore the token.
-				return true
-			}
-			if action != "" {
-				p.form.Attr = []Attribute{{Key: "action", Val: action}}
-			}
-			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
-			p.parseImpliedToken(StartTagToken, a.Label, a.Label.String())
-			p.addText(prompt)
-			p.addChild(&Node{
-				Type:     ElementNode,
-				DataAtom: a.Input,
-				Data:     a.Input.String(),
-				Attr:     attr,
-			})
-			p.oe.pop()
-			p.parseImpliedToken(EndTagToken, a.Label, a.Label.String())
-			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
-			p.parseImpliedToken(EndTagToken, a.Form, a.Form.String())
-		case a.Textarea:
-			p.addElement()
-			p.setOriginalIM()
-			p.framesetOK = false
-			p.im = textIM
-		case a.Xmp:
-			p.popUntil(buttonScope, a.P)
-			p.reconstructActiveFormattingElements()
-			p.framesetOK = false
-			p.addElement()
-			p.setOriginalIM()
-			p.im = textIM
-		case a.Iframe:
-			p.framesetOK = false
-			p.addElement()
-			p.setOriginalIM()
-			p.im = textIM
-		case a.Noembed, a.Noscript:
-			p.addElement()
-			p.setOriginalIM()
-			p.im = textIM
-		case a.Select:
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-			p.framesetOK = false
-			p.im = inSelectIM
-			return true
-		case a.Optgroup, a.Option:
-			if p.top().DataAtom == a.Option {
-				p.oe.pop()
-			}
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-		case a.Rb, a.Rtc:
-			if p.elementInScope(defaultScope, a.Ruby) {
-				p.generateImpliedEndTags()
-			}
-			p.addElement()
-		case a.Rp, a.Rt:
-			if p.elementInScope(defaultScope, a.Ruby) {
-				p.generateImpliedEndTags("rtc")
-			}
-			p.addElement()
-		case a.Math, a.Svg:
-			p.reconstructActiveFormattingElements()
-			if p.tok.DataAtom == a.Math {
-				adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
-			} else {
-				adjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)
-			}
-			adjustForeignAttributes(p.tok.Attr)
-			p.addElement()
-			p.top().Namespace = p.tok.Data
-			if p.hasSelfClosingToken {
-				p.oe.pop()
-				p.acknowledgeSelfClosingTag()
-			}
-			return true
-		case a.Caption, a.Col, a.Colgroup, a.Frame, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
-			// Ignore the token.
-		default:
-			p.reconstructActiveFormattingElements()
-			p.addElement()
-		}
-	case EndTagToken:
-		switch p.tok.DataAtom {
-		case a.Body:
-			if p.elementInScope(defaultScope, a.Body) {
-				p.im = afterBodyIM
-			}
-		case a.Html:
-			if p.elementInScope(defaultScope, a.Body) {
-				p.parseImpliedToken(EndTagToken, a.Body, a.Body.String())
-				return false
-			}
-			return true
-		case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul:
-			p.popUntil(defaultScope, p.tok.DataAtom)
-		case a.Form:
-			if p.oe.contains(a.Template) {
-				i := p.indexOfElementInScope(defaultScope, a.Form)
-				if i == -1 {
-					// Ignore the token.
-					return true
-				}
-				p.generateImpliedEndTags()
-				if p.oe[i].DataAtom != a.Form {
-					// Ignore the token.
-					return true
-				}
-				p.popUntil(defaultScope, a.Form)
-			} else {
-				node := p.form
-				p.form = nil
-				i := p.indexOfElementInScope(defaultScope, a.Form)
-				if node == nil || i == -1 || p.oe[i] != node {
-					// Ignore the token.
-					return true
-				}
-				p.generateImpliedEndTags()
-				p.oe.remove(node)
-			}
-		case a.P:
-			if !p.elementInScope(buttonScope, a.P) {
-				p.parseImpliedToken(StartTagToken, a.P, a.P.String())
-			}
-			p.popUntil(buttonScope, a.P)
-		case a.Li:
-			p.popUntil(listItemScope, a.Li)
-		case a.Dd, a.Dt:
-			p.popUntil(defaultScope, p.tok.DataAtom)
-		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
-			p.popUntil(defaultScope, a.H1, a.H2, a.H3, a.H4, a.H5, a.H6)
-		case a.A, a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.Nobr, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
-			p.inBodyEndTagFormatting(p.tok.DataAtom)
-		case a.Applet, a.Marquee, a.Object:
-			if p.popUntil(defaultScope, p.tok.DataAtom) {
-				p.clearActiveFormattingElements()
-			}
-		case a.Br:
-			p.tok.Type = StartTagToken
-			return false
-		case a.Template:
-			return inHeadIM(p)
-		default:
-			p.inBodyEndTagOther(p.tok.DataAtom)
-		}
-	case CommentToken:
-		p.addChild(&Node{
-			Type: CommentNode,
-			Data: p.tok.Data,
-		})
-	case ErrorToken:
-		// TODO: remove this divergence from the HTML5 spec.
-		if len(p.templateStack) > 0 {
-			p.im = inTemplateIM
-			return false
-		} else {
-			for _, e := range p.oe {
-				switch e.DataAtom {
-				case a.Dd, a.Dt, a.Li, a.Optgroup, a.Option, a.P, a.Rb, a.Rp, a.Rt, a.Rtc, a.Tbody, a.Td, a.Tfoot, a.Th,
-					a.Thead, a.Tr, a.Body, a.Html:
-				default:
-					return true
-				}
-			}
-		}
-	}
-
-	return true
-}
-
-func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) {
-	// This is the "adoption agency" algorithm, described at
-	// https://html.spec.whatwg.org/multipage/syntax.html#adoptionAgency
-
-	// TODO: this is a fairly literal line-by-line translation of that algorithm.
-	// Once the code successfully parses the comprehensive test suite, we should
-	// refactor this code to be more idiomatic.
-
-	// Steps 1-4. The outer loop.
-	for i := 0; i < 8; i++ {
-		// Step 5. Find the formatting element.
-		var formattingElement *Node
-		for j := len(p.afe) - 1; j >= 0; j-- {
-			if p.afe[j].Type == scopeMarkerNode {
-				break
-			}
-			if p.afe[j].DataAtom == tagAtom {
-				formattingElement = p.afe[j]
-				break
-			}
-		}
-		if formattingElement == nil {
-			p.inBodyEndTagOther(tagAtom)
-			return
-		}
-		feIndex := p.oe.index(formattingElement)
-		if feIndex == -1 {
-			p.afe.remove(formattingElement)
-			return
-		}
-		if !p.elementInScope(defaultScope, tagAtom) {
-			// Ignore the tag.
-			return
-		}
-
-		// Steps 9-10. Find the furthest block.
-		var furthestBlock *Node
-		for _, e := range p.oe[feIndex:] {
-			if isSpecialElement(e) {
-				furthestBlock = e
-				break
-			}
-		}
-		if furthestBlock == nil {
-			e := p.oe.pop()
-			for e != formattingElement {
-				e = p.oe.pop()
-			}
-			p.afe.remove(e)
-			return
-		}
-
-		// Steps 11-12. Find the common ancestor and bookmark node.
-		commonAncestor := p.oe[feIndex-1]
-		bookmark := p.afe.index(formattingElement)
-
-		// Step 13. The inner loop. Find the lastNode to reparent.
-		lastNode := furthestBlock
-		node := furthestBlock
-		x := p.oe.index(node)
-		// Steps 13.1-13.2
-		for j := 0; j < 3; j++ {
-			// Step 13.3.
-			x--
-			node = p.oe[x]
-			// Step 13.4 - 13.5.
-			if p.afe.index(node) == -1 {
-				p.oe.remove(node)
-				continue
-			}
-			// Step 13.6.
-			if node == formattingElement {
-				break
-			}
-			// Step 13.7.
-			clone := node.clone()
-			p.afe[p.afe.index(node)] = clone
-			p.oe[p.oe.index(node)] = clone
-			node = clone
-			// Step 13.8.
-			if lastNode == furthestBlock {
-				bookmark = p.afe.index(node) + 1
-			}
-			// Step 13.9.
-			if lastNode.Parent != nil {
-				lastNode.Parent.RemoveChild(lastNode)
-			}
-			node.AppendChild(lastNode)
-			// Step 13.10.
-			lastNode = node
-		}
-
-		// Step 14. Reparent lastNode to the common ancestor,
-		// or for misnested table nodes, to the foster parent.
-		if lastNode.Parent != nil {
-			lastNode.Parent.RemoveChild(lastNode)
-		}
-		switch commonAncestor.DataAtom {
-		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
-			p.fosterParent(lastNode)
-		default:
-			commonAncestor.AppendChild(lastNode)
-		}
-
-		// Steps 15-17. Reparent nodes from the furthest block's children
-		// to a clone of the formatting element.
-		clone := formattingElement.clone()
-		reparentChildren(clone, furthestBlock)
-		furthestBlock.AppendChild(clone)
-
-		// Step 18. Fix up the list of active formatting elements.
-		if oldLoc := p.afe.index(formattingElement); oldLoc != -1 && oldLoc < bookmark {
-			// Move the bookmark with the rest of the list.
-			bookmark--
-		}
-		p.afe.remove(formattingElement)
-		p.afe.insert(bookmark, clone)
-
-		// Step 19. Fix up the stack of open elements.
-		p.oe.remove(formattingElement)
-		p.oe.insert(p.oe.index(furthestBlock)+1, clone)
-	}
-}
-
-// inBodyEndTagOther performs the "any other end tag" algorithm for inBodyIM.
-// "Any other end tag" handling from 12.2.6.5 The rules for parsing tokens in foreign content
-// https://html.spec.whatwg.org/multipage/syntax.html#parsing-main-inforeign
-func (p *parser) inBodyEndTagOther(tagAtom a.Atom) {
-	for i := len(p.oe) - 1; i >= 0; i-- {
-		if p.oe[i].DataAtom == tagAtom {
-			p.oe = p.oe[:i]
-			break
-		}
-		if isSpecialElement(p.oe[i]) {
-			break
-		}
-	}
-}
-
-// Section 12.2.6.4.8.
-func textIM(p *parser) bool {
-	switch p.tok.Type {
-	case ErrorToken:
-		p.oe.pop()
-	case TextToken:
-		d := p.tok.Data
-		if n := p.oe.top(); n.DataAtom == a.Textarea && n.FirstChild == nil {
-			// Ignore a newline at the start of a