+
+#define Ch(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
+#define Maj(x,y,z) (((x) & (y)) | ((z) & ((x) | (y))))
+#define Sigma0(x) (((x) >> 2 | (x) << 30) ^ ((x) >> 13 | (x) << 19) ^ ((x) >> 22 | (x) << 10))
+#define Sigma1(x) (((x) >> 6 | (x) << 26) ^ ((x) >> 11 | (x) << 21) ^ ((x) >> 25 | (x) << 7))
+#define sigma0(x) (((x) >> 7 | (x) << 25) ^ ((x) >> 18 | (x) << 14) ^ ((x) >> 3))
+#define sigma1(x) (((x) >> 17 | (x) << 15) ^ ((x) >> 19 | (x) << 13) ^ ((x) >> 10))
+
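+/* One round of the SHA-256 compression function; callers rotate the a..h arguments each call, so no register shuffling is needed inside the macro. */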
+#define Round(a,b,c,d,e,f,g,h,k,w) do { \
+ uint32_t t1 = (h) + Sigma1(e) + Ch((e), (f), (g)) + (k) + (w); \
+ uint32_t t2 = Sigma0(a) + Maj((a), (b), (c)); \
+ (d) += t1; \
+ (h) = t1 + t2; \
+} while(0)
+
+#ifdef WORDS_BIGENDIAN
+#define BE32(x) (x)
+#else
+#define BE32(p) ((((p) & 0xFF) << 24) | (((p) & 0xFF00) << 8) | (((p) & 0xFF0000) >> 8) | (((p) & 0xFF000000) >> 24))
+#endif
+
+static void secp256k1_sha256_initialize(secp256k1_sha256_t *hash) {
+ hash->s[0] = 0x6a09e667ul;
+ hash->s[1] = 0xbb67ae85ul;
+ hash->s[2] = 0x3c6ef372ul;
+ hash->s[3] = 0xa54ff53aul;
+ hash->s[4] = 0x510e527ful;
+ hash->s[5] = 0x9b05688cul;
+ hash->s[6] = 0x1f83d9abul;
+ hash->s[7] = 0x5be0cd19ul;
+ hash->bytes = 0;
+}
+
+/** Perform one SHA-256 transformation, processing 16 big endian 32-bit words. */
+static void secp256k1_sha256_transform(uint32_t* s, const uint32_t* chunk) {
+ uint32_t a = s[0], b = s[1], c = s[2], d = s[3], e = s[4], f = s[5], g = s[6], h = s[7];
+ uint32_t w0, w1, w2, w3, w4, w5, w6, w7, w8, w9, w10, w11, w12, w13, w14, w15;
+
+ Round(a, b, c, d, e, f, g, h, 0x428a2f98, w0 = BE32(chunk[0]));
+ Round(h, a, b, c, d, e, f, g, 0x71374491, w1 = BE32(chunk[1]));
+ Round(g, h, a, b, c, d, e, f, 0xb5c0fbcf, w2 = BE32(chunk[2]));
+ Round(f, g, h, a, b, c, d, e, 0xe9b5dba5, w3 = BE32(chunk[3]));
+ Round(e, f, g, h, a, b, c, d, 0x3956c25b, w4 = BE32(chunk[4]));
+ Round(d, e, f, g, h, a, b, c, 0x59f111f1, w5 = BE32(chunk[5]));
+ Round(c, d, e, f, g, h, a, b, 0x923f82a4, w6 = BE32(chunk[6]));
+ Round(b, c, d, e, f, g, h, a, 0xab1c5ed5, w7 = BE32(chunk[7]));
+ Round(a, b, c, d, e, f, g, h, 0xd807aa98, w8 = BE32(chunk[8]));
+ Round(h, a, b, c, d, e, f, g, 0x12835b01, w9 = BE32(chunk[9]));
+ Round(g, h, a, b, c, d, e, f, 0x243185be, w10 = BE32(chunk[10]));
+ Round(f, g, h, a, b, c, d, e, 0x550c7dc3, w11 = BE32(chunk[11]));
+ Round(e, f, g, h, a, b, c, d, 0x72be5d74, w12 = BE32(chunk[12]));
+ Round(d, e, f, g, h, a, b, c, 0x80deb1fe, w13 = BE32(chunk[13]));
+ Round(c, d, e, f, g, h, a, b, 0x9bdc06a7, w14 = BE32(chunk[14]));
+ Round(b, c, d, e, f, g, h, a, 0xc19bf174, w15 = BE32(chunk[15]));
+
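+ /* Rounds 17-64: the 16-word message schedule is expanded in place as each word is consumed. */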
+ Round(a, b, c, d, e, f, g, h, 0xe49b69c1, w0 += sigma1(w14) + w9 + sigma0(w1));
+ Round(h, a, b, c, d, e, f, g, 0xefbe4786, w1 += sigma1(w15) + w10 + sigma0(w2));
+ Round(g, h, a, b, c, d, e, f, 0x0fc19dc6, w2 += sigma1(w0) + w11 + sigma0(w3));
+ Round(f, g, h, a, b, c, d, e, 0x240ca1cc, w3 += sigma1(w1) + w12 + sigma0(w4));
+ Round(e, f, g, h, a, b, c, d, 0x2de92c6f, w4 += sigma1(w2) + w13 + sigma0(w5));
+ Round(d, e, f, g, h, a, b, c, 0x4a7484aa, w5 += sigma1(w3) + w14 + sigma0(w6));
+ Round(c, d, e, f, g, h, a, b, 0x5cb0a9dc, w6 += sigma1(w4) + w15 + sigma0(w7));
+ Round(b, c, d, e, f, g, h, a, 0x76f988da, w7 += sigma1(w5) + w0 + sigma0(w8));
+ Round(a, b, c, d, e, f, g, h, 0x983e5152, w8 += sigma1(w6) + w1 + sigma0(w9));
+ Round(h, a, b, c, d, e, f, g, 0xa831c66d, w9 += sigma1(w7) + w2 + sigma0(w10));
+ Round(g, h, a, b, c, d, e, f, 0xb00327c8, w10 += sigma1(w8) + w3 + sigma0(w11));
+ Round(f, g, h, a, b, c, d, e, 0xbf597fc7, w11 += sigma1(w9) + w4 + sigma0(w12));
+ Round(e, f, g, h, a, b, c, d, 0xc6e00bf3, w12 += sigma1(w10) + w5 + sigma0(w13));
+ Round(d, e, f, g, h, a, b, c, 0xd5a79147, w13 += sigma1(w11) + w6 + sigma0(w14));
+ Round(c, d, e, f, g, h, a, b, 0x06ca6351, w14 += sigma1(w12) + w7 + sigma0(w15));
+ Round(b, c, d, e, f, g, h, a, 0x14292967, w15 += sigma1(w13) + w8 + sigma0(w0));
+
+ Round(a, b, c, d, e, f, g, h, 0x27b70a85, w0 += sigma1(w14) + w9 + sigma0(w1));
+ Round(h, a, b, c, d, e, f, g, 0x2e1b2138, w1 += sigma1(w15) + w10 + sigma0(w2));
+ Round(g, h, a, b, c, d, e, f, 0x4d2c6dfc, w2 += sigma1(w0) + w11 + sigma0(w3));
+ Round(f, g, h, a, b, c, d, e, 0x53380d13, w3 += sigma1(w1) + w12 + sigma0(w4));
+ Round(e, f, g, h, a, b, c, d, 0x650a7354, w4 += sigma1(w2) + w13 + sigma0(w5));
+ Round(d, e, f, g, h, a, b, c, 0x766a0abb, w5 += sigma1(w3) + w14 + sigma0(w6));
+ Round(c, d, e, f, g, h, a, b, 0x81c2c92e, w6 += sigma1(w4) + w15 + sigma0(w7));
+ Round(b, c, d, e, f, g, h, a, 0x92722c85, w7 += sigma1(w5) + w0 + sigma0(w8));
+ Round(a, b, c, d, e, f, g, h, 0xa2bfe8a1, w8 += sigma1(w6) + w1 + sigma0(w9));
+ Round(h, a, b, c, d, e, f, g, 0xa81a664b, w9 += sigma1(w7) + w2 + sigma0(w10));
+ Round(g, h, a, b, c, d, e, f, 0xc24b8b70, w10 += sigma1(w8) + w3 + sigma0(w11));
+ Round(f, g, h, a, b, c, d, e, 0xc76c51a3, w11 += sigma1(w9) + w4 + sigma0(w12));
+ Round(e, f, g, h, a, b, c, d, 0xd192e819, w12 += sigma1(w10) + w5 + sigma0(w13));
+ Round(d, e, f, g, h, a, b, c, 0xd6990624, w13 += sigma1(w11) + w6 + sigma0(w14));
+ Round(c, d, e, f, g, h, a, b, 0xf40e3585, w14 += sigma1(w12) + w7 + sigma0(w15));
+ Round(b, c, d, e, f, g, h, a, 0x106aa070, w15 += sigma1(w13) + w8 + sigma0(w0));
+
+ Round(a, b, c, d, e, f, g, h, 0x19a4c116, w0 += sigma1(w14) + w9 + sigma0(w1));
+ Round(h, a, b, c, d, e, f, g, 0x1e376c08, w1 += sigma1(w15) + w10 + sigma0(w2));
+ Round(g, h, a, b, c, d, e, f, 0x2748774c, w2 += sigma1(w0) + w11 + sigma0(w3));
+ Round(f, g, h, a, b, c, d, e, 0x34b0bcb5, w3 += sigma1(w1) + w12 + sigma0(w4));
+ Round(e, f, g, h, a, b, c, d, 0x391c0cb3, w4 += sigma1(w2) + w13 + sigma0(w5));
+ Round(d, e, f, g, h, a, b, c, 0x4ed8aa4a, w5 += sigma1(w3) + w14 + sigma0(w6));
+ Round(c, d, e, f, g, h, a, b, 0x5b9cca4f, w6 += sigma1(w4) + w15 + sigma0(w7));
+ Round(b, c, d, e, f, g, h, a, 0x682e6ff3, w7 += sigma1(w5) + w0 + sigma0(w8));
+ Round(a, b, c, d, e, f, g, h, 0x748f82ee, w8 += sigma1(w6) + w1 + sigma0(w9));
+ Round(h, a, b, c, d, e, f, g, 0x78a5636f, w9 += sigma1(w7) + w2 + sigma0(w10));
+ Round(g, h, a, b, c, d, e, f, 0x84c87814, w10 += sigma1(w8) + w3 + sigma0(w11));
+ Round(f, g, h, a, b, c, d, e, 0x8cc70208, w11 += sigma1(w9) + w4 + sigma0(w12));
+ Round(e, f, g, h, a, b, c, d, 0x90befffa, w12 += sigma1(w10) + w5 + sigma0(w13));
+ Round(d, e, f, g, h, a, b, c, 0xa4506ceb, w13 += sigma1(w11) + w6 + sigma0(w14));
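+ /* The last two schedule words are never needed again, so the final two rounds use '+' rather than '+='. */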
+ Round(c, d, e, f, g, h, a, b, 0xbef9a3f7, w14 + sigma1(w12) + w7 + sigma0(w15));
+ Round(b, c, d, e, f, g, h, a, 0xc67178f2, w15 + sigma1(w13) + w8 + sigma0(w0));
+
+ s[0] += a;
+ s[1] += b;
+ s[2] += c;
+ s[3] += d;
+ s[4] += e;
+ s[5] += f;
+ s[6] += g;
+ s[7] += h;
+}
+
+static void secp256k1_sha256_write(secp256k1_sha256_t *hash, const unsigned char *data, size_t len) {
+ size_t bufsize = hash->bytes & 0x3F;
+ hash->bytes += len;
+ while (bufsize + len >= 64) {
+ /* Fill the buffer, and process it. */
+ memcpy(((unsigned char*)hash->buf) + bufsize, data, 64 - bufsize);
+ data += 64 - bufsize;
+ len -= 64 - bufsize;
+ secp256k1_sha256_transform(hash->s, hash->buf);
+ bufsize = 0;
+ }
+ if (len) {
+ /* Fill the buffer with what remains. */
+ memcpy(((unsigned char*)hash->buf) + bufsize, data, len);
+ }
+}
+
+static void secp256k1_sha256_finalize(secp256k1_sha256_t *hash, unsigned char *out32) {
+ static const unsigned char pad[64] = {0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ uint32_t sizedesc[2];
+ uint32_t out[8];
+ int i = 0;
+ sizedesc[0] = BE32(hash->bytes >> 29);
+ sizedesc[1] = BE32(hash->bytes << 3);
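+ /* Append 0x80 and zero padding so that the 8-byte bit-length written below completes a 64-byte block. */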
+ secp256k1_sha256_write(hash, pad, 1 + ((119 - (hash->bytes % 64)) % 64));
+ secp256k1_sha256_write(hash, (const unsigned char*)sizedesc, 8);
+ for (i = 0; i < 8; i++) {
+ out[i] = BE32(hash->s[i]);
+ hash->s[i] = 0;
+ }
+ memcpy(out32, (const unsigned char*)out, 32);
+}
+
+static void secp256k1_hmac_sha256_initialize(secp256k1_hmac_sha256_t *hash, const unsigned char *key, size_t keylen) {
+ int n;
+ unsigned char rkey[64];
+ if (keylen <= 64) {
+ memcpy(rkey, key, keylen);
+ memset(rkey + keylen, 0, 64 - keylen);
+ } else {
+ secp256k1_sha256_t sha256;
+ secp256k1_sha256_initialize(&sha256);
+ secp256k1_sha256_write(&sha256, key, keylen);
+ secp256k1_sha256_finalize(&sha256, rkey);
+ memset(rkey + 32, 0, 32);
+ }
+
+ secp256k1_sha256_initialize(&hash->outer);
+ for (n = 0; n < 64; n++) {
+ rkey[n] ^= 0x5c;
+ }
+ secp256k1_sha256_write(&hash->outer, rkey, 64);
+
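+ /* Flip the 0x5c (opad) mask into the 0x36 (ipad) mask by XORing with 0x5c ^ 0x36. */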
+ secp256k1_sha256_initialize(&hash->inner);
+ for (n = 0; n < 64; n++) {
+ rkey[n] ^= 0x5c ^ 0x36;
+ }
+ secp256k1_sha256_write(&hash->inner, rkey, 64);
+ memset(rkey, 0, 64);
+}
+
+static void secp256k1_hmac_sha256_write(secp256k1_hmac_sha256_t *hash, const unsigned char *data, size_t size) {
+ secp256k1_sha256_write(&hash->inner, data, size);
+}
+
+static void secp256k1_hmac_sha256_finalize(secp256k1_hmac_sha256_t *hash, unsigned char *out32) {
+ unsigned char temp[32];
+ secp256k1_sha256_finalize(&hash->inner, temp);
+ secp256k1_sha256_write(&hash->outer, temp, 32);
+ memset(temp, 0, 32);
+ secp256k1_sha256_finalize(&hash->outer, out32);
+}
+
+
+static void secp256k1_rfc6979_hmac_sha256_initialize(secp256k1_rfc6979_hmac_sha256_t *rng, const unsigned char *key, size_t keylen) {
+ secp256k1_hmac_sha256_t hmac;
+ static const unsigned char zero[1] = {0x00};
+ static const unsigned char one[1] = {0x01};
+
+ memset(rng->v, 0x01, 32); /* RFC6979 3.2.b. */
+ memset(rng->k, 0x00, 32); /* RFC6979 3.2.c. */
+
+ /* RFC6979 3.2.d. */
+ secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
+ secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
+ secp256k1_hmac_sha256_write(&hmac, zero, 1);
+ secp256k1_hmac_sha256_write(&hmac, key, keylen);
+ secp256k1_hmac_sha256_finalize(&hmac, rng->k);
+ secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
+ secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
+ secp256k1_hmac_sha256_finalize(&hmac, rng->v);
+
+ /* RFC6979 3.2.f. */
+ secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
+ secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
+ secp256k1_hmac_sha256_write(&hmac, one, 1);
+ secp256k1_hmac_sha256_write(&hmac, key, keylen);
+ secp256k1_hmac_sha256_finalize(&hmac, rng->k);
+ secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
+ secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
+ secp256k1_hmac_sha256_finalize(&hmac, rng->v);
+ rng->retry = 0;
+}
+
+static void secp256k1_rfc6979_hmac_sha256_generate(secp256k1_rfc6979_hmac_sha256_t *rng, unsigned char *out, size_t outlen) {
+ /* RFC6979 3.2.h. */
+ static const unsigned char zero[1] = {0x00};
+ if (rng->retry) {
+ secp256k1_hmac_sha256_t hmac;
+ secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
+ secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
+ secp256k1_hmac_sha256_write(&hmac, zero, 1);
+ secp256k1_hmac_sha256_finalize(&hmac, rng->k);
+ secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
+ secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
+ secp256k1_hmac_sha256_finalize(&hmac, rng->v);
+ }
+
+ while (outlen > 0) {
+ secp256k1_hmac_sha256_t hmac;
+ int now = outlen;
+ secp256k1_hmac_sha256_initialize(&hmac, rng->k, 32);
+ secp256k1_hmac_sha256_write(&hmac, rng->v, 32);
+ secp256k1_hmac_sha256_finalize(&hmac, rng->v);
+ if (now > 32) {
+ now = 32;
+ }
+ memcpy(out, rng->v, now);
+ out += now;
+ outlen -= now;
+ }
+
+ rng->retry = 1;
+}
+
+static void secp256k1_rfc6979_hmac_sha256_finalize(secp256k1_rfc6979_hmac_sha256_t *rng) {
+ memset(rng->k, 0, 32);
+ memset(rng->v, 0, 32);
+ rng->retry = 0;
+}
+
+#undef BE32
+#undef Round
+#undef sigma1
+#undef sigma0
+#undef Sigma1
+#undef Sigma0
+#undef Maj
+#undef Ch
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1.java b/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1.java
new file mode 100644
index 000000000..1c67802fb
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1.java
@@ -0,0 +1,446 @@
+/*
+ * Copyright 2013 Google Inc.
+ * Copyright 2014-2016 the libsecp256k1 contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bitcoin;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import java.math.BigInteger;
+import com.google.common.base.Preconditions;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import static org.bitcoin.NativeSecp256k1Util.*;
+
+/**
+ * This class holds native methods to handle ECDSA verification.
+ *
+ * You can find an example library that can be used for this at https://github.com/bitcoin/secp256k1
+ *
+ * To build secp256k1 for use with bitcoinj, run
+ * `./configure --enable-jni --enable-experimental --enable-module-ecdh`
+ * and `make` then copy `.libs/libsecp256k1.so` to your system library path
+ * or point the JVM to the folder containing it with -Djava.library.path
+ *
+ */
+public class NativeSecp256k1 {
+
+ private static final ReentrantReadWriteLock rwl = new ReentrantReadWriteLock();
+ private static final Lock r = rwl.readLock();
+ private static final Lock w = rwl.writeLock();
+ private static ThreadLocal<ByteBuffer> nativeECDSABuffer = new ThreadLocal<ByteBuffer>();
+ /**
+ * Verifies the given secp256k1 signature in native code.
+ * Calling when enabled == false is undefined (probably library not loaded)
+ *
+ * @param data The data which was signed, must be exactly 32 bytes
+ * @param signature The signature
+ * @param pub The public key which did the signing
+ */
+ public static boolean verify(byte[] data, byte[] signature, byte[] pub) throws AssertFailException{
+ Preconditions.checkArgument(data.length == 32 && signature.length <= 520 && pub.length <= 520);
+
+ ByteBuffer byteBuff = nativeECDSABuffer.get();
+ if (byteBuff == null || byteBuff.capacity() < 520) {
+ byteBuff = ByteBuffer.allocateDirect(520);
+ byteBuff.order(ByteOrder.nativeOrder());
+ nativeECDSABuffer.set(byteBuff);
+ }
+ byteBuff.rewind();
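+ // Pack the native call buffer as: 32-byte message hash || DER signature || public key.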
+ byteBuff.put(data);
+ byteBuff.put(signature);
+ byteBuff.put(pub);
+
+ byte[][] retByteArray;
+
+ r.lock();
+ try {
+ return secp256k1_ecdsa_verify(byteBuff, Secp256k1Context.getContext(), signature.length, pub.length) == 1;
+ } finally {
+ r.unlock();
+ }
+ }
+
+ /**
+ * libsecp256k1 Create an ECDSA signature.
+ *
+ * @param data Message hash, 32 bytes
+ * @param sec Secret key, 32 bytes
+ *
+ * Return values
+ * @param sig byte array of signature
+ */
+ public static byte[] sign(byte[] data, byte[] sec) throws AssertFailException{
+ Preconditions.checkArgument(data.length == 32 && sec.length <= 32);
+
+ ByteBuffer byteBuff = nativeECDSABuffer.get();
+ if (byteBuff == null || byteBuff.capacity() < 32 + 32) {
+ byteBuff = ByteBuffer.allocateDirect(32 + 32);
+ byteBuff.order(ByteOrder.nativeOrder());
+ nativeECDSABuffer.set(byteBuff);
+ }
+ byteBuff.rewind();
+ byteBuff.put(data);
+ byteBuff.put(sec);
+
+ byte[][] retByteArray;
+
+ r.lock();
+ try {
+ retByteArray = secp256k1_ecdsa_sign(byteBuff, Secp256k1Context.getContext());
+ } finally {
+ r.unlock();
+ }
+
+ byte[] sigArr = retByteArray[0];
+ int sigLen = new BigInteger(new byte[] { retByteArray[1][0] }).intValue();
+ int retVal = new BigInteger(new byte[] { retByteArray[1][1] }).intValue();
+
+ assertEquals(sigArr.length, sigLen, "Got bad signature length.");
+
+ return retVal == 0 ? new byte[0] : sigArr;
+ }
+
+ /**
+ * libsecp256k1 Seckey Verify - returns 1 if valid, 0 if invalid
+ *
+ * @param seckey ECDSA Secret key, 32 bytes
+ */
+ public static boolean secKeyVerify(byte[] seckey) {
+ Preconditions.checkArgument(seckey.length == 32);
+
+ ByteBuffer byteBuff = nativeECDSABuffer.get();
+ if (byteBuff == null || byteBuff.capacity() < seckey.length) {
+ byteBuff = ByteBuffer.allocateDirect(seckey.length);
+ byteBuff.order(ByteOrder.nativeOrder());
+ nativeECDSABuffer.set(byteBuff);
+ }
+ byteBuff.rewind();
+ byteBuff.put(seckey);
+
+ r.lock();
+ try {
+ return secp256k1_ec_seckey_verify(byteBuff,Secp256k1Context.getContext()) == 1;
+ } finally {
+ r.unlock();
+ }
+ }
+
+
+ /**
+ * libsecp256k1 Compute Pubkey - computes public key from secret key
+ *
+ * @param seckey ECDSA Secret key, 32 bytes
+ *
+ * Return values
+ * @param pubkey ECDSA Public key, 33 or 65 bytes
+ */
+ //TODO add a 'compressed' arg
+ public static byte[] computePubkey(byte[] seckey) throws AssertFailException{
+ Preconditions.checkArgument(seckey.length == 32);
+
+ ByteBuffer byteBuff = nativeECDSABuffer.get();
+ if (byteBuff == null || byteBuff.capacity() < seckey.length) {
+ byteBuff = ByteBuffer.allocateDirect(seckey.length);
+ byteBuff.order(ByteOrder.nativeOrder());
+ nativeECDSABuffer.set(byteBuff);
+ }
+ byteBuff.rewind();
+ byteBuff.put(seckey);
+
+ byte[][] retByteArray;
+
+ r.lock();
+ try {
+ retByteArray = secp256k1_ec_pubkey_create(byteBuff, Secp256k1Context.getContext());
+ } finally {
+ r.unlock();
+ }
+
+ byte[] pubArr = retByteArray[0];
+ int pubLen = new BigInteger(new byte[] { retByteArray[1][0] }).intValue();
+ int retVal = new BigInteger(new byte[] { retByteArray[1][1] }).intValue();
+
+ assertEquals(pubArr.length, pubLen, "Got bad pubkey length.");
+
+ return retVal == 0 ? new byte[0]: pubArr;
+ }
+
+ /**
+ * libsecp256k1 Cleanup - This destroys the secp256k1 context object
+ * This should be called at the end of the program for proper cleanup of the context.
+ */
+ public static synchronized void cleanup() {
+ w.lock();
+ try {
+ secp256k1_destroy_context(Secp256k1Context.getContext());
+ } finally {
+ w.unlock();
+ }
+ }
+
+ public static long cloneContext() {
+ r.lock();
+ try {
+ return secp256k1_ctx_clone(Secp256k1Context.getContext());
+ } finally { r.unlock(); }
+ }
+
+ /**
+ * libsecp256k1 PrivKey Tweak-Mul - Tweak privkey by multiplying to it
+ *
+ * @param tweak some bytes to tweak with
+ * @param seckey 32-byte seckey
+ */
+ public static byte[] privKeyTweakMul(byte[] privkey, byte[] tweak) throws AssertFailException{
+ Preconditions.checkArgument(privkey.length == 32);
+
+ ByteBuffer byteBuff = nativeECDSABuffer.get();
+ if (byteBuff == null || byteBuff.capacity() < privkey.length + tweak.length) {
+ byteBuff = ByteBuffer.allocateDirect(privkey.length + tweak.length);
+ byteBuff.order(ByteOrder.nativeOrder());
+ nativeECDSABuffer.set(byteBuff);
+ }
+ byteBuff.rewind();
+ byteBuff.put(privkey);
+ byteBuff.put(tweak);
+
+ byte[][] retByteArray;
+ r.lock();
+ try {
+ retByteArray = secp256k1_privkey_tweak_mul(byteBuff,Secp256k1Context.getContext());
+ } finally {
+ r.unlock();
+ }
+
+ byte[] privArr = retByteArray[0];
+
+ int privLen = (byte) new BigInteger(new byte[] { retByteArray[1][0] }).intValue() & 0xFF;
+ int retVal = new BigInteger(new byte[] { retByteArray[1][1] }).intValue();
+
+ assertEquals(privArr.length, privLen, "Got bad privkey length.");
+
+ assertEquals(retVal, 1, "Failed return value check.");
+
+ return privArr;
+ }
+
+ /**
+ * libsecp256k1 PrivKey Tweak-Add - Tweak privkey by adding to it
+ *
+ * @param tweak some bytes to tweak with
+ * @param seckey 32-byte seckey
+ */
+ public static byte[] privKeyTweakAdd(byte[] privkey, byte[] tweak) throws AssertFailException{
+ Preconditions.checkArgument(privkey.length == 32);
+
+ ByteBuffer byteBuff = nativeECDSABuffer.get();
+ if (byteBuff == null || byteBuff.capacity() < privkey.length + tweak.length) {
+ byteBuff = ByteBuffer.allocateDirect(privkey.length + tweak.length);
+ byteBuff.order(ByteOrder.nativeOrder());
+ nativeECDSABuffer.set(byteBuff);
+ }
+ byteBuff.rewind();
+ byteBuff.put(privkey);
+ byteBuff.put(tweak);
+
+ byte[][] retByteArray;
+ r.lock();
+ try {
+ retByteArray = secp256k1_privkey_tweak_add(byteBuff,Secp256k1Context.getContext());
+ } finally {
+ r.unlock();
+ }
+
+ byte[] privArr = retByteArray[0];
+
+ int privLen = (byte) new BigInteger(new byte[] { retByteArray[1][0] }).intValue() & 0xFF;
+ int retVal = new BigInteger(new byte[] { retByteArray[1][1] }).intValue();
+
+ assertEquals(privArr.length, privLen, "Got bad privkey length.");
+
+ assertEquals(retVal, 1, "Failed return value check.");
+
+ return privArr;
+ }
+
+ /**
+ * libsecp256k1 PubKey Tweak-Add - Tweak pubkey by adding to it
+ *
+ * @param tweak some bytes to tweak with
+ * @param pubkey 33 or 65 byte public key
+ */
+ public static byte[] pubKeyTweakAdd(byte[] pubkey, byte[] tweak) throws AssertFailException{
+ Preconditions.checkArgument(pubkey.length == 33 || pubkey.length == 65);
+
+ ByteBuffer byteBuff = nativeECDSABuffer.get();
+ if (byteBuff == null || byteBuff.capacity() < pubkey.length + tweak.length) {
+ byteBuff = ByteBuffer.allocateDirect(pubkey.length + tweak.length);
+ byteBuff.order(ByteOrder.nativeOrder());
+ nativeECDSABuffer.set(byteBuff);
+ }
+ byteBuff.rewind();
+ byteBuff.put(pubkey);
+ byteBuff.put(tweak);
+
+ byte[][] retByteArray;
+ r.lock();
+ try {
+ retByteArray = secp256k1_pubkey_tweak_add(byteBuff,Secp256k1Context.getContext(), pubkey.length);
+ } finally {
+ r.unlock();
+ }
+
+ byte[] pubArr = retByteArray[0];
+
+ int pubLen = (byte) new BigInteger(new byte[] { retByteArray[1][0] }).intValue() & 0xFF;
+ int retVal = new BigInteger(new byte[] { retByteArray[1][1] }).intValue();
+
+ assertEquals(pubArr.length, pubLen, "Got bad pubkey length.");
+
+ assertEquals(retVal, 1, "Failed return value check.");
+
+ return pubArr;
+ }
+
+ /**
+ * libsecp256k1 PubKey Tweak-Mul - Tweak pubkey by multiplying to it
+ *
+ * @param tweak some bytes to tweak with
+ * @param pubkey 33 or 65 byte public key
+ */
+ public static byte[] pubKeyTweakMul(byte[] pubkey, byte[] tweak) throws AssertFailException{
+ Preconditions.checkArgument(pubkey.length == 33 || pubkey.length == 65);
+
+ ByteBuffer byteBuff = nativeECDSABuffer.get();
+ if (byteBuff == null || byteBuff.capacity() < pubkey.length + tweak.length) {
+ byteBuff = ByteBuffer.allocateDirect(pubkey.length + tweak.length);
+ byteBuff.order(ByteOrder.nativeOrder());
+ nativeECDSABuffer.set(byteBuff);
+ }
+ byteBuff.rewind();
+ byteBuff.put(pubkey);
+ byteBuff.put(tweak);
+
+ byte[][] retByteArray;
+ r.lock();
+ try {
+ retByteArray = secp256k1_pubkey_tweak_mul(byteBuff,Secp256k1Context.getContext(), pubkey.length);
+ } finally {
+ r.unlock();
+ }
+
+ byte[] pubArr = retByteArray[0];
+
+ int pubLen = (byte) new BigInteger(new byte[] { retByteArray[1][0] }).intValue() & 0xFF;
+ int retVal = new BigInteger(new byte[] { retByteArray[1][1] }).intValue();
+
+ assertEquals(pubArr.length, pubLen, "Got bad pubkey length.");
+
+ assertEquals(retVal, 1, "Failed return value check.");
+
+ return pubArr;
+ }
+
+ /**
+ * libsecp256k1 create ECDH secret - constant time ECDH calculation
+ *
+ * @param seckey byte array of secret key used in exponentiation
+ * @param pubkey byte array of public key used in exponentiation
+ */
+ public static byte[] createECDHSecret(byte[] seckey, byte[] pubkey) throws AssertFailException{
+ Preconditions.checkArgument(seckey.length <= 32 && pubkey.length <= 65);
+
+ ByteBuffer byteBuff = nativeECDSABuffer.get();
+ if (byteBuff == null || byteBuff.capacity() < 32 + pubkey.length) {
+ byteBuff = ByteBuffer.allocateDirect(32 + pubkey.length);
+ byteBuff.order(ByteOrder.nativeOrder());
+ nativeECDSABuffer.set(byteBuff);
+ }
+ byteBuff.rewind();
+ byteBuff.put(seckey);
+ byteBuff.put(pubkey);
+
+ byte[][] retByteArray;
+ r.lock();
+ try {
+ retByteArray = secp256k1_ecdh(byteBuff, Secp256k1Context.getContext(), pubkey.length);
+ } finally {
+ r.unlock();
+ }
+
+ byte[] resArr = retByteArray[0];
+ int retVal = new BigInteger(new byte[] { retByteArray[1][0] }).intValue();
+
+ assertEquals(resArr.length, 32, "Got bad result length.");
+ assertEquals(retVal, 1, "Failed return value check.");
+
+ return resArr;
+ }
+
+ /**
+ * libsecp256k1 randomize - updates the context randomization
+ *
+ * @param seed 32-byte random seed
+ */
+ public static synchronized boolean randomize(byte[] seed) throws AssertFailException{
+ Preconditions.checkArgument(seed == null || seed.length == 32);
+
+ ByteBuffer byteBuff = nativeECDSABuffer.get();
+ if (byteBuff == null || byteBuff.capacity() < seed.length) {
+ byteBuff = ByteBuffer.allocateDirect(seed.length);
+ byteBuff.order(ByteOrder.nativeOrder());
+ nativeECDSABuffer.set(byteBuff);
+ }
+ byteBuff.rewind();
+ byteBuff.put(seed);
+
+ w.lock();
+ try {
+ return secp256k1_context_randomize(byteBuff, Secp256k1Context.getContext()) == 1;
+ } finally {
+ w.unlock();
+ }
+ }
+
+ private static native long secp256k1_ctx_clone(long context);
+
+ private static native int secp256k1_context_randomize(ByteBuffer byteBuff, long context);
+
+ private static native byte[][] secp256k1_privkey_tweak_add(ByteBuffer byteBuff, long context);
+
+ private static native byte[][] secp256k1_privkey_tweak_mul(ByteBuffer byteBuff, long context);
+
+ private static native byte[][] secp256k1_pubkey_tweak_add(ByteBuffer byteBuff, long context, int pubLen);
+
+ private static native byte[][] secp256k1_pubkey_tweak_mul(ByteBuffer byteBuff, long context, int pubLen);
+
+ private static native void secp256k1_destroy_context(long context);
+
+ private static native int secp256k1_ecdsa_verify(ByteBuffer byteBuff, long context, int sigLen, int pubLen);
+
+ private static native byte[][] secp256k1_ecdsa_sign(ByteBuffer byteBuff, long context);
+
+ private static native int secp256k1_ec_seckey_verify(ByteBuffer byteBuff, long context);
+
+ private static native byte[][] secp256k1_ec_pubkey_create(ByteBuffer byteBuff, long context);
+
+ private static native byte[][] secp256k1_ec_pubkey_parse(ByteBuffer byteBuff, long context, int inputLen);
+
+ private static native byte[][] secp256k1_ecdh(ByteBuffer byteBuff, long context, int inputLen);
+
+}
diff --git a/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1Test.java b/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1Test.java
new file mode 100644
index 000000000..c00d08899
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1Test.java
@@ -0,0 +1,226 @@
+package org.bitcoin;
+
+import com.google.common.io.BaseEncoding;
+import java.util.Arrays;
+import java.math.BigInteger;
+import javax.xml.bind.DatatypeConverter;
+import static org.bitcoin.NativeSecp256k1Util.*;
+
+/**
+ * This class holds test cases defined for testing this library.
+ */
+public class NativeSecp256k1Test {
+
+ //TODO improve comments/add more tests
+ /**
+ * This tests verify() for a valid signature
+ */
+ public static void testVerifyPos() throws AssertFailException{
+ boolean result = false;
+ byte[] data = BaseEncoding.base16().lowerCase().decode("CF80CD8AED482D5D1527D7DC72FCEFF84E6326592848447D2DC0B0E87DFC9A90".toLowerCase()); //sha256hash of "testing"
+ byte[] sig = BaseEncoding.base16().lowerCase().decode("3044022079BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F817980220294F14E883B3F525B5367756C2A11EF6CF84B730B36C17CB0C56F0AAB2C98589".toLowerCase());
+ byte[] pub = BaseEncoding.base16().lowerCase().decode("040A629506E1B65CD9D2E0BA9C75DF9C4FED0DB16DC9625ED14397F0AFC836FAE595DC53F8B0EFE61E703075BD9B143BAC75EC0E19F82A2208CAEB32BE53414C40".toLowerCase());
+
+ result = NativeSecp256k1.verify( data, sig, pub);
+ assertEquals( result, true , "testVerifyPos");
+ }
+
+ /**
+ * This tests verify() for a non-valid signature
+ */
+ public static void testVerifyNeg() throws AssertFailException{
+ boolean result = false;
+ byte[] data = BaseEncoding.base16().lowerCase().decode("CF80CD8AED482D5D1527D7DC72FCEFF84E6326592848447D2DC0B0E87DFC9A91".toLowerCase()); //sha256hash of "testing"
+ byte[] sig = BaseEncoding.base16().lowerCase().decode("3044022079BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F817980220294F14E883B3F525B5367756C2A11EF6CF84B730B36C17CB0C56F0AAB2C98589".toLowerCase());
+ byte[] pub = BaseEncoding.base16().lowerCase().decode("040A629506E1B65CD9D2E0BA9C75DF9C4FED0DB16DC9625ED14397F0AFC836FAE595DC53F8B0EFE61E703075BD9B143BAC75EC0E19F82A2208CAEB32BE53414C40".toLowerCase());
+
+ result = NativeSecp256k1.verify( data, sig, pub);
+ //System.out.println(" TEST " + new BigInteger(1, resultbytes).toString(16));
+ assertEquals( result, false , "testVerifyNeg");
+ }
+
+ /**
+ * This tests secret key verify() for a valid secretkey
+ */
+ public static void testSecKeyVerifyPos() throws AssertFailException{
+ boolean result = false;
+ byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase());
+
+ result = NativeSecp256k1.secKeyVerify( sec );
+ //System.out.println(" TEST " + new BigInteger(1, resultbytes).toString(16));
+ assertEquals( result, true , "testSecKeyVerifyPos");
+ }
+
+ /**
+ * This tests secret key verify() for an invalid secretkey
+ */
+ public static void testSecKeyVerifyNeg() throws AssertFailException{
+ boolean result = false;
+ byte[] sec = BaseEncoding.base16().lowerCase().decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF".toLowerCase());
+
+ result = NativeSecp256k1.secKeyVerify( sec );
+ //System.out.println(" TEST " + new BigInteger(1, resultbytes).toString(16));
+ assertEquals( result, false , "testSecKeyVerifyNeg");
+ }
+
+ /**
+ * This tests public key create() for a valid secretkey
+ */
+ public static void testPubKeyCreatePos() throws AssertFailException{
+ byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase());
+
+ byte[] resultArr = NativeSecp256k1.computePubkey( sec);
+ String pubkeyString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr);
+ assertEquals( pubkeyString , "04C591A8FF19AC9C4E4E5793673B83123437E975285E7B442F4EE2654DFFCA5E2D2103ED494718C697AC9AEBCFD19612E224DB46661011863ED2FC54E71861E2A6" , "testPubKeyCreatePos");
+ }
+
+ /**
+ * This tests public key create() for an invalid secretkey
+ */
+ public static void testPubKeyCreateNeg() throws AssertFailException{
+ byte[] sec = BaseEncoding.base16().lowerCase().decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF".toLowerCase());
+
+ byte[] resultArr = NativeSecp256k1.computePubkey( sec);
+ String pubkeyString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr);
+ assertEquals( pubkeyString, "" , "testPubKeyCreateNeg");
+ }
+
+ /**
+ * This tests sign() for a valid secretkey
+ */
+ public static void testSignPos() throws AssertFailException{
+
+ byte[] data = BaseEncoding.base16().lowerCase().decode("CF80CD8AED482D5D1527D7DC72FCEFF84E6326592848447D2DC0B0E87DFC9A90".toLowerCase()); //sha256hash of "testing"
+ byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase());
+
+ byte[] resultArr = NativeSecp256k1.sign(data, sec);
+ String sigString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr);
+ assertEquals( sigString, "30440220182A108E1448DC8F1FB467D06A0F3BB8EA0533584CB954EF8DA112F1D60E39A202201C66F36DA211C087F3AF88B50EDF4F9BDAA6CF5FD6817E74DCA34DB12390C6E9" , "testSignPos");
+ }
+
+ /**
+ * This tests sign() for an invalid secretkey
+ */
+ public static void testSignNeg() throws AssertFailException{
+ byte[] data = BaseEncoding.base16().lowerCase().decode("CF80CD8AED482D5D1527D7DC72FCEFF84E6326592848447D2DC0B0E87DFC9A90".toLowerCase()); //sha256hash of "testing"
+ byte[] sec = BaseEncoding.base16().lowerCase().decode("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF".toLowerCase());
+
+ byte[] resultArr = NativeSecp256k1.sign(data, sec);
+ String sigString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr);
+ assertEquals( sigString, "" , "testSignNeg");
+ }
+
+ /**
+ * This tests private key tweak-add
+ */
+ public static void testPrivKeyTweakAdd_1() throws AssertFailException {
+ byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase());
+ byte[] data = BaseEncoding.base16().lowerCase().decode("3982F19BEF1615BCCFBB05E321C10E1D4CBA3DF0E841C2E41EEB6016347653C3".toLowerCase()); //sha256hash of "tweak"
+
+ byte[] resultArr = NativeSecp256k1.privKeyTweakAdd( sec , data );
+ String sigString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr);
+ assertEquals( sigString , "A168571E189E6F9A7E2D657A4B53AE99B909F7E712D1C23CED28093CD57C88F3" , "testPrivKeyAdd_1");
+ }
+
+ /**
+ * This tests private key tweak-mul
+ */
+ public static void testPrivKeyTweakMul_1() throws AssertFailException {
+ byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase());
+ byte[] data = BaseEncoding.base16().lowerCase().decode("3982F19BEF1615BCCFBB05E321C10E1D4CBA3DF0E841C2E41EEB6016347653C3".toLowerCase()); //sha256hash of "tweak"
+
+ byte[] resultArr = NativeSecp256k1.privKeyTweakMul( sec , data );
+ String sigString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr);
+ assertEquals( sigString , "97F8184235F101550F3C71C927507651BD3F1CDB4A5A33B8986ACF0DEE20FFFC" , "testPrivKeyMul_1");
+ }
+
+ /**
+ * This tests public key tweak-add uncompressed
+ */
+ public static void testPrivKeyTweakAdd_2() throws AssertFailException {
+ byte[] pub = BaseEncoding.base16().lowerCase().decode("040A629506E1B65CD9D2E0BA9C75DF9C4FED0DB16DC9625ED14397F0AFC836FAE595DC53F8B0EFE61E703075BD9B143BAC75EC0E19F82A2208CAEB32BE53414C40".toLowerCase());
+ byte[] data = BaseEncoding.base16().lowerCase().decode("3982F19BEF1615BCCFBB05E321C10E1D4CBA3DF0E841C2E41EEB6016347653C3".toLowerCase()); //sha256hash of "tweak"
+
+ byte[] resultArr = NativeSecp256k1.pubKeyTweakAdd( pub , data );
+ String sigString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr);
+ assertEquals( sigString , "0411C6790F4B663CCE607BAAE08C43557EDC1A4D11D88DFCB3D841D0C6A941AF525A268E2A863C148555C48FB5FBA368E88718A46E205FABC3DBA2CCFFAB0796EF" , "testPrivKeyAdd_2");
+ }
+
+ /**
+ * This tests public key tweak-mul uncompressed
+ */
+ public static void testPrivKeyTweakMul_2() throws AssertFailException {
+ byte[] pub = BaseEncoding.base16().lowerCase().decode("040A629506E1B65CD9D2E0BA9C75DF9C4FED0DB16DC9625ED14397F0AFC836FAE595DC53F8B0EFE61E703075BD9B143BAC75EC0E19F82A2208CAEB32BE53414C40".toLowerCase());
+ byte[] data = BaseEncoding.base16().lowerCase().decode("3982F19BEF1615BCCFBB05E321C10E1D4CBA3DF0E841C2E41EEB6016347653C3".toLowerCase()); //sha256hash of "tweak"
+
+ byte[] resultArr = NativeSecp256k1.pubKeyTweakMul( pub , data );
+ String sigString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr);
+ assertEquals( sigString , "04E0FE6FE55EBCA626B98A807F6CAF654139E14E5E3698F01A9A658E21DC1D2791EC060D4F412A794D5370F672BC94B722640B5F76914151CFCA6E712CA48CC589" , "testPrivKeyMul_2");
+ }
+
+ /**
+ * This tests seed randomization
+ */
+ public static void testRandomize() throws AssertFailException {
+ byte[] seed = BaseEncoding.base16().lowerCase().decode("A441B15FE9A3CF56661190A0B93B9DEC7D04127288CC87250967CF3B52894D11".toLowerCase()); //sha256hash of "random"
+ boolean result = NativeSecp256k1.randomize(seed);
+ assertEquals( result, true, "testRandomize");
+ }
+
+ public static void testCreateECDHSecret() throws AssertFailException{
+
+ byte[] sec = BaseEncoding.base16().lowerCase().decode("67E56582298859DDAE725F972992A07C6C4FB9F62A8FFF58CE3CA926A1063530".toLowerCase());
+ byte[] pub = BaseEncoding.base16().lowerCase().decode("040A629506E1B65CD9D2E0BA9C75DF9C4FED0DB16DC9625ED14397F0AFC836FAE595DC53F8B0EFE61E703075BD9B143BAC75EC0E19F82A2208CAEB32BE53414C40".toLowerCase());
+
+ byte[] resultArr = NativeSecp256k1.createECDHSecret(sec, pub);
+ String ecdhString = javax.xml.bind.DatatypeConverter.printHexBinary(resultArr);
+ assertEquals( ecdhString, "2A2A67007A926E6594AF3EB564FC74005B37A9C8AEF2033C4552051B5C87F043" , "testCreateECDHSecret");
+ }
+
+ public static void main(String[] args) throws AssertFailException{
+
+
+ System.out.println("\n libsecp256k1 enabled: " + Secp256k1Context.isEnabled() + "\n");
+
+ assertEquals( Secp256k1Context.isEnabled(), true, "isEnabled" );
+
+ //Test verify() success/fail
+ testVerifyPos();
+ testVerifyNeg();
+
+ //Test secKeyVerify() success/fail
+ testSecKeyVerifyPos();
+ testSecKeyVerifyNeg();
+
+ //Test computePubkey() success/fail
+ testPubKeyCreatePos();
+ testPubKeyCreateNeg();
+
+ //Test sign() success/fail
+ testSignPos();
+ testSignNeg();
+
+ //Test privKeyTweakAdd() 1
+ testPrivKeyTweakAdd_1();
+
+ //Test privKeyTweakMul() 2
+ testPrivKeyTweakMul_1();
+
+ //Test privKeyTweakAdd() 3
+ testPrivKeyTweakAdd_2();
+
+ //Test privKeyTweakMul() 4
+ testPrivKeyTweakMul_2();
+
+ //Test randomize()
+ testRandomize();
+
+ //Test ECDH
+ testCreateECDHSecret();
+
+ NativeSecp256k1.cleanup();
+
+ System.out.println(" All tests passed." );
+
+ }
+}
diff --git a/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1Util.java b/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1Util.java
new file mode 100644
index 000000000..04732ba04
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/NativeSecp256k1Util.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2014-2016 the libsecp256k1 contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bitcoin;
+
+public class NativeSecp256k1Util{
+
+ public static void assertEquals( int val, int val2, String message ) throws AssertFailException{
+ if( val != val2 )
+ throw new AssertFailException("FAIL: " + message);
+ }
+
+ public static void assertEquals( boolean val, boolean val2, String message ) throws AssertFailException{
+ if( val != val2 )
+ throw new AssertFailException("FAIL: " + message);
+ else
+ System.out.println("PASS: " + message);
+ }
+
+ public static void assertEquals( String val, String val2, String message ) throws AssertFailException{
+ if( !val.equals(val2) )
+ throw new AssertFailException("FAIL: " + message);
+ else
+ System.out.println("PASS: " + message);
+ }
+
+ public static class AssertFailException extends Exception {
+ public AssertFailException(String message) {
+ super( message );
+ }
+ }
+}
diff --git a/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/Secp256k1Context.java b/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/Secp256k1Context.java
new file mode 100644
index 000000000..216c986a8
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/java/org/bitcoin/Secp256k1Context.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2014-2016 the libsecp256k1 contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bitcoin;
+
+/**
+ * This class holds the context reference used in native methods
+ * to handle ECDSA operations.
+ */
+public class Secp256k1Context {
+ private static final boolean enabled; //true if the library is loaded
+ private static final long context; //ref to pointer to context obj
+
+ static { //static initializer
+ boolean isEnabled = true;
+ long contextRef = -1;
+ try {
+ System.loadLibrary("secp256k1");
+ contextRef = secp256k1_init_context();
+ } catch (UnsatisfiedLinkError e) {
+ System.out.println("UnsatisfiedLinkError: " + e.toString());
+ isEnabled = false;
+ }
+ enabled = isEnabled;
+ context = contextRef;
+ }
+
+ public static boolean isEnabled() {
+ return enabled;
+ }
+
+ public static long getContext() {
+ if(!enabled) return -1; //sanity check
+ return context;
+ }
+
+ private static native long secp256k1_init_context();
+}
diff --git a/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_NativeSecp256k1.c b/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_NativeSecp256k1.c
new file mode 100644
index 000000000..bcef7b32c
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_NativeSecp256k1.c
@@ -0,0 +1,377 @@
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include "org_bitcoin_NativeSecp256k1.h"
+#include "include/secp256k1.h"
+#include "include/secp256k1_ecdh.h"
+#include "include/secp256k1_recovery.h"
+
+
+SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ctx_1clone
+ (JNIEnv* env, jclass classObject, jlong ctx_l)
+{
+ const secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
+
+ jlong ctx_clone_l = (uintptr_t) secp256k1_context_clone(ctx);
+
+ (void)classObject;(void)env;
+
+ return ctx_clone_l;
+
+}
+
+SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1context_1randomize
+ (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l)
+{
+ secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
+
+ const unsigned char* seed = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject);
+
+ (void)classObject;
+
+ return secp256k1_context_randomize(ctx, seed);
+
+}
+
+SECP256K1_API void JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1destroy_1context
+ (JNIEnv* env, jclass classObject, jlong ctx_l)
+{
+ secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
+
+ secp256k1_context_destroy(ctx);
+
+ (void)classObject;(void)env;
+}
+
+SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1verify
+ (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint siglen, jint publen)
+{
+ secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
+
+ unsigned char* data = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject);
+ const unsigned char* sigdata = { (unsigned char*) (data + 32) };
+ const unsigned char* pubdata = { (unsigned char*) (data + siglen + 32) };
+
+ secp256k1_ecdsa_signature sig;
+ secp256k1_pubkey pubkey;
+
+ int ret = secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigdata, siglen);
+
+ if( ret ) {
+ ret = secp256k1_ec_pubkey_parse(ctx, &pubkey, pubdata, publen);
+
+ if( ret ) {
+ ret = secp256k1_ecdsa_verify(ctx, &sig, data, &pubkey);
+ }
+ }
+
+ (void)classObject;
+
+ return ret;
+}
+
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1sign
+ (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l)
+{
+ secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
+ unsigned char* data = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject);
+ unsigned char* secKey = (unsigned char*) (data + 32);
+
+ jobjectArray retArray;
+ jbyteArray sigArray, intsByteArray;
+ unsigned char intsarray[2];
+
+ secp256k1_ecdsa_signature sig[72];
+
+ int ret = secp256k1_ecdsa_sign(ctx, sig, data, secKey, NULL, NULL );
+
+ unsigned char outputSer[72];
+ size_t outputLen = 72;
+
+ if( ret ) {
+ int ret2 = secp256k1_ecdsa_signature_serialize_der(ctx,outputSer, &outputLen, sig ); (void)ret2;
+ }
+
+ intsarray[0] = outputLen;
+ intsarray[1] = ret;
+
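+ /* Return a two-element byte[][]: element 0 is the DER-encoded signature, element 1 holds { serialized length, sign() result }. */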
+ retArray = (*env)->NewObjectArray(env, 2,
+ (*env)->FindClass(env, "[B"),
+ (*env)->NewByteArray(env, 1));
+
+ sigArray = (*env)->NewByteArray(env, outputLen);
+ (*env)->SetByteArrayRegion(env, sigArray, 0, outputLen, (jbyte*)outputSer);
+ (*env)->SetObjectArrayElement(env, retArray, 0, sigArray);
+
+ intsByteArray = (*env)->NewByteArray(env, 2);
+ (*env)->SetByteArrayRegion(env, intsByteArray, 0, 2, (jbyte*)intsarray);
+ (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray);
+
+ (void)classObject;
+
+ return retArray;
+}
+
+SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1seckey_1verify
+ (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l)
+{
+ secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
+ unsigned char* secKey = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject);
+
+ (void)classObject;
+
+ return secp256k1_ec_seckey_verify(ctx, secKey);
+}
+
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1pubkey_1create
+ (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l)
+{
+ secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
+ const unsigned char* secKey = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject);
+
+ secp256k1_pubkey pubkey;
+
+ jobjectArray retArray;
+ jbyteArray pubkeyArray, intsByteArray;
+ unsigned char intsarray[2];
+
+ int ret = secp256k1_ec_pubkey_create(ctx, &pubkey, secKey);
+
+ unsigned char outputSer[65];
+ size_t outputLen = 65;
+
+ if( ret ) {
+ int ret2 = secp256k1_ec_pubkey_serialize(ctx,outputSer, &outputLen, &pubkey,SECP256K1_EC_UNCOMPRESSED );(void)ret2;
+ }
+
+ intsarray[0] = outputLen;
+ intsarray[1] = ret;
+
+ retArray = (*env)->NewObjectArray(env, 2,
+ (*env)->FindClass(env, "[B"),
+ (*env)->NewByteArray(env, 1));
+
+ pubkeyArray = (*env)->NewByteArray(env, outputLen);
+ (*env)->SetByteArrayRegion(env, pubkeyArray, 0, outputLen, (jbyte*)outputSer);
+ (*env)->SetObjectArrayElement(env, retArray, 0, pubkeyArray);
+
+ intsByteArray = (*env)->NewByteArray(env, 2);
+ (*env)->SetByteArrayRegion(env, intsByteArray, 0, 2, (jbyte*)intsarray);
+ (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray);
+
+ (void)classObject;
+
+ return retArray;
+
+}
+
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1privkey_1tweak_1add
+ (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l)
+{
+ secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
+ unsigned char* privkey = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject);
+ const unsigned char* tweak = (unsigned char*) (privkey + 32);
+
+ jobjectArray retArray;
+ jbyteArray privArray, intsByteArray;
+ unsigned char intsarray[2];
+
+ int privkeylen = 32;
+
+ int ret = secp256k1_ec_privkey_tweak_add(ctx, privkey, tweak);
+
+ intsarray[0] = privkeylen;
+ intsarray[1] = ret;
+
+ retArray = (*env)->NewObjectArray(env, 2,
+ (*env)->FindClass(env, "[B"),
+ (*env)->NewByteArray(env, 1));
+
+ privArray = (*env)->NewByteArray(env, privkeylen);
+ (*env)->SetByteArrayRegion(env, privArray, 0, privkeylen, (jbyte*)privkey);
+ (*env)->SetObjectArrayElement(env, retArray, 0, privArray);
+
+ intsByteArray = (*env)->NewByteArray(env, 2);
+ (*env)->SetByteArrayRegion(env, intsByteArray, 0, 2, (jbyte*)intsarray);
+ (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray);
+
+ (void)classObject;
+
+ return retArray;
+}
+
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1privkey_1tweak_1mul
+ (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l)
+{
+ secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
+ unsigned char* privkey = (unsigned char*) (*env)->GetDirectBufferAddress(env, byteBufferObject);
+ const unsigned char* tweak = (unsigned char*) (privkey + 32);
+
+ jobjectArray retArray;
+ jbyteArray privArray, intsByteArray;
+ unsigned char intsarray[2];
+
+ int privkeylen = 32;
+
+ int ret = secp256k1_ec_privkey_tweak_mul(ctx, privkey, tweak);
+
+ intsarray[0] = privkeylen;
+ intsarray[1] = ret;
+
+ retArray = (*env)->NewObjectArray(env, 2,
+ (*env)->FindClass(env, "[B"),
+ (*env)->NewByteArray(env, 1));
+
+ privArray = (*env)->NewByteArray(env, privkeylen);
+ (*env)->SetByteArrayRegion(env, privArray, 0, privkeylen, (jbyte*)privkey);
+ (*env)->SetObjectArrayElement(env, retArray, 0, privArray);
+
+ intsByteArray = (*env)->NewByteArray(env, 2);
+ (*env)->SetByteArrayRegion(env, intsByteArray, 0, 2, (jbyte*)intsarray);
+ (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray);
+
+ (void)classObject;
+
+ return retArray;
+}
+
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1pubkey_1tweak_1add
+ (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint publen)
+{
+ secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
+/* secp256k1_pubkey* pubkey = (secp256k1_pubkey*) (*env)->GetDirectBufferAddress(env, byteBufferObject);*/
+ unsigned char* pkey = (*env)->GetDirectBufferAddress(env, byteBufferObject);
+ const unsigned char* tweak = (unsigned char*) (pkey + publen);
+
+ jobjectArray retArray;
+ jbyteArray pubArray, intsByteArray;
+ unsigned char intsarray[2];
+ unsigned char outputSer[65];
+ size_t outputLen = 65;
+
+ secp256k1_pubkey pubkey;
+ int ret = secp256k1_ec_pubkey_parse(ctx, &pubkey, pkey, publen);
+
+ if( ret ) {
+ ret = secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, tweak);
+ }
+
+ if( ret ) {
+ int ret2 = secp256k1_ec_pubkey_serialize(ctx,outputSer, &outputLen, &pubkey,SECP256K1_EC_UNCOMPRESSED );(void)ret2;
+ }
+
+ intsarray[0] = outputLen;
+ intsarray[1] = ret;
+
+ retArray = (*env)->NewObjectArray(env, 2,
+ (*env)->FindClass(env, "[B"),
+ (*env)->NewByteArray(env, 1));
+
+ pubArray = (*env)->NewByteArray(env, outputLen);
+ (*env)->SetByteArrayRegion(env, pubArray, 0, outputLen, (jbyte*)outputSer);
+ (*env)->SetObjectArrayElement(env, retArray, 0, pubArray);
+
+ intsByteArray = (*env)->NewByteArray(env, 2);
+ (*env)->SetByteArrayRegion(env, intsByteArray, 0, 2, (jbyte*)intsarray);
+ (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray);
+
+ (void)classObject;
+
+ return retArray;
+}
+
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1pubkey_1tweak_1mul
+ (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint publen)
+{
+ secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
+ unsigned char* pkey = (*env)->GetDirectBufferAddress(env, byteBufferObject);
+ const unsigned char* tweak = (unsigned char*) (pkey + publen);
+
+ jobjectArray retArray;
+ jbyteArray pubArray, intsByteArray;
+ unsigned char intsarray[2];
+ unsigned char outputSer[65];
+ size_t outputLen = 65;
+
+ secp256k1_pubkey pubkey;
+ int ret = secp256k1_ec_pubkey_parse(ctx, &pubkey, pkey, publen);
+
+ if ( ret ) {
+ ret = secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, tweak);
+ }
+
+ if( ret ) {
+ int ret2 = secp256k1_ec_pubkey_serialize(ctx,outputSer, &outputLen, &pubkey,SECP256K1_EC_UNCOMPRESSED );(void)ret2;
+ }
+
+ intsarray[0] = outputLen;
+ intsarray[1] = ret;
+
+ retArray = (*env)->NewObjectArray(env, 2,
+ (*env)->FindClass(env, "[B"),
+ (*env)->NewByteArray(env, 1));
+
+ pubArray = (*env)->NewByteArray(env, outputLen);
+ (*env)->SetByteArrayRegion(env, pubArray, 0, outputLen, (jbyte*)outputSer);
+ (*env)->SetObjectArrayElement(env, retArray, 0, pubArray);
+
+ intsByteArray = (*env)->NewByteArray(env, 2);
+ (*env)->SetByteArrayRegion(env, intsByteArray, 0, 2, (jbyte*)intsarray);
+ (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray);
+
+ (void)classObject;
+
+ return retArray;
+}
+
+SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1pubkey_1combine
+ (JNIEnv * env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint numkeys)
+{
+ (void)classObject;(void)env;(void)byteBufferObject;(void)ctx_l;(void)numkeys;
+
+ return 0;
+}
+
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdh
+ (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint publen)
+{
+ secp256k1_context *ctx = (secp256k1_context*)(uintptr_t)ctx_l;
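+ /* The direct buffer holds the 32-byte secret key followed by the serialized public key (publen bytes). */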
+ const unsigned char* secdata = (*env)->GetDirectBufferAddress(env, byteBufferObject);
+ const unsigned char* pubdata = (const unsigned char*) (secdata + 32);
+
+ jobjectArray retArray;
+ jbyteArray outArray, intsByteArray;
+ unsigned char intsarray[1];
+ secp256k1_pubkey pubkey;
+ unsigned char nonce_res[32];
+ size_t outputLen = 32;
+
+ int ret = secp256k1_ec_pubkey_parse(ctx, &pubkey, pubdata, publen);
+
+ if (ret) {
+ ret = secp256k1_ecdh(
+ ctx,
+ nonce_res,
+ &pubkey,
+ secdata
+ );
+ }
+
+ intsarray[0] = ret;
+
+ retArray = (*env)->NewObjectArray(env, 2,
+ (*env)->FindClass(env, "[B"),
+ (*env)->NewByteArray(env, 1));
+
+ outArray = (*env)->NewByteArray(env, outputLen);
+ (*env)->SetByteArrayRegion(env, outArray, 0, 32, (jbyte*)nonce_res);
+ (*env)->SetObjectArrayElement(env, retArray, 0, outArray);
+
+ intsByteArray = (*env)->NewByteArray(env, 1);
+ (*env)->SetByteArrayRegion(env, intsByteArray, 0, 1, (jbyte*)intsarray);
+ (*env)->SetObjectArrayElement(env, retArray, 1, intsByteArray);
+
+ (void)classObject;
+
+ return retArray;
+}
diff --git a/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_NativeSecp256k1.h b/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_NativeSecp256k1.h
new file mode 100644
index 000000000..fe613c9e9
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_NativeSecp256k1.h
@@ -0,0 +1,119 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+#include "include/secp256k1.h"
+/* Header for class org_bitcoin_NativeSecp256k1 */
+
+#ifndef _Included_org_bitcoin_NativeSecp256k1
+#define _Included_org_bitcoin_NativeSecp256k1
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_ctx_clone
+ * Signature: (J)J
+ */
+SECP256K1_API jlong JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ctx_1clone
+ (JNIEnv *, jclass, jlong);
+
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_context_randomize
+ * Signature: (Ljava/nio/ByteBuffer;J)I
+ */
+SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1context_1randomize
+ (JNIEnv *, jclass, jobject, jlong);
+
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_privkey_tweak_add
+ * Signature: (Ljava/nio/ByteBuffer;J)[[B
+ */
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1privkey_1tweak_1add
+ (JNIEnv *, jclass, jobject, jlong);
+
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_privkey_tweak_mul
+ * Signature: (Ljava/nio/ByteBuffer;J)[[B
+ */
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1privkey_1tweak_1mul
+ (JNIEnv *, jclass, jobject, jlong);
+
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_pubkey_tweak_add
+ * Signature: (Ljava/nio/ByteBuffer;JI)[[B
+ */
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1pubkey_1tweak_1add
+ (JNIEnv *, jclass, jobject, jlong, jint);
+
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_pubkey_tweak_mul
+ * Signature: (Ljava/nio/ByteBuffer;JI)[[B
+ */
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1pubkey_1tweak_1mul
+ (JNIEnv *, jclass, jobject, jlong, jint);
+
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_destroy_context
+ * Signature: (J)V
+ */
+SECP256K1_API void JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1destroy_1context
+ (JNIEnv *, jclass, jlong);
+
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_ecdsa_verify
+ * Signature: (Ljava/nio/ByteBuffer;JII)I
+ */
+SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1verify
+ (JNIEnv *, jclass, jobject, jlong, jint, jint);
+
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_ecdsa_sign
+ * Signature: (Ljava/nio/ByteBuffer;J)[[B
+ */
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdsa_1sign
+ (JNIEnv *, jclass, jobject, jlong);
+
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_ec_seckey_verify
+ * Signature: (Ljava/nio/ByteBuffer;J)I
+ */
+SECP256K1_API jint JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1seckey_1verify
+ (JNIEnv *, jclass, jobject, jlong);
+
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_ec_pubkey_create
+ * Signature: (Ljava/nio/ByteBuffer;J)[[B
+ */
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1pubkey_1create
+ (JNIEnv *, jclass, jobject, jlong);
+
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_ec_pubkey_parse
+ * Signature: (Ljava/nio/ByteBuffer;JI)[[B
+ */
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ec_1pubkey_1parse
+ (JNIEnv *, jclass, jobject, jlong, jint);
+
+/*
+ * Class: org_bitcoin_NativeSecp256k1
+ * Method: secp256k1_ecdh
+ * Signature: (Ljava/nio/ByteBuffer;JI)[[B
+ */
+SECP256K1_API jobjectArray JNICALL Java_org_bitcoin_NativeSecp256k1_secp256k1_1ecdh
+ (JNIEnv* env, jclass classObject, jobject byteBufferObject, jlong ctx_l, jint publen);
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_Secp256k1Context.c b/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_Secp256k1Context.c
new file mode 100644
index 000000000..a52939e7e
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_Secp256k1Context.c
@@ -0,0 +1,15 @@
+#include <stdlib.h>
+#include <stdint.h>
+#include "org_bitcoin_Secp256k1Context.h"
+#include "include/secp256k1.h"
+
+SECP256K1_API jlong JNICALL Java_org_bitcoin_Secp256k1Context_secp256k1_1init_1context
+ (JNIEnv* env, jclass classObject)
+{
+ secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+
+ (void)classObject;(void)env;
+
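+ /* The context pointer is returned as a jlong so the Java side can pass it back to later native calls. */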
+ return (uintptr_t)ctx;
+}
+
diff --git a/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_Secp256k1Context.h b/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_Secp256k1Context.h
new file mode 100644
index 000000000..0d2bc84b7
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/java/org_bitcoin_Secp256k1Context.h
@@ -0,0 +1,22 @@
+/* DO NOT EDIT THIS FILE - it is machine generated */
+#include <jni.h>
+#include "include/secp256k1.h"
+/* Header for class org_bitcoin_Secp256k1Context */
+
+#ifndef _Included_org_bitcoin_Secp256k1Context
+#define _Included_org_bitcoin_Secp256k1Context
+#ifdef __cplusplus
+extern "C" {
+#endif
+/*
+ * Class: org_bitcoin_Secp256k1Context
+ * Method: secp256k1_init_context
+ * Signature: ()J
+ */
+SECP256K1_API jlong JNICALL Java_org_bitcoin_Secp256k1Context_secp256k1_1init_1context
+ (JNIEnv *, jclass);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/modules/ecdh/Makefile.am.include b/crypto/secp256k1/libsecp256k1/src/modules/ecdh/Makefile.am.include
new file mode 100644
index 000000000..e3088b469
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/modules/ecdh/Makefile.am.include
@@ -0,0 +1,8 @@
+include_HEADERS += include/secp256k1_ecdh.h
+noinst_HEADERS += src/modules/ecdh/main_impl.h
+noinst_HEADERS += src/modules/ecdh/tests_impl.h
+if USE_BENCHMARK
+noinst_PROGRAMS += bench_ecdh
+bench_ecdh_SOURCES = src/bench_ecdh.c
+bench_ecdh_LDADD = libsecp256k1.la $(SECP_LIBS) $(COMMON_LIB)
+endif
diff --git a/crypto/secp256k1/libsecp256k1/src/modules/ecdh/main_impl.h b/crypto/secp256k1/libsecp256k1/src/modules/ecdh/main_impl.h
new file mode 100644
index 000000000..9e30fb73d
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/modules/ecdh/main_impl.h
@@ -0,0 +1,54 @@
+/**********************************************************************
+ * Copyright (c) 2015 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_MODULE_ECDH_MAIN_
+#define _SECP256K1_MODULE_ECDH_MAIN_
+
+#include "include/secp256k1_ecdh.h"
+#include "ecmult_const_impl.h"
+
+int secp256k1_ecdh(const secp256k1_context* ctx, unsigned char *result, const secp256k1_pubkey *point, const unsigned char *scalar) {
+ int ret = 0;
+ int overflow = 0;
+ secp256k1_gej res;
+ secp256k1_ge pt;
+ secp256k1_scalar s;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(result != NULL);
+ ARG_CHECK(point != NULL);
+ ARG_CHECK(scalar != NULL);
+
+ secp256k1_pubkey_load(ctx, &pt, point);
+ secp256k1_scalar_set_b32(&s, scalar, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(&s)) {
+ ret = 0;
+ } else {
+ unsigned char x[32];
+ unsigned char y[1];
+ secp256k1_sha256_t sha;
+
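+ /* Multiply the public point by the secret scalar with the constant-time multiplication routine. */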
+ secp256k1_ecmult_const(&res, &pt, &s);
+ secp256k1_ge_set_gej(&pt, &res);
+ /* Compute a hash of the point in compressed form
+ * Note we cannot use secp256k1_eckey_pubkey_serialize here since it does not
+ * expect its output to be secret and has a timing sidechannel. */
+ secp256k1_fe_normalize(&pt.x);
+ secp256k1_fe_normalize(&pt.y);
+ secp256k1_fe_get_b32(x, &pt.x);
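+ /* Compressed-point prefix byte: 0x02 for an even Y coordinate, 0x03 for an odd one. */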
+ y[0] = 0x02 | secp256k1_fe_is_odd(&pt.y);
+
+ secp256k1_sha256_initialize(&sha);
+ secp256k1_sha256_write(&sha, y, sizeof(y));
+ secp256k1_sha256_write(&sha, x, sizeof(x));
+ secp256k1_sha256_finalize(&sha, result);
+ ret = 1;
+ }
+
+ secp256k1_scalar_clear(&s);
+ return ret;
+}
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/modules/ecdh/tests_impl.h b/crypto/secp256k1/libsecp256k1/src/modules/ecdh/tests_impl.h
new file mode 100644
index 000000000..85a5d0a9a
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/modules/ecdh/tests_impl.h
@@ -0,0 +1,105 @@
+/**********************************************************************
+ * Copyright (c) 2015 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_MODULE_ECDH_TESTS_
+#define _SECP256K1_MODULE_ECDH_TESTS_
+
+void test_ecdh_api(void) {
+ /* Setup context that just counts errors */
+ secp256k1_context *tctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
+ secp256k1_pubkey point;
+ unsigned char res[32];
+ unsigned char s_one[32] = { 0 };
+ int32_t ecount = 0;
+ s_one[31] = 1;
+
+ secp256k1_context_set_error_callback(tctx, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(tctx, counting_illegal_callback_fn, &ecount);
+ CHECK(secp256k1_ec_pubkey_create(tctx, &point, s_one) == 1);
+
+ /* Check all NULLs are detected */
+ CHECK(secp256k1_ecdh(tctx, res, &point, s_one) == 1);
+ CHECK(ecount == 0);
+ CHECK(secp256k1_ecdh(tctx, NULL, &point, s_one) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_ecdh(tctx, res, NULL, s_one) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_ecdh(tctx, res, &point, NULL) == 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_ecdh(tctx, res, &point, s_one) == 1);
+ CHECK(ecount == 3);
+
+ /* Cleanup */
+ secp256k1_context_destroy(tctx);
+}
+
+void test_ecdh_generator_basepoint(void) {
+ unsigned char s_one[32] = { 0 };
+ secp256k1_pubkey point[2];
+ int i;
+
+ s_one[31] = 1;
+ /* Check against pubkey creation when the basepoint is the generator */
+ for (i = 0; i < 100; ++i) {
+ secp256k1_sha256_t sha;
+ unsigned char s_b32[32];
+ unsigned char output_ecdh[32];
+ unsigned char output_ser[32];
+ unsigned char point_ser[33];
+ size_t point_ser_len = sizeof(point_ser);
+ secp256k1_scalar s;
+
+ random_scalar_order(&s);
+ secp256k1_scalar_get_b32(s_b32, &s);
+
+ /* compute using ECDH function */
+ CHECK(secp256k1_ec_pubkey_create(ctx, &point[0], s_one) == 1);
+ CHECK(secp256k1_ecdh(ctx, output_ecdh, &point[0], s_b32) == 1);
+ /* compute "explicitly" */
+ CHECK(secp256k1_ec_pubkey_create(ctx, &point[1], s_b32) == 1);
+ CHECK(secp256k1_ec_pubkey_serialize(ctx, point_ser, &point_ser_len, &point[1], SECP256K1_EC_COMPRESSED) == 1);
+ CHECK(point_ser_len == sizeof(point_ser));
+ secp256k1_sha256_initialize(&sha);
+ secp256k1_sha256_write(&sha, point_ser, point_ser_len);
+ secp256k1_sha256_finalize(&sha, output_ser);
+ /* compare */
+ CHECK(memcmp(output_ecdh, output_ser, sizeof(output_ser)) == 0);
+ }
+}
+
+void test_bad_scalar(void) {
+ unsigned char s_zero[32] = { 0 };
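+ /* The group order n itself, which overflows when interpreted as a scalar. */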
+ unsigned char s_overflow[32] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe,
+ 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b,
+ 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41
+ };
+ unsigned char s_rand[32] = { 0 };
+ unsigned char output[32];
+ secp256k1_scalar rand;
+ secp256k1_pubkey point;
+
+ /* Create random point */
+ random_scalar_order(&rand);
+ secp256k1_scalar_get_b32(s_rand, &rand);
+ CHECK(secp256k1_ec_pubkey_create(ctx, &point, s_rand) == 1);
+
+ /* Try to multiply it by bad values */
+ CHECK(secp256k1_ecdh(ctx, output, &point, s_zero) == 0);
+ CHECK(secp256k1_ecdh(ctx, output, &point, s_overflow) == 0);
+ /* ...and a good one */
+ s_overflow[31] -= 1;
+ CHECK(secp256k1_ecdh(ctx, output, &point, s_overflow) == 1);
+}
+
+void run_ecdh_tests(void) {
+ test_ecdh_api();
+ test_ecdh_generator_basepoint();
+ test_bad_scalar();
+}
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/modules/recovery/Makefile.am.include b/crypto/secp256k1/libsecp256k1/src/modules/recovery/Makefile.am.include
new file mode 100644
index 000000000..bf23c26e7
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/modules/recovery/Makefile.am.include
@@ -0,0 +1,8 @@
+include_HEADERS += include/secp256k1_recovery.h
+noinst_HEADERS += src/modules/recovery/main_impl.h
+noinst_HEADERS += src/modules/recovery/tests_impl.h
+if USE_BENCHMARK
+noinst_PROGRAMS += bench_recover
+bench_recover_SOURCES = src/bench_recover.c
+bench_recover_LDADD = libsecp256k1.la $(SECP_LIBS) $(COMMON_LIB)
+endif
diff --git a/crypto/secp256k1/libsecp256k1/src/modules/recovery/main_impl.h b/crypto/secp256k1/libsecp256k1/src/modules/recovery/main_impl.h
new file mode 100755
index 000000000..c6fbe2398
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/modules/recovery/main_impl.h
@@ -0,0 +1,193 @@
+/**********************************************************************
+ * Copyright (c) 2013-2015 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_MODULE_RECOVERY_MAIN_
+#define _SECP256K1_MODULE_RECOVERY_MAIN_
+
+#include "include/secp256k1_recovery.h"
+
+static void secp256k1_ecdsa_recoverable_signature_load(const secp256k1_context* ctx, secp256k1_scalar* r, secp256k1_scalar* s, int* recid, const secp256k1_ecdsa_recoverable_signature* sig) {
+ (void)ctx;
+ if (sizeof(secp256k1_scalar) == 32) {
+ /* When the secp256k1_scalar type is exactly 32 bytes, use its
+ * representation inside secp256k1_ecdsa_signature, as conversion is very fast.
+ * Note that secp256k1_ecdsa_signature_save must use the same representation. */
+ memcpy(r, &sig->data[0], 32);
+ memcpy(s, &sig->data[32], 32);
+ } else {
+ secp256k1_scalar_set_b32(r, &sig->data[0], NULL);
+ secp256k1_scalar_set_b32(s, &sig->data[32], NULL);
+ }
+ *recid = sig->data[64];
+}
+
+static void secp256k1_ecdsa_recoverable_signature_save(secp256k1_ecdsa_recoverable_signature* sig, const secp256k1_scalar* r, const secp256k1_scalar* s, int recid) {
+ if (sizeof(secp256k1_scalar) == 32) {
+ memcpy(&sig->data[0], r, 32);
+ memcpy(&sig->data[32], s, 32);
+ } else {
+ secp256k1_scalar_get_b32(&sig->data[0], r);
+ secp256k1_scalar_get_b32(&sig->data[32], s);
+ }
+ sig->data[64] = recid;
+}
+
+int secp256k1_ecdsa_recoverable_signature_parse_compact(const secp256k1_context* ctx, secp256k1_ecdsa_recoverable_signature* sig, const unsigned char *input64, int recid) {
+ secp256k1_scalar r, s;
+ int ret = 1;
+ int overflow = 0;
+
+ (void)ctx;
+ ARG_CHECK(sig != NULL);
+ ARG_CHECK(input64 != NULL);
+ ARG_CHECK(recid >= 0 && recid <= 3);
+
+ secp256k1_scalar_set_b32(&r, &input64[0], &overflow);
+ ret &= !overflow;
+ secp256k1_scalar_set_b32(&s, &input64[32], &overflow);
+ ret &= !overflow;
+ if (ret) {
+ secp256k1_ecdsa_recoverable_signature_save(sig, &r, &s, recid);
+ } else {
+ memset(sig, 0, sizeof(*sig));
+ }
+ return ret;
+}
+
+int secp256k1_ecdsa_recoverable_signature_serialize_compact(const secp256k1_context* ctx, unsigned char *output64, int *recid, const secp256k1_ecdsa_recoverable_signature* sig) {
+ secp256k1_scalar r, s;
+
+ (void)ctx;
+ ARG_CHECK(output64 != NULL);
+ ARG_CHECK(sig != NULL);
+ ARG_CHECK(recid != NULL);
+
+ secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, recid, sig);
+ secp256k1_scalar_get_b32(&output64[0], &r);
+ secp256k1_scalar_get_b32(&output64[32], &s);
+ return 1;
+}
+
+int secp256k1_ecdsa_recoverable_signature_convert(const secp256k1_context* ctx, secp256k1_ecdsa_signature* sig, const secp256k1_ecdsa_recoverable_signature* sigin) {
+ secp256k1_scalar r, s;
+ int recid;
+
+ (void)ctx;
+ ARG_CHECK(sig != NULL);
+ ARG_CHECK(sigin != NULL);
+
+ secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, sigin);
+ secp256k1_ecdsa_signature_save(sig, &r, &s);
+ return 1;
+}
+
+static int secp256k1_ecdsa_sig_recover(const secp256k1_ecmult_context *ctx, const secp256k1_scalar *sigr, const secp256k1_scalar* sigs, secp256k1_ge *pubkey, const secp256k1_scalar *message, int recid) {
+ unsigned char brx[32];
+ secp256k1_fe fx;
+ secp256k1_ge x;
+ secp256k1_gej xj;
+ secp256k1_scalar rn, u1, u2;
+ secp256k1_gej qj;
+ int r;
+
+ if (secp256k1_scalar_is_zero(sigr) || secp256k1_scalar_is_zero(sigs)) {
+ return 0;
+ }
+
+ secp256k1_scalar_get_b32(brx, sigr);
+ r = secp256k1_fe_set_b32(&fx, brx);
+ (void)r;
+ VERIFY_CHECK(r); /* brx comes from a scalar, so is less than the order; certainly less than p */
+ if (recid & 2) {
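+ /* Bit 1 of recid indicates that r is the x coordinate reduced modulo the order, so add the order back before lifting it to a point. */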
+ if (secp256k1_fe_cmp_var(&fx, &secp256k1_ecdsa_const_p_minus_order) >= 0) {
+ return 0;
+ }
+ secp256k1_fe_add(&fx, &secp256k1_ecdsa_const_order_as_fe);
+ }
+ if (!secp256k1_ge_set_xo_var(&x, &fx, recid & 1)) {
+ return 0;
+ }
+ secp256k1_gej_set_ge(&xj, &x);
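+ /* Recover Q = r^-1 * (s*R - m*G): u1 = -m/r, u2 = s/r, then Q = u2*R + u1*G. */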
+ secp256k1_scalar_inverse_var(&rn, sigr);
+ secp256k1_scalar_mul(&u1, &rn, message);
+ secp256k1_scalar_negate(&u1, &u1);
+ secp256k1_scalar_mul(&u2, &rn, sigs);
+ secp256k1_ecmult(ctx, &qj, &xj, &u2, &u1);
+ secp256k1_ge_set_gej_var(pubkey, &qj);
+ return !secp256k1_gej_is_infinity(&qj);
+}
+
+int secp256k1_ecdsa_sign_recoverable(const secp256k1_context* ctx, secp256k1_ecdsa_recoverable_signature *signature, const unsigned char *msg32, const unsigned char *seckey, secp256k1_nonce_function noncefp, const void* noncedata) {
+ secp256k1_scalar r, s;
+ secp256k1_scalar sec, non, msg;
+ int recid;
+ int ret = 0;
+ int overflow = 0;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
+ ARG_CHECK(msg32 != NULL);
+ ARG_CHECK(signature != NULL);
+ ARG_CHECK(seckey != NULL);
+ if (noncefp == NULL) {
+ noncefp = secp256k1_nonce_function_default;
+ }
+
+ secp256k1_scalar_set_b32(&sec, seckey, &overflow);
+ /* Fail if the secret key is invalid. */
+ if (!overflow && !secp256k1_scalar_is_zero(&sec)) {
+ unsigned char nonce32[32];
+ unsigned int count = 0;
+ secp256k1_scalar_set_b32(&msg, msg32, NULL);
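+ /* Retry with fresh nonces until signing succeeds or the nonce function gives up. */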
+ while (1) {
+ ret = noncefp(nonce32, msg32, seckey, NULL, (void*)noncedata, count);
+ if (!ret) {
+ break;
+ }
+ secp256k1_scalar_set_b32(&non, nonce32, &overflow);
+ if (!secp256k1_scalar_is_zero(&non) && !overflow) {
+ if (secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &r, &s, &sec, &msg, &non, &recid)) {
+ break;
+ }
+ }
+ count++;
+ }
+ memset(nonce32, 0, 32);
+ secp256k1_scalar_clear(&msg);
+ secp256k1_scalar_clear(&non);
+ secp256k1_scalar_clear(&sec);
+ }
+ if (ret) {
+ secp256k1_ecdsa_recoverable_signature_save(signature, &r, &s, recid);
+ } else {
+ memset(signature, 0, sizeof(*signature));
+ }
+ return ret;
+}
+
+int secp256k1_ecdsa_recover(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const secp256k1_ecdsa_recoverable_signature *signature, const unsigned char *msg32) {
+ secp256k1_ge q;
+ secp256k1_scalar r, s;
+ secp256k1_scalar m;
+ int recid;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
+ ARG_CHECK(msg32 != NULL);
+ ARG_CHECK(signature != NULL);
+ ARG_CHECK(pubkey != NULL);
+
+ secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, signature);
+ VERIFY_CHECK(recid >= 0 && recid < 4); /* should have been caught in parse_compact */
+ secp256k1_scalar_set_b32(&m, msg32, NULL);
+ if (secp256k1_ecdsa_sig_recover(&ctx->ecmult_ctx, &r, &s, &q, &m, recid)) {
+ secp256k1_pubkey_save(pubkey, &q);
+ return 1;
+ } else {
+ memset(pubkey, 0, sizeof(*pubkey));
+ return 0;
+ }
+}
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/modules/recovery/tests_impl.h b/crypto/secp256k1/libsecp256k1/src/modules/recovery/tests_impl.h
new file mode 100644
index 000000000..765c7dd81
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/modules/recovery/tests_impl.h
@@ -0,0 +1,393 @@
+/**********************************************************************
+ * Copyright (c) 2013-2015 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_MODULE_RECOVERY_TESTS_
+#define _SECP256K1_MODULE_RECOVERY_TESTS_
+
+static int recovery_test_nonce_function(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) {
+ (void) msg32;
+ (void) key32;
+ (void) algo16;
+ (void) data;
+
+ /* On the first run, return an all-zero nonce to force a second run */
+ if (counter == 0) {
+ memset(nonce32, 0, 32);
+ return 1;
+ }
+ /* On the second run, return an overflowing nonce to force a third run */
+ if (counter == 1) {
+ memset(nonce32, 0xff, 32);
+ return 1;
+ }
+ /* On the next run, return a valid nonce, but flip a coin as to whether or not to fail signing. */
+ memset(nonce32, 1, 32);
+ return secp256k1_rand_bits(1);
+}
+
+void test_ecdsa_recovery_api(void) {
+ /* Setup contexts that just count errors */
+ secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
+ secp256k1_context *sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
+ secp256k1_context *vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
+ secp256k1_context *both = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+ secp256k1_pubkey pubkey;
+ secp256k1_pubkey recpubkey;
+ secp256k1_ecdsa_signature normal_sig;
+ secp256k1_ecdsa_recoverable_signature recsig;
+ unsigned char privkey[32] = { 1 };
+ unsigned char message[32] = { 2 };
+ int32_t ecount = 0;
+ int recid = 0;
+ unsigned char sig[74];
+ unsigned char zero_privkey[32] = { 0 };
+ unsigned char over_privkey[32] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+
+ secp256k1_context_set_error_callback(none, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_error_callback(sign, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_error_callback(vrfy, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_error_callback(both, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(none, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(both, counting_illegal_callback_fn, &ecount);
+
+ /* Construct and verify corresponding public key. */
+ CHECK(secp256k1_ec_seckey_verify(ctx, privkey) == 1);
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, privkey) == 1);
+
+ /* Check bad contexts and NULLs for signing */
+ ecount = 0;
+ CHECK(secp256k1_ecdsa_sign_recoverable(none, &recsig, message, privkey, NULL, NULL) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_ecdsa_sign_recoverable(sign, &recsig, message, privkey, NULL, NULL) == 1);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_ecdsa_sign_recoverable(vrfy, &recsig, message, privkey, NULL, NULL) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_ecdsa_sign_recoverable(both, NULL, message, privkey, NULL, NULL) == 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, NULL, privkey, NULL, NULL) == 0);
+ CHECK(ecount == 4);
+ CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, NULL, NULL, NULL) == 0);
+ CHECK(ecount == 5);
+ /* This will fail or succeed randomly, and in either case will not trigger an ARG_CHECK failure */
+ secp256k1_ecdsa_sign_recoverable(both, &recsig, message, privkey, recovery_test_nonce_function, NULL);
+ CHECK(ecount == 5);
+ /* These will all fail, but not in an ARG_CHECK way */
+ CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, zero_privkey, NULL, NULL) == 0);
+ CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, over_privkey, NULL, NULL) == 0);
+ /* This one will succeed. */
+ CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1);
+ CHECK(ecount == 5);
+
+ /* Check signing with a goofy nonce function */
+
+ /* Check bad contexts and NULLs for recovery */
+ ecount = 0;
+ CHECK(secp256k1_ecdsa_recover(none, &recpubkey, &recsig, message) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_ecdsa_recover(sign, &recpubkey, &recsig, message) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_ecdsa_recover(vrfy, &recpubkey, &recsig, message) == 1);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_ecdsa_recover(both, &recpubkey, &recsig, message) == 1);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_ecdsa_recover(both, NULL, &recsig, message) == 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_ecdsa_recover(both, &recpubkey, NULL, message) == 0);
+ CHECK(ecount == 4);
+ CHECK(secp256k1_ecdsa_recover(both, &recpubkey, &recsig, NULL) == 0);
+ CHECK(ecount == 5);
+
+ /* Check NULLs for conversion */
+ CHECK(secp256k1_ecdsa_sign(both, &normal_sig, message, privkey, NULL, NULL) == 1);
+ ecount = 0;
+ CHECK(secp256k1_ecdsa_recoverable_signature_convert(both, NULL, &recsig) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_ecdsa_recoverable_signature_convert(both, &normal_sig, NULL) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_ecdsa_recoverable_signature_convert(both, &normal_sig, &recsig) == 1);
+
+ /* Check NULLs for de/serialization */
+ CHECK(secp256k1_ecdsa_sign_recoverable(both, &recsig, message, privkey, NULL, NULL) == 1);
+ ecount = 0;
+ CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(both, NULL, &recid, &recsig) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(both, sig, NULL, &recsig) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(both, sig, &recid, NULL) == 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(both, sig, &recid, &recsig) == 1);
+
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, NULL, sig, recid) == 0);
+ CHECK(ecount == 4);
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, &recsig, NULL, recid) == 0);
+ CHECK(ecount == 5);
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, -1) == 0);
+ CHECK(ecount == 6);
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, 5) == 0);
+ CHECK(ecount == 7);
+ /* overflow in signature will fail but not affect ecount */
+ memcpy(sig, over_privkey, 32);
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(both, &recsig, sig, recid) == 0);
+ CHECK(ecount == 7);
+
+ /* cleanup */
+ secp256k1_context_destroy(none);
+ secp256k1_context_destroy(sign);
+ secp256k1_context_destroy(vrfy);
+ secp256k1_context_destroy(both);
+}
+
+void test_ecdsa_recovery_end_to_end(void) {
+ unsigned char extra[32] = {0x00};
+ unsigned char privkey[32];
+ unsigned char message[32];
+ secp256k1_ecdsa_signature signature[5];
+ secp256k1_ecdsa_recoverable_signature rsignature[5];
+ unsigned char sig[74];
+ secp256k1_pubkey pubkey;
+ secp256k1_pubkey recpubkey;
+ int recid = 0;
+
+ /* Generate a random key and message. */
+ {
+ secp256k1_scalar msg, key;
+ random_scalar_order_test(&msg);
+ random_scalar_order_test(&key);
+ secp256k1_scalar_get_b32(privkey, &key);
+ secp256k1_scalar_get_b32(message, &msg);
+ }
+
+ /* Construct and verify corresponding public key. */
+ CHECK(secp256k1_ec_seckey_verify(ctx, privkey) == 1);
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, privkey) == 1);
+
+ /* Serialize/parse compact and verify/recover. */
+ extra[0] = 0;
+ CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[0], message, privkey, NULL, NULL) == 1);
+ CHECK(secp256k1_ecdsa_sign(ctx, &signature[0], message, privkey, NULL, NULL) == 1);
+ CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[4], message, privkey, NULL, NULL) == 1);
+ CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[1], message, privkey, NULL, extra) == 1);
+ extra[31] = 1;
+ CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[2], message, privkey, NULL, extra) == 1);
+ extra[31] = 0;
+ extra[0] = 1;
+ CHECK(secp256k1_ecdsa_sign_recoverable(ctx, &rsignature[3], message, privkey, NULL, extra) == 1);
+ CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1);
+ CHECK(secp256k1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
+ CHECK(memcmp(&signature[4], &signature[0], 64) == 0);
+ CHECK(secp256k1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1);
+ memset(&rsignature[4], 0, sizeof(rsignature[4]));
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
+ CHECK(secp256k1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 1);
+ /* Parse compact (with recovery id) and recover. */
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
+ CHECK(secp256k1_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 1);
+ CHECK(memcmp(&pubkey, &recpubkey, sizeof(pubkey)) == 0);
+ /* Serialize/destroy/parse signature and verify again. */
+ CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(ctx, sig, &recid, &rsignature[4]) == 1);
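+ /* Corrupt one random byte of the 64-byte compact signature. */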
+ sig[secp256k1_rand_bits(6)] += 1 + secp256k1_rand_int(255);
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsignature[4], sig, recid) == 1);
+ CHECK(secp256k1_ecdsa_recoverable_signature_convert(ctx, &signature[4], &rsignature[4]) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &signature[4], message, &pubkey) == 0);
+ /* Recover again */
+ CHECK(secp256k1_ecdsa_recover(ctx, &recpubkey, &rsignature[4], message) == 0 ||
+ memcmp(&pubkey, &recpubkey, sizeof(pubkey)) != 0);
+}
+
+/* Tests several edge cases. */
+void test_ecdsa_recovery_edge_cases(void) {
+ const unsigned char msg32[32] = {
+ 'T', 'h', 'i', 's', ' ', 'i', 's', ' ',
+ 'a', ' ', 'v', 'e', 'r', 'y', ' ', 's',
+ 'e', 'c', 'r', 'e', 't', ' ', 'm', 'e',
+ 's', 's', 'a', 'g', 'e', '.', '.', '.'
+ };
+ const unsigned char sig64[64] = {
+ /* Generated by signing the above message with nonce 'This is the nonce we will use...'
+ * and secret key 0 (which is not valid), resulting in recid 0. */
+ 0x67, 0xCB, 0x28, 0x5F, 0x9C, 0xD1, 0x94, 0xE8,
+ 0x40, 0xD6, 0x29, 0x39, 0x7A, 0xF5, 0x56, 0x96,
+ 0x62, 0xFD, 0xE4, 0x46, 0x49, 0x99, 0x59, 0x63,
+ 0x17, 0x9A, 0x7D, 0xD1, 0x7B, 0xD2, 0x35, 0x32,
+ 0x4B, 0x1B, 0x7D, 0xF3, 0x4C, 0xE1, 0xF6, 0x8E,
+ 0x69, 0x4F, 0xF6, 0xF1, 0x1A, 0xC7, 0x51, 0xDD,
+ 0x7D, 0xD7, 0x3E, 0x38, 0x7E, 0xE4, 0xFC, 0x86,
+ 0x6E, 0x1B, 0xE8, 0xEC, 0xC7, 0xDD, 0x95, 0x57
+ };
+ secp256k1_pubkey pubkey;
+ /* signature (r,s) = (4,4), which can be recovered with all 4 recids. */
+ const unsigned char sigb64[64] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+ };
+ secp256k1_pubkey pubkeyb;
+ secp256k1_ecdsa_recoverable_signature rsig;
+ secp256k1_ecdsa_signature sig;
+ int recid;
+
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 0));
+ CHECK(!secp256k1_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 1));
+ CHECK(secp256k1_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 2));
+ CHECK(!secp256k1_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sig64, 3));
+ CHECK(!secp256k1_ecdsa_recover(ctx, &pubkey, &rsig, msg32));
+
+ for (recid = 0; recid < 4; recid++) {
+ int i;
+ int recid2;
+ /* (4,4) encoded in DER. */
+ unsigned char sigbder[8] = {0x30, 0x06, 0x02, 0x01, 0x04, 0x02, 0x01, 0x04};
+ unsigned char sigcder_zr[7] = {0x30, 0x05, 0x02, 0x00, 0x02, 0x01, 0x01};
+ unsigned char sigcder_zs[7] = {0x30, 0x05, 0x02, 0x01, 0x01, 0x02, 0x00};
+ unsigned char sigbderalt1[39] = {
+ 0x30, 0x25, 0x02, 0x20, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x04, 0x02, 0x01, 0x04,
+ };
+ unsigned char sigbderalt2[39] = {
+ 0x30, 0x25, 0x02, 0x01, 0x04, 0x02, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+ };
+ unsigned char sigbderalt3[40] = {
+ 0x30, 0x26, 0x02, 0x21, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x04, 0x02, 0x01, 0x04,
+ };
+ unsigned char sigbderalt4[40] = {
+ 0x30, 0x26, 0x02, 0x01, 0x04, 0x02, 0x21, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04,
+ };
+ /* (order + r,4) encoded in DER. */
+ unsigned char sigbderlong[40] = {
+ 0x30, 0x26, 0x02, 0x21, 0x00, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFE, 0xBA, 0xAE, 0xDC,
+ 0xE6, 0xAF, 0x48, 0xA0, 0x3B, 0xBF, 0xD2, 0x5E,
+ 0x8C, 0xD0, 0x36, 0x41, 0x45, 0x02, 0x01, 0x04
+ };
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid) == 1);
+ CHECK(secp256k1_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 1);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 1);
+ for (recid2 = 0; recid2 < 4; recid2++) {
+ secp256k1_pubkey pubkey2b;
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigb64, recid2) == 1);
+ CHECK(secp256k1_ecdsa_recover(ctx, &pubkey2b, &rsig, msg32) == 1);
+ /* Verifying with (order + r,4) should always fail. */
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderlong, sizeof(sigbderlong)) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
+ }
+ /* DER parsing tests. */
+ /* Zero length r/s. */
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder_zr, sizeof(sigcder_zr)) == 0);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder_zs, sizeof(sigcder_zs)) == 0);
+ /* Leading zeros. */
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt1, sizeof(sigbderalt1)) == 0);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt2, sizeof(sigbderalt2)) == 0);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 0);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 0);
+ sigbderalt3[4] = 1;
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt3, sizeof(sigbderalt3)) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
+ sigbderalt4[7] = 1;
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbderalt4, sizeof(sigbderalt4)) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
+ /* Damage signature. */
+ sigbder[7]++;
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
+ sigbder[7]--;
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, 6) == 0);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder) - 1) == 0);
+ for(i = 0; i < 8; i++) {
+ int c;
+ unsigned char orig = sigbder[i];
+ /*Try every single-byte change.*/
+ for (c = 0; c < 256; c++) {
+ if (c == orig ) {
+ continue;
+ }
+ sigbder[i] = c;
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigbder, sizeof(sigbder)) == 0 || secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyb) == 0);
+ }
+ sigbder[i] = orig;
+ }
+ }
+
+ /* Test r/s equal to zero */
+ {
+ /* (1,1) encoded in DER. */
+ unsigned char sigcder[8] = {0x30, 0x06, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01};
+ unsigned char sigc64[64] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ };
+ secp256k1_pubkey pubkeyc;
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
+ CHECK(secp256k1_ecdsa_recover(ctx, &pubkeyc, &rsig, msg32) == 1);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 1);
+ sigcder[4] = 0;
+ sigc64[31] = 0;
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
+ CHECK(secp256k1_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0);
+ sigcder[4] = 1;
+ sigcder[7] = 0;
+ sigc64[31] = 1;
+ sigc64[63] = 0;
+ CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(ctx, &rsig, sigc64, 0) == 1);
+ CHECK(secp256k1_ecdsa_recover(ctx, &pubkeyb, &rsig, msg32) == 0);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, sigcder, sizeof(sigcder)) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg32, &pubkeyc) == 0);
+ }
+}
+
+void run_recovery_tests(void) {
+ int i;
+ for (i = 0; i < count; i++) {
+ test_ecdsa_recovery_api();
+ }
+ for (i = 0; i < 64*count; i++) {
+ test_ecdsa_recovery_end_to_end();
+ }
+ test_ecdsa_recovery_edge_cases();
+}
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/num.h b/crypto/secp256k1/libsecp256k1/src/num.h
new file mode 100644
index 000000000..eff842200
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/num.h
@@ -0,0 +1,74 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_NUM_
+#define _SECP256K1_NUM_
+
+#ifndef USE_NUM_NONE
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+#if defined(USE_NUM_GMP)
+#include "num_gmp.h"
+#else
+#error "Please select num implementation"
+#endif
+
+/** Copy a number. */
+static void secp256k1_num_copy(secp256k1_num *r, const secp256k1_num *a);
+
+/** Convert a number's absolute value to a binary big-endian string.
+ * There must be enough space. */
+static void secp256k1_num_get_bin(unsigned char *r, unsigned int rlen, const secp256k1_num *a);
+
+/** Set a number to the value of a binary big-endian string. */
+static void secp256k1_num_set_bin(secp256k1_num *r, const unsigned char *a, unsigned int alen);
+
+/** Compute a modular inverse. The input must be less than the modulus. */
+static void secp256k1_num_mod_inverse(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *m);
+
+/** Compute the jacobi symbol (a|b). b must be positive and odd. */
+static int secp256k1_num_jacobi(const secp256k1_num *a, const secp256k1_num *b);
+
+/** Compare the absolute value of two numbers. */
+static int secp256k1_num_cmp(const secp256k1_num *a, const secp256k1_num *b);
+
+/** Test whether two numbers are equal (including sign). */
+static int secp256k1_num_eq(const secp256k1_num *a, const secp256k1_num *b);
+
+/** Add two (signed) numbers. */
+static void secp256k1_num_add(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b);
+
+/** Subtract two (signed) numbers. */
+static void secp256k1_num_sub(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b);
+
+/** Multiply two (signed) numbers. */
+static void secp256k1_num_mul(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b);
+
+/** Replace a number by its remainder modulo m. M's sign is ignored. The result is a number between 0 and m-1,
+ even if r was negative. */
+static void secp256k1_num_mod(secp256k1_num *r, const secp256k1_num *m);
+
+/** Right-shift the passed number by bits. */
+static void secp256k1_num_shift(secp256k1_num *r, int bits);
+
+/** Check whether a number is zero. */
+static int secp256k1_num_is_zero(const secp256k1_num *a);
+
+/** Check whether a number is one. */
+static int secp256k1_num_is_one(const secp256k1_num *a);
+
+/** Check whether a number is strictly negative. */
+static int secp256k1_num_is_neg(const secp256k1_num *a);
+
+/** Change a number's sign. */
+static void secp256k1_num_negate(secp256k1_num *r);
+
+#endif
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/num_gmp.h b/crypto/secp256k1/libsecp256k1/src/num_gmp.h
new file mode 100644
index 000000000..7dd813088
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/num_gmp.h
@@ -0,0 +1,20 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_NUM_REPR_
+#define _SECP256K1_NUM_REPR_
+
+#include <gmp.h>
+
+#define NUM_LIMBS ((256+GMP_NUMB_BITS-1)/GMP_NUMB_BITS)
+
+typedef struct {
+ mp_limb_t data[2*NUM_LIMBS];
+ int neg;
+ int limbs;
+} secp256k1_num;
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/num_gmp_impl.h b/crypto/secp256k1/libsecp256k1/src/num_gmp_impl.h
new file mode 100644
index 000000000..3a46495ee
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/num_gmp_impl.h
@@ -0,0 +1,288 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_NUM_REPR_IMPL_H_
+#define _SECP256K1_NUM_REPR_IMPL_H_
+
+#include <string.h>
+#include <stdlib.h>
+#include <gmp.h>
+
+#include "util.h"
+#include "num.h"
+
+#ifdef VERIFY
+static void secp256k1_num_sanity(const secp256k1_num *a) {
+ VERIFY_CHECK(a->limbs == 1 || (a->limbs > 1 && a->data[a->limbs-1] != 0));
+}
+#else
+#define secp256k1_num_sanity(a) do { } while(0)
+#endif
+
+static void secp256k1_num_copy(secp256k1_num *r, const secp256k1_num *a) {
+ *r = *a;
+}
+
+static void secp256k1_num_get_bin(unsigned char *r, unsigned int rlen, const secp256k1_num *a) {
+ unsigned char tmp[65];
+ int len = 0;
+ int shift = 0;
+ if (a->limbs>1 || a->data[0] != 0) {
+ len = mpn_get_str(tmp, 256, (mp_limb_t*)a->data, a->limbs);
+ }
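+ /* Skip any leading zero bytes so the value ends up right-aligned in the rlen-byte output. */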
+ while (shift < len && tmp[shift] == 0) shift++;
+ VERIFY_CHECK(len-shift <= (int)rlen);
+ memset(r, 0, rlen - len + shift);
+ if (len > shift) {
+ memcpy(r + rlen - len + shift, tmp + shift, len - shift);
+ }
+ memset(tmp, 0, sizeof(tmp));
+}
+
+static void secp256k1_num_set_bin(secp256k1_num *r, const unsigned char *a, unsigned int alen) {
+ int len;
+ VERIFY_CHECK(alen > 0);
+ VERIFY_CHECK(alen <= 64);
+ len = mpn_set_str(r->data, a, alen, 256);
+ if (len == 0) {
+ r->data[0] = 0;
+ len = 1;
+ }
+ VERIFY_CHECK(len <= NUM_LIMBS*2);
+ r->limbs = len;
+ r->neg = 0;
+ while (r->limbs > 1 && r->data[r->limbs-1]==0) {
+ r->limbs--;
+ }
+}
+
+static void secp256k1_num_add_abs(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) {
+ mp_limb_t c = mpn_add(r->data, a->data, a->limbs, b->data, b->limbs);
+ r->limbs = a->limbs;
+ if (c != 0) {
+ VERIFY_CHECK(r->limbs < 2*NUM_LIMBS);
+ r->data[r->limbs++] = c;
+ }
+}
+
+static void secp256k1_num_sub_abs(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) {
+ mp_limb_t c = mpn_sub(r->data, a->data, a->limbs, b->data, b->limbs);
+ (void)c;
+ VERIFY_CHECK(c == 0);
+ r->limbs = a->limbs;
+ while (r->limbs > 1 && r->data[r->limbs-1]==0) {
+ r->limbs--;
+ }
+}
+
+static void secp256k1_num_mod(secp256k1_num *r, const secp256k1_num *m) {
+ secp256k1_num_sanity(r);
+ secp256k1_num_sanity(m);
+
+ if (r->limbs >= m->limbs) {
+ mp_limb_t t[2*NUM_LIMBS];
+ mpn_tdiv_qr(t, r->data, 0, r->data, r->limbs, m->data, m->limbs);
+ memset(t, 0, sizeof(t));
+ r->limbs = m->limbs;
+ while (r->limbs > 1 && r->data[r->limbs-1]==0) {
+ r->limbs--;
+ }
+ }
+
+ if (r->neg && (r->limbs > 1 || r->data[0] != 0)) {
+ secp256k1_num_sub_abs(r, m, r);
+ r->neg = 0;
+ }
+}
+
+static void secp256k1_num_mod_inverse(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *m) {
+ int i;
+ mp_limb_t g[NUM_LIMBS+1];
+ mp_limb_t u[NUM_LIMBS+1];
+ mp_limb_t v[NUM_LIMBS+1];
+ mp_size_t sn;
+ mp_size_t gn;
+ secp256k1_num_sanity(a);
+ secp256k1_num_sanity(m);
+
+ /** mpn_gcdext computes: (G,S) = gcdext(U,V), where
+ * * G = gcd(U,V)
+ * * G = U*S + V*T
+ * * U has equal or more limbs than V, and V has no padding
+ * If we set U to be (a padded version of) a, and V = m:
+ * G = a*S + m*T
+ * G = a*S mod m
+ * Assuming G=1:
+ * S = 1/a mod m
+ */
+ VERIFY_CHECK(m->limbs <= NUM_LIMBS);
+ VERIFY_CHECK(m->data[m->limbs-1] != 0);
+ for (i = 0; i < m->limbs; i++) {
+ u[i] = (i < a->limbs) ? a->data[i] : 0;
+ v[i] = m->data[i];
+ }
+ sn = NUM_LIMBS+1;
+ gn = mpn_gcdext(g, r->data, &sn, u, m->limbs, v, m->limbs);
+ (void)gn;
+ VERIFY_CHECK(gn == 1);
+ VERIFY_CHECK(g[0] == 1);
+ r->neg = a->neg ^ m->neg;
+ if (sn < 0) {
+ mpn_sub(r->data, m->data, m->limbs, r->data, -sn);
+ r->limbs = m->limbs;
+ while (r->limbs > 1 && r->data[r->limbs-1]==0) {
+ r->limbs--;
+ }
+ } else {
+ r->limbs = sn;
+ }
+ memset(g, 0, sizeof(g));
+ memset(u, 0, sizeof(u));
+ memset(v, 0, sizeof(v));
+}
+
+static int secp256k1_num_jacobi(const secp256k1_num *a, const secp256k1_num *b) {
+ int ret;
+ mpz_t ga, gb;
+ secp256k1_num_sanity(a);
+ secp256k1_num_sanity(b);
+ VERIFY_CHECK(!b->neg && (b->limbs > 0) && (b->data[0] & 1));
+
+ mpz_inits(ga, gb, NULL);
+
+ mpz_import(gb, b->limbs, -1, sizeof(mp_limb_t), 0, 0, b->data);
+ mpz_import(ga, a->limbs, -1, sizeof(mp_limb_t), 0, 0, a->data);
+ if (a->neg) {
+ mpz_neg(ga, ga);
+ }
+
+ ret = mpz_jacobi(ga, gb);
+
+ mpz_clears(ga, gb, NULL);
+
+ return ret;
+}
+
+static int secp256k1_num_is_one(const secp256k1_num *a) {
+ return (a->limbs == 1 && a->data[0] == 1);
+}
+
+static int secp256k1_num_is_zero(const secp256k1_num *a) {
+ return (a->limbs == 1 && a->data[0] == 0);
+}
+
+static int secp256k1_num_is_neg(const secp256k1_num *a) {
+ return (a->limbs > 1 || a->data[0] != 0) && a->neg;
+}
+
+static int secp256k1_num_cmp(const secp256k1_num *a, const secp256k1_num *b) {
+ if (a->limbs > b->limbs) {
+ return 1;
+ }
+ if (a->limbs < b->limbs) {
+ return -1;
+ }
+ return mpn_cmp(a->data, b->data, a->limbs);
+}
+
+static int secp256k1_num_eq(const secp256k1_num *a, const secp256k1_num *b) {
+ if (a->limbs > b->limbs) {
+ return 0;
+ }
+ if (a->limbs < b->limbs) {
+ return 0;
+ }
+ if ((a->neg && !secp256k1_num_is_zero(a)) != (b->neg && !secp256k1_num_is_zero(b))) {
+ return 0;
+ }
+ return mpn_cmp(a->data, b->data, a->limbs) == 0;
+}
+
+static void secp256k1_num_subadd(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b, int bneg) {
+ if (!(b->neg ^ bneg ^ a->neg)) { /* a and b have the same sign */
+ r->neg = a->neg;
+ if (a->limbs >= b->limbs) {
+ secp256k1_num_add_abs(r, a, b);
+ } else {
+ secp256k1_num_add_abs(r, b, a);
+ }
+ } else {
+ if (secp256k1_num_cmp(a, b) > 0) {
+ r->neg = a->neg;
+ secp256k1_num_sub_abs(r, a, b);
+ } else {
+ r->neg = b->neg ^ bneg;
+ secp256k1_num_sub_abs(r, b, a);
+ }
+ }
+}
+
+static void secp256k1_num_add(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) {
+ secp256k1_num_sanity(a);
+ secp256k1_num_sanity(b);
+ secp256k1_num_subadd(r, a, b, 0);
+}
+
+static void secp256k1_num_sub(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) {
+ secp256k1_num_sanity(a);
+ secp256k1_num_sanity(b);
+ secp256k1_num_subadd(r, a, b, 1);
+}
+
+static void secp256k1_num_mul(secp256k1_num *r, const secp256k1_num *a, const secp256k1_num *b) {
+ mp_limb_t tmp[2*NUM_LIMBS+1];
+ secp256k1_num_sanity(a);
+ secp256k1_num_sanity(b);
+
+ VERIFY_CHECK(a->limbs + b->limbs <= 2*NUM_LIMBS+1);
+ if ((a->limbs==1 && a->data[0]==0) || (b->limbs==1 && b->data[0]==0)) {
+ r->limbs = 1;
+ r->neg = 0;
+ r->data[0] = 0;
+ return;
+ }
+ if (a->limbs >= b->limbs) {
+ mpn_mul(tmp, a->data, a->limbs, b->data, b->limbs);
+ } else {
+ mpn_mul(tmp, b->data, b->limbs, a->data, a->limbs);
+ }
+ r->limbs = a->limbs + b->limbs;
+ if (r->limbs > 1 && tmp[r->limbs - 1]==0) {
+ r->limbs--;
+ }
+ VERIFY_CHECK(r->limbs <= 2*NUM_LIMBS);
+ mpn_copyi(r->data, tmp, r->limbs);
+ r->neg = a->neg ^ b->neg;
+ memset(tmp, 0, sizeof(tmp));
+}
+
+static void secp256k1_num_shift(secp256k1_num *r, int bits) {
+ if (bits % GMP_NUMB_BITS) {
+ /* Shift within limbs. */
+ mpn_rshift(r->data, r->data, r->limbs, bits % GMP_NUMB_BITS);
+ }
+ if (bits >= GMP_NUMB_BITS) {
+ int i;
+ /* Shift full limbs. */
+ for (i = 0; i < r->limbs; i++) {
+ int index = i + (bits / GMP_NUMB_BITS);
+ if (index < r->limbs && index < 2*NUM_LIMBS) {
+ r->data[i] = r->data[index];
+ } else {
+ r->data[i] = 0;
+ }
+ }
+ }
+ while (r->limbs>1 && r->data[r->limbs-1]==0) {
+ r->limbs--;
+ }
+}
+
+static void secp256k1_num_negate(secp256k1_num *r) {
+ r->neg ^= 1;
+}
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/num_impl.h b/crypto/secp256k1/libsecp256k1/src/num_impl.h
new file mode 100644
index 000000000..0b0e3a072
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/num_impl.h
@@ -0,0 +1,24 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_NUM_IMPL_H_
+#define _SECP256K1_NUM_IMPL_H_
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+#include "num.h"
+
+#if defined(USE_NUM_GMP)
+#include "num_gmp_impl.h"
+#elif defined(USE_NUM_NONE)
+/* Nothing. */
+#else
+#error "Please select num implementation"
+#endif
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/scalar.h b/crypto/secp256k1/libsecp256k1/src/scalar.h
new file mode 100644
index 000000000..27e9d8375
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/scalar.h
@@ -0,0 +1,106 @@
+/**********************************************************************
+ * Copyright (c) 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_SCALAR_
+#define _SECP256K1_SCALAR_
+
+#include "num.h"
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+#if defined(EXHAUSTIVE_TEST_ORDER)
+#include "scalar_low.h"
+#elif defined(USE_SCALAR_4X64)
+#include "scalar_4x64.h"
+#elif defined(USE_SCALAR_8X32)
+#include "scalar_8x32.h"
+#else
+#error "Please select scalar implementation"
+#endif
+
+/** Clear a scalar to prevent the leak of sensitive data. */
+static void secp256k1_scalar_clear(secp256k1_scalar *r);
+
+/** Access bits from a scalar. All requested bits must belong to the same 32-bit limb. */
+static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count);
+
+/** Access bits from a scalar. Not constant time. */
+static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count);
+
+/** Set a scalar from a big endian byte array. */
+static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *bin, int *overflow);
+
+/** Set a scalar to an unsigned integer. */
+static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v);
+
+/** Convert a scalar to a byte array. */
+static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a);
+
+/** Add two scalars together (modulo the group order). Returns whether it overflowed. */
+static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b);
+
+/** Conditionally add a power of two to a scalar. The result is not allowed to overflow. */
+static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag);
+
+/** Multiply two scalars (modulo the group order). */
+static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b);
+
+/** Shift a scalar right by some amount strictly between 0 and 16, returning
+ * the low bits that were shifted off */
+static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n);
+
+/** Compute the square of a scalar (modulo the group order). */
+static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a);
+
+/** Compute the inverse of a scalar (modulo the group order). */
+static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *a);
+
+/** Compute the inverse of a scalar (modulo the group order), without constant-time guarantee. */
+static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *a);
+
+/** Compute the complement of a scalar (modulo the group order). */
+static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a);
+
+/** Check whether a scalar equals zero. */
+static int secp256k1_scalar_is_zero(const secp256k1_scalar *a);
+
+/** Check whether a scalar equals one. */
+static int secp256k1_scalar_is_one(const secp256k1_scalar *a);
+
+/** Check whether a scalar, considered as a nonnegative integer, is even. */
+static int secp256k1_scalar_is_even(const secp256k1_scalar *a);
+
+/** Check whether a scalar is higher than the group order divided by 2. */
+static int secp256k1_scalar_is_high(const secp256k1_scalar *a);
+
+/** Conditionally negate a number, in constant time.
+ * Returns -1 if the number was negated, 1 otherwise */
+static int secp256k1_scalar_cond_negate(secp256k1_scalar *a, int flag);
+
+#ifndef USE_NUM_NONE
+/** Convert a scalar to a number. */
+static void secp256k1_scalar_get_num(secp256k1_num *r, const secp256k1_scalar *a);
+
+/** Get the order of the group as a number. */
+static void secp256k1_scalar_order_get_num(secp256k1_num *r);
+#endif
+
+/** Check whether two scalars are equal. */
+static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b);
+
+#ifdef USE_ENDOMORPHISM
+/** Find r1 and r2 such that r1+r2*2^128 = a. */
+static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a);
+/** Find r1 and r2 such that r1+r2*lambda = a, and r1 and r2 are maximum 128 bits long (see secp256k1_gej_mul_lambda). */
+static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a);
+#endif
+
+/** Multiply a and b (without taking the modulus!), divide by 2**shift, and round to the nearest integer. Shift must be at least 256. */
+static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift);
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/scalar_4x64.h b/crypto/secp256k1/libsecp256k1/src/scalar_4x64.h
new file mode 100644
index 000000000..cff406038
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/scalar_4x64.h
@@ -0,0 +1,19 @@
+/**********************************************************************
+ * Copyright (c) 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_SCALAR_REPR_
+#define _SECP256K1_SCALAR_REPR_
+
+#include <stdint.h>
+
+/** A scalar modulo the group order of the secp256k1 curve. */
+typedef struct {
+ uint64_t d[4];
+} secp256k1_scalar;
+
+#define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{((uint64_t)(d1)) << 32 | (d0), ((uint64_t)(d3)) << 32 | (d2), ((uint64_t)(d5)) << 32 | (d4), ((uint64_t)(d7)) << 32 | (d6)}}
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/scalar_4x64_impl.h b/crypto/secp256k1/libsecp256k1/src/scalar_4x64_impl.h
new file mode 100644
index 000000000..56e7bd82a
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/scalar_4x64_impl.h
@@ -0,0 +1,949 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_SCALAR_REPR_IMPL_H_
+#define _SECP256K1_SCALAR_REPR_IMPL_H_
+
+/* Limbs of the secp256k1 order. */
+#define SECP256K1_N_0 ((uint64_t)0xBFD25E8CD0364141ULL)
+#define SECP256K1_N_1 ((uint64_t)0xBAAEDCE6AF48A03BULL)
+#define SECP256K1_N_2 ((uint64_t)0xFFFFFFFFFFFFFFFEULL)
+#define SECP256K1_N_3 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
+
+/* Limbs of 2^256 minus the secp256k1 order. */
+#define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1)
+#define SECP256K1_N_C_1 (~SECP256K1_N_1)
+#define SECP256K1_N_C_2 (1)
+
+/* Limbs of half the secp256k1 order. */
+#define SECP256K1_N_H_0 ((uint64_t)0xDFE92F46681B20A0ULL)
+#define SECP256K1_N_H_1 ((uint64_t)0x5D576E7357A4501DULL)
+#define SECP256K1_N_H_2 ((uint64_t)0xFFFFFFFFFFFFFFFFULL)
+#define SECP256K1_N_H_3 ((uint64_t)0x7FFFFFFFFFFFFFFFULL)
+
+SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) {
+ r->d[0] = 0;
+ r->d[1] = 0;
+ r->d[2] = 0;
+ r->d[3] = 0;
+}
+
+SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) {
+ r->d[0] = v;
+ r->d[1] = 0;
+ r->d[2] = 0;
+ r->d[3] = 0;
+}
+
+SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+ VERIFY_CHECK((offset + count - 1) >> 6 == offset >> 6);
+ return (a->d[offset >> 6] >> (offset & 0x3F)) & ((((uint64_t)1) << count) - 1);
+}
+
+SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+ VERIFY_CHECK(count < 32);
+ VERIFY_CHECK(offset + count <= 256);
+ if ((offset + count - 1) >> 6 == offset >> 6) {
+ return secp256k1_scalar_get_bits(a, offset, count);
+ } else {
+ VERIFY_CHECK((offset >> 6) + 1 < 4);
+ return ((a->d[offset >> 6] >> (offset & 0x3F)) | (a->d[(offset >> 6) + 1] << (64 - (offset & 0x3F)))) & ((((uint64_t)1) << count) - 1);
+ }
+}
+
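+/* Check whether the scalar is >= the group order n, comparing limbs from most to least significant. */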
+SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) {
+ int yes = 0;
+ int no = 0;
+ no |= (a->d[3] < SECP256K1_N_3); /* No need for a > check. */
+ no |= (a->d[2] < SECP256K1_N_2);
+ yes |= (a->d[2] > SECP256K1_N_2) & ~no;
+ no |= (a->d[1] < SECP256K1_N_1);
+ yes |= (a->d[1] > SECP256K1_N_1) & ~no;
+ yes |= (a->d[0] >= SECP256K1_N_0) & ~no;
+ return yes;
+}
+
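+/* Conditionally subtract the group order: when overflow is 1, add 2^256 - n (the SECP256K1_N_C_* limbs) modulo 2^256, which reduces r modulo n. */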
+SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, unsigned int overflow) {
+ uint128_t t;
+ VERIFY_CHECK(overflow <= 1);
+ t = (uint128_t)r->d[0] + overflow * SECP256K1_N_C_0;
+ r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)r->d[1] + overflow * SECP256K1_N_C_1;
+ r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)r->d[2] + overflow * SECP256K1_N_C_2;
+ r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint64_t)r->d[3];
+ r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL;
+ return overflow;
+}
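Why adding the complement works: when overflow is 1 the value in r lies in [n, 2^256), and

    r + (2^256 - n) ≡ r - n  (mod 2^256),  with 0 <= r - n < n,

so a single pass brings it into range. Multiplying the complement limbs by overflow (0 or 1) instead of branching on it keeps the reduction constant time.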
+
+static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ int overflow;
+ uint128_t t = (uint128_t)a->d[0] + b->d[0];
+ r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)a->d[1] + b->d[1];
+ r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)a->d[2] + b->d[2];
+ r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)a->d[3] + b->d[3];
+ r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ overflow = t + secp256k1_scalar_check_overflow(r);
+ VERIFY_CHECK(overflow == 0 || overflow == 1);
+ secp256k1_scalar_reduce(r, overflow);
+ return overflow;
+}
+
+static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
+ uint128_t t;
+ VERIFY_CHECK(bit < 256);
+ bit += ((uint32_t) flag - 1) & 0x100; /* forcing (bit >> 6) > 3 makes this a noop */
+ t = (uint128_t)r->d[0] + (((uint64_t)((bit >> 6) == 0)) << (bit & 0x3F));
+ r->d[0] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)r->d[1] + (((uint64_t)((bit >> 6) == 1)) << (bit & 0x3F));
+ r->d[1] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)r->d[2] + (((uint64_t)((bit >> 6) == 2)) << (bit & 0x3F));
+ r->d[2] = t & 0xFFFFFFFFFFFFFFFFULL; t >>= 64;
+ t += (uint128_t)r->d[3] + (((uint64_t)((bit >> 6) == 3)) << (bit & 0x3F));
+ r->d[3] = t & 0xFFFFFFFFFFFFFFFFULL;
+#ifdef VERIFY
+ VERIFY_CHECK((t >> 64) == 0);
+ VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
+#endif
+}
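The flag handling above is a branch-free conditional add: when flag is 1, ((uint32_t)flag - 1) is 0 and bit is used as given; when flag is 0 it is 0xFFFFFFFF, so bit is bumped past 255, every ((bit >> 6) == k) test fails, and zero is added to every limb. Either way the same sequence of additions executes, so the timing does not depend on flag.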
+
+static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
+ int over;
+ r->d[0] = (uint64_t)b32[31] | (uint64_t)b32[30] << 8 | (uint64_t)b32[29] << 16 | (uint64_t)b32[28] << 24 | (uint64_t)b32[27] << 32 | (uint64_t)b32[26] << 40 | (uint64_t)b32[25] << 48 | (uint64_t)b32[24] << 56;
+ r->d[1] = (uint64_t)b32[23] | (uint64_t)b32[22] << 8 | (uint64_t)b32[21] << 16 | (uint64_t)b32[20] << 24 | (uint64_t)b32[19] << 32 | (uint64_t)b32[18] << 40 | (uint64_t)b32[17] << 48 | (uint64_t)b32[16] << 56;
+ r->d[2] = (uint64_t)b32[15] | (uint64_t)b32[14] << 8 | (uint64_t)b32[13] << 16 | (uint64_t)b32[12] << 24 | (uint64_t)b32[11] << 32 | (uint64_t)b32[10] << 40 | (uint64_t)b32[9] << 48 | (uint64_t)b32[8] << 56;
+ r->d[3] = (uint64_t)b32[7] | (uint64_t)b32[6] << 8 | (uint64_t)b32[5] << 16 | (uint64_t)b32[4] << 24 | (uint64_t)b32[3] << 32 | (uint64_t)b32[2] << 40 | (uint64_t)b32[1] << 48 | (uint64_t)b32[0] << 56;
+ over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r));
+ if (overflow) {
+ *overflow = over;
+ }
+}
+
+static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
+ bin[0] = a->d[3] >> 56; bin[1] = a->d[3] >> 48; bin[2] = a->d[3] >> 40; bin[3] = a->d[3] >> 32; bin[4] = a->d[3] >> 24; bin[5] = a->d[3] >> 16; bin[6] = a->d[3] >> 8; bin[7] = a->d[3];
+ bin[8] = a->d[2] >> 56; bin[9] = a->d[2] >> 48; bin[10] = a->d[2] >> 40; bin[11] = a->d[2] >> 32; bin[12] = a->d[2] >> 24; bin[13] = a->d[2] >> 16; bin[14] = a->d[2] >> 8; bin[15] = a->d[2];
+ bin[16] = a->d[1] >> 56; bin[17] = a->d[1] >> 48; bin[18] = a->d[1] >> 40; bin[19] = a->d[1] >> 32; bin[20] = a->d[1] >> 24; bin[21] = a->d[1] >> 16; bin[22] = a->d[1] >> 8; bin[23] = a->d[1];
+ bin[24] = a->d[0] >> 56; bin[25] = a->d[0] >> 48; bin[26] = a->d[0] >> 40; bin[27] = a->d[0] >> 32; bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0];
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
+ return (a->d[0] | a->d[1] | a->d[2] | a->d[3]) == 0;
+}
+
+static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
+ uint64_t nonzero = 0xFFFFFFFFFFFFFFFFULL * (secp256k1_scalar_is_zero(a) == 0);
+ uint128_t t = (uint128_t)(~a->d[0]) + SECP256K1_N_0 + 1;
+ r->d[0] = t & nonzero; t >>= 64;
+ t += (uint128_t)(~a->d[1]) + SECP256K1_N_1;
+ r->d[1] = t & nonzero; t >>= 64;
+ t += (uint128_t)(~a->d[2]) + SECP256K1_N_2;
+ r->d[2] = t & nonzero; t >>= 64;
+ t += (uint128_t)(~a->d[3]) + SECP256K1_N_3;
+ r->d[3] = t & nonzero;
+}
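The negation relies on the two's-complement identity

    ~a + (n + 1) = (2^256 - 1 - a) + n + 1 ≡ n - a  (mod 2^256),

which is why the extra 1 is folded into the lowest limb. The nonzero mask handles a = 0 separately: without it the result would be n itself rather than the canonical 0.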
+
+SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
+ return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3]) == 0;
+}
+
+static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
+ int yes = 0;
+ int no = 0;
+ no |= (a->d[3] < SECP256K1_N_H_3);
+ yes |= (a->d[3] > SECP256K1_N_H_3) & ~no;
+ no |= (a->d[2] < SECP256K1_N_H_2) & ~yes; /* No need for a > check. */
+ no |= (a->d[1] < SECP256K1_N_H_1) & ~yes;
+ yes |= (a->d[1] > SECP256K1_N_H_1) & ~no;
+ yes |= (a->d[0] > SECP256K1_N_H_0) & ~no;
+ return yes;
+}
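is_high reports whether the scalar exceeds (n-1)/2; the SECP256K1_N_H_* limbs above are exactly n >> 1. Combined with cond_negate below, this is what lets callers map a scalar into the lower half of the group order, for example when normalizing ECDSA signatures to low-S form.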
+
+static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
+ /* If flag = 0, mask = 00...00 and this is a no-op;
+ * if flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate. */
+ uint64_t mask = !flag - 1;
+ uint64_t nonzero = (secp256k1_scalar_is_zero(r) != 0) - 1;
+ uint128_t t = (uint128_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask);
+ r->d[0] = t & nonzero; t >>= 64;
+ t += (uint128_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask);
+ r->d[1] = t & nonzero; t >>= 64;
+ t += (uint128_t)(r->d[2] ^ mask) + (SECP256K1_N_2 & mask);
+ r->d[2] = t & nonzero; t >>= 64;
+ t += (uint128_t)(r->d[3] ^ mask) + (SECP256K1_N_3 & mask);
+ r->d[3] = t & nonzero;
+ return 2 * (mask == 0) - 1;
+}
+
+/* Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. */
+
+/** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
+#define muladd(a,b) { \
+ uint64_t tl, th; \
+ { \
+ uint128_t t = (uint128_t)a * b; \
+ th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \
+ tl = t; \
+ } \
+ c0 += tl; /* overflow is handled on the next line */ \
+ th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \
+ c1 += th; /* overflow is handled on the next line */ \
+ c2 += (c1 < th) ? 1 : 0; /* never overflows by contract (verified in the next line) */ \
+ VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
+}
+
+/** Add a*b to the number defined by (c0,c1). c1 must never overflow. */
+#define muladd_fast(a,b) { \
+ uint64_t tl, th; \
+ { \
+ uint128_t t = (uint128_t)a * b; \
+ th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \
+ tl = t; \
+ } \
+ c0 += tl; /* overflow is handled on the next line */ \
+ th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \
+ c1 += th; /* never overflows by contract (verified in the next line) */ \
+ VERIFY_CHECK(c1 >= th); \
+}
+
+/** Add 2*a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
+#define muladd2(a,b) { \
+ uint64_t tl, th, th2, tl2; \
+ { \
+ uint128_t t = (uint128_t)a * b; \
+ th = t >> 64; /* at most 0xFFFFFFFFFFFFFFFE */ \
+ tl = t; \
+ } \
+ th2 = th + th; /* at most 0xFFFFFFFFFFFFFFFE (in case th was 0x7FFFFFFFFFFFFFFF) */ \
+ c2 += (th2 < th) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ VERIFY_CHECK((th2 >= th) || (c2 != 0)); \
+ tl2 = tl + tl; /* at most 0xFFFFFFFFFFFFFFFE (in case the lowest 63 bits of tl were 0x7FFFFFFFFFFFFFFF) */ \
+ th2 += (tl2 < tl) ? 1 : 0; /* at most 0xFFFFFFFFFFFFFFFF */ \
+ c0 += tl2; /* overflow is handled on the next line */ \
+ th2 += (c0 < tl2) ? 1 : 0; /* second overflow is handled on the next line */ \
+ c2 += (c0 < tl2) & (th2 == 0); /* never overflows by contract (verified the next line) */ \
+ VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \
+ c1 += th2; /* overflow is handled on the next line */ \
+ c2 += (c1 < th2) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \
+}
+
+/** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */
+#define sumadd(a) { \
+ unsigned int over; \
+ c0 += (a); /* overflow is handled on the next line */ \
+ over = (c0 < (a)) ? 1 : 0; \
+ c1 += over; /* overflow is handled on the next line */ \
+ c2 += (c1 < over) ? 1 : 0; /* never overflows by contract */ \
+}
+
+/** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */
+#define sumadd_fast(a) { \
+ c0 += (a); /* overflow is handled on the next line */ \
+ c1 += (c0 < (a)) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
+ VERIFY_CHECK(c2 == 0); \
+}
+
+/** Extract the lowest 64 bits of (c0,c1,c2) into n, and shift the number right by 64 bits. */
+#define extract(n) { \
+ (n) = c0; \
+ c0 = c1; \
+ c1 = c2; \
+ c2 = 0; \
+}
+
+/** Extract the lowest 64 bits of (c0,c1,c2) into n, and shift the number right by 64 bits. c2 is required to be zero. */
+#define extract_fast(n) { \
+ (n) = c0; \
+ c0 = c1; \
+ c1 = 0; \
+ VERIFY_CHECK(c2 == 0); \
+}
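The seven macros above implement a small multi-word accumulator: (c0,c1,c2) holds one column of a schoolbook multiplication, muladd/sumadd feed partial products into it with explicit carry propagation, and extract pops the finished low word as the next output limb. Below is a minimal standalone sketch of the same column-accumulation idea; it is illustrative only, with made-up inputs, and it assumes a compiler that provides unsigned __int128, as the non-asm path here does:

    #include <stdint.h>
    #include <stdio.h>

    typedef unsigned __int128 uint128_t;

    int main(void) {
        /* Accumulate one multiplication column, a0*b2 + a1*b1 + a2*b0, into (c0,c1,c2). */
        uint64_t a[3] = {0x1111111111111111ULL, 0x2222222222222222ULL, 0x3333333333333333ULL};
        uint64_t b[3] = {0xAAAAAAAAAAAAAAAAULL, 0xBBBBBBBBBBBBBBBBULL, 0xCCCCCCCCCCCCCCCCULL};
        uint64_t c0 = 0, c1 = 0, c2 = 0;
        int i;
        for (i = 0; i < 3; i++) {
            uint128_t t = (uint128_t)a[i] * b[2 - i];
            uint64_t tl = (uint64_t)t;
            uint64_t th = (uint64_t)(t >> 64);
            c0 += tl; th += (c0 < tl);   /* carry out of the low word */
            c1 += th; c2 += (c1 < th);   /* carry out of the middle word */
        }
        /* c0 would be extract()'d as the next output limb; (c1,c2) carry into the next column. */
        printf("limb = %016llx, carry = %llx%016llx\n",
               (unsigned long long)c0, (unsigned long long)c2, (unsigned long long)c1);
        return 0;
    }

The comparisons (c0 < tl) and (c1 < th) are the usual trick for detecting unsigned wrap-around after an addition, which is exactly how the macros above propagate carries.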
+
+static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint64_t *l) {
+#ifdef USE_ASM_X86_64
+ /* Reduce 512 bits into 385. */
+ uint64_t m0, m1, m2, m3, m4, m5, m6;
+ uint64_t p0, p1, p2, p3, p4;
+ uint64_t c;
+
+ __asm__ __volatile__(
+ /* Preload. */
+ "movq 32(%%rsi), %%r11\n"
+ "movq 40(%%rsi), %%r12\n"
+ "movq 48(%%rsi), %%r13\n"
+ "movq 56(%%rsi), %%r14\n"
+ /* Initialize r8,r9,r10 */
+ "movq 0(%%rsi), %%r8\n"
+ "xorq %%r9, %%r9\n"
+ "xorq %%r10, %%r10\n"
+ /* (r8,r9) += n0 * c0 */
+ "movq %8, %%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ /* extract m0 */
+ "movq %%r8, %q0\n"
+ "xorq %%r8, %%r8\n"
+ /* (r9,r10) += l1 */
+ "addq 8(%%rsi), %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r9,r10,r8) += n1 * c0 */
+ "movq %8, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* (r9,r10,r8) += n0 * c1 */
+ "movq %9, %%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* extract m1 */
+ "movq %%r9, %q1\n"
+ "xorq %%r9, %%r9\n"
+ /* (r10,r8,r9) += l2 */
+ "addq 16(%%rsi), %%r10\n"
+ "adcq $0, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += n2 * c0 */
+ "movq %8, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += n1 * c1 */
+ "movq %9, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += n0 */
+ "addq %%r11, %%r10\n"
+ "adcq $0, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* extract m2 */
+ "movq %%r10, %q2\n"
+ "xorq %%r10, %%r10\n"
+ /* (r8,r9,r10) += l3 */
+ "addq 24(%%rsi), %%r8\n"
+ "adcq $0, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r8,r9,r10) += n3 * c0 */
+ "movq %8, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r8,r9,r10) += n2 * c1 */
+ "movq %9, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r8,r9,r10) += n1 */
+ "addq %%r12, %%r8\n"
+ "adcq $0, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* extract m3 */
+ "movq %%r8, %q3\n"
+ "xorq %%r8, %%r8\n"
+ /* (r9,r10,r8) += n3 * c1 */
+ "movq %9, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* (r9,r10,r8) += n2 */
+ "addq %%r13, %%r9\n"
+ "adcq $0, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* extract m4 */
+ "movq %%r9, %q4\n"
+ /* (r10,r8) += n3 */
+ "addq %%r14, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* extract m5 */
+ "movq %%r10, %q5\n"
+ /* extract m6 */
+ "movq %%r8, %q6\n"
+ : "=g"(m0), "=g"(m1), "=g"(m2), "=g"(m3), "=g"(m4), "=g"(m5), "=g"(m6)
+ : "S"(l), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1)
+ : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc");
+
+ /* Reduce 385 bits into 258. */
+ __asm__ __volatile__(
+ /* Preload */
+ "movq %q9, %%r11\n"
+ "movq %q10, %%r12\n"
+ "movq %q11, %%r13\n"
+ /* Initialize (r8,r9,r10) */
+ "movq %q5, %%r8\n"
+ "xorq %%r9, %%r9\n"
+ "xorq %%r10, %%r10\n"
+ /* (r8,r9) += m4 * c0 */
+ "movq %12, %%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ /* extract p0 */
+ "movq %%r8, %q0\n"
+ "xorq %%r8, %%r8\n"
+ /* (r9,r10) += m1 */
+ "addq %q6, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r9,r10,r8) += m5 * c0 */
+ "movq %12, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* (r9,r10,r8) += m4 * c1 */
+ "movq %13, %%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* extract p1 */
+ "movq %%r9, %q1\n"
+ "xorq %%r9, %%r9\n"
+ /* (r10,r8,r9) += m2 */
+ "addq %q7, %%r10\n"
+ "adcq $0, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += m6 * c0 */
+ "movq %12, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += m5 * c1 */
+ "movq %13, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += m4 */
+ "addq %%r11, %%r10\n"
+ "adcq $0, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* extract p2 */
+ "movq %%r10, %q2\n"
+ /* (r8,r9) += m3 */
+ "addq %q8, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r8,r9) += m6 * c1 */
+ "movq %13, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ /* (r8,r9) += m5 */
+ "addq %%r12, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* extract p3 */
+ "movq %%r8, %q3\n"
+ /* (r9) += m6 */
+ "addq %%r13, %%r9\n"
+ /* extract p4 */
+ "movq %%r9, %q4\n"
+ : "=&g"(p0), "=&g"(p1), "=&g"(p2), "=g"(p3), "=g"(p4)
+ : "g"(m0), "g"(m1), "g"(m2), "g"(m3), "g"(m4), "g"(m5), "g"(m6), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1)
+ : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "cc");
+
+ /* Reduce 258 bits into 256. */
+ __asm__ __volatile__(
+ /* Preload */
+ "movq %q5, %%r10\n"
+ /* (rax,rdx) = p4 * c0 */
+ "movq %7, %%rax\n"
+ "mulq %%r10\n"
+ /* (rax,rdx) += p0 */
+ "addq %q1, %%rax\n"
+ "adcq $0, %%rdx\n"
+ /* extract r0 */
+ "movq %%rax, 0(%q6)\n"
+ /* Move to (r8,r9) */
+ "movq %%rdx, %%r8\n"
+ "xorq %%r9, %%r9\n"
+ /* (r8,r9) += p1 */
+ "addq %q2, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r8,r9) += p4 * c1 */
+ "movq %8, %%rax\n"
+ "mulq %%r10\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ /* Extract r1 */
+ "movq %%r8, 8(%q6)\n"
+ "xorq %%r8, %%r8\n"
+ /* (r9,r8) += p4 */
+ "addq %%r10, %%r9\n"
+ "adcq $0, %%r8\n"
+ /* (r9,r8) += p2 */
+ "addq %q3, %%r9\n"
+ "adcq $0, %%r8\n"
+ /* Extract r2 */
+ "movq %%r9, 16(%q6)\n"
+ "xorq %%r9, %%r9\n"
+ /* (r8,r9) += p3 */
+ "addq %q4, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* Extract r3 */
+ "movq %%r8, 24(%q6)\n"
+ /* Extract c */
+ "movq %%r9, %q0\n"
+ : "=g"(c)
+ : "g"(p0), "g"(p1), "g"(p2), "g"(p3), "g"(p4), "D"(r), "n"(SECP256K1_N_C_0), "n"(SECP256K1_N_C_1)
+ : "rax", "rdx", "r8", "r9", "r10", "cc", "memory");
+#else
+ uint128_t c;
+ uint64_t c0, c1, c2;
+ uint64_t n0 = l[4], n1 = l[5], n2 = l[6], n3 = l[7];
+ uint64_t m0, m1, m2, m3, m4, m5;
+ uint32_t m6;
+ uint64_t p0, p1, p2, p3;
+ uint32_t p4;
+
+ /* Reduce 512 bits into 385. */
+ /* m[0..6] = l[0..3] + n[0..3] * SECP256K1_N_C. */
+ c0 = l[0]; c1 = 0; c2 = 0;
+ muladd_fast(n0, SECP256K1_N_C_0);
+ extract_fast(m0);
+ sumadd_fast(l[1]);
+ muladd(n1, SECP256K1_N_C_0);
+ muladd(n0, SECP256K1_N_C_1);
+ extract(m1);
+ sumadd(l[2]);
+ muladd(n2, SECP256K1_N_C_0);
+ muladd(n1, SECP256K1_N_C_1);
+ sumadd(n0);
+ extract(m2);
+ sumadd(l[3]);
+ muladd(n3, SECP256K1_N_C_0);
+ muladd(n2, SECP256K1_N_C_1);
+ sumadd(n1);
+ extract(m3);
+ muladd(n3, SECP256K1_N_C_1);
+ sumadd(n2);
+ extract(m4);
+ sumadd_fast(n3);
+ extract_fast(m5);
+ VERIFY_CHECK(c0 <= 1);
+ m6 = c0;
+
+ /* Reduce 385 bits into 258. */
+ /* p[0..4] = m[0..3] + m[4..6] * SECP256K1_N_C. */
+ c0 = m0; c1 = 0; c2 = 0;
+ muladd_fast(m4, SECP256K1_N_C_0);
+ extract_fast(p0);
+ sumadd_fast(m1);
+ muladd(m5, SECP256K1_N_C_0);
+ muladd(m4, SECP256K1_N_C_1);
+ extract(p1);
+ sumadd(m2);
+ muladd(m6, SECP256K1_N_C_0);
+ muladd(m5, SECP256K1_N_C_1);
+ sumadd(m4);
+ extract(p2);
+ sumadd_fast(m3);
+ muladd_fast(m6, SECP256K1_N_C_1);
+ sumadd_fast(m5);
+ extract_fast(p3);
+ p4 = c0 + m6;
+ VERIFY_CHECK(p4 <= 2);
+
+ /* Reduce 258 bits into 256. */
+ /* r[0..3] = p[0..3] + p[4] * SECP256K1_N_C. */
+ c = p0 + (uint128_t)SECP256K1_N_C_0 * p4;
+ r->d[0] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
+ c += p1 + (uint128_t)SECP256K1_N_C_1 * p4;
+ r->d[1] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
+ c += p2 + (uint128_t)p4;
+ r->d[2] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
+ c += p3;
+ r->d[3] = c & 0xFFFFFFFFFFFFFFFFULL; c >>= 64;
+#endif
+
+ /* Final reduction of r. */
+ secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
+}
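The reduction is driven by the congruence 2^256 ≡ 2^256 - n = N_C (mod n). Writing the input as l = l_hi*2^256 + l_lo gives

    l ≡ l_hi*N_C + l_lo  (mod n),

and because N_C < 2^129 each fold shrinks the operand: 512 bits to at most 385, a second fold to at most 258, after which the remaining two high bits are folded in and a single conditional subtraction of n (secp256k1_scalar_reduce) finishes the job. The asm and C paths above compute the same three folds.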
+
+static void secp256k1_scalar_mul_512(uint64_t l[8], const secp256k1_scalar *a, const secp256k1_scalar *b) {
+#ifdef USE_ASM_X86_64
+ const uint64_t *pb = b->d;
+ __asm__ __volatile__(
+ /* Preload */
+ "movq 0(%%rdi), %%r15\n"
+ "movq 8(%%rdi), %%rbx\n"
+ "movq 16(%%rdi), %%rcx\n"
+ "movq 0(%%rdx), %%r11\n"
+ "movq 8(%%rdx), %%r12\n"
+ "movq 16(%%rdx), %%r13\n"
+ "movq 24(%%rdx), %%r14\n"
+ /* (rax,rdx) = a0 * b0 */
+ "movq %%r15, %%rax\n"
+ "mulq %%r11\n"
+ /* Extract l0 */
+ "movq %%rax, 0(%%rsi)\n"
+ /* (r8,r9,r10) = (rdx) */
+ "movq %%rdx, %%r8\n"
+ "xorq %%r9, %%r9\n"
+ "xorq %%r10, %%r10\n"
+ /* (r8,r9,r10) += a0 * b1 */
+ "movq %%r15, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r8,r9,r10) += a1 * b0 */
+ "movq %%rbx, %%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* Extract l1 */
+ "movq %%r8, 8(%%rsi)\n"
+ "xorq %%r8, %%r8\n"
+ /* (r9,r10,r8) += a0 * b2 */
+ "movq %%r15, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* (r9,r10,r8) += a1 * b1 */
+ "movq %%rbx, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* (r9,r10,r8) += a2 * b0 */
+ "movq %%rcx, %%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* Extract l2 */
+ "movq %%r9, 16(%%rsi)\n"
+ "xorq %%r9, %%r9\n"
+ /* (r10,r8,r9) += a0 * b3 */
+ "movq %%r15, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* Preload a3 */
+ "movq 24(%%rdi), %%r15\n"
+ /* (r10,r8,r9) += a1 * b2 */
+ "movq %%rbx, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += a2 * b1 */
+ "movq %%rcx, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += a3 * b0 */
+ "movq %%r15, %%rax\n"
+ "mulq %%r11\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* Extract l3 */
+ "movq %%r10, 24(%%rsi)\n"
+ "xorq %%r10, %%r10\n"
+ /* (r8,r9,r10) += a1 * b3 */
+ "movq %%rbx, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r8,r9,r10) += a2 * b2 */
+ "movq %%rcx, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r8,r9,r10) += a3 * b1 */
+ "movq %%r15, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* Extract l4 */
+ "movq %%r8, 32(%%rsi)\n"
+ "xorq %%r8, %%r8\n"
+ /* (r9,r10,r8) += a2 * b3 */
+ "movq %%rcx, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* (r9,r10,r8) += a3 * b2 */
+ "movq %%r15, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* Extract l5 */
+ "movq %%r9, 40(%%rsi)\n"
+ /* (r10,r8) += a3 * b3 */
+ "movq %%r15, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ /* Extract l6 */
+ "movq %%r10, 48(%%rsi)\n"
+ /* Extract l7 */
+ "movq %%r8, 56(%%rsi)\n"
+ : "+d"(pb)
+ : "S"(l), "D"(a->d)
+ : "rax", "rbx", "rcx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", "cc", "memory");
+#else
+ /* 160 bit accumulator. */
+ uint64_t c0 = 0, c1 = 0;
+ uint32_t c2 = 0;
+
+ /* l[0..7] = a[0..3] * b[0..3]. */
+ muladd_fast(a->d[0], b->d[0]);
+ extract_fast(l[0]);
+ muladd(a->d[0], b->d[1]);
+ muladd(a->d[1], b->d[0]);
+ extract(l[1]);
+ muladd(a->d[0], b->d[2]);
+ muladd(a->d[1], b->d[1]);
+ muladd(a->d[2], b->d[0]);
+ extract(l[2]);
+ muladd(a->d[0], b->d[3]);
+ muladd(a->d[1], b->d[2]);
+ muladd(a->d[2], b->d[1]);
+ muladd(a->d[3], b->d[0]);
+ extract(l[3]);
+ muladd(a->d[1], b->d[3]);
+ muladd(a->d[2], b->d[2]);
+ muladd(a->d[3], b->d[1]);
+ extract(l[4]);
+ muladd(a->d[2], b->d[3]);
+ muladd(a->d[3], b->d[2]);
+ extract(l[5]);
+ muladd_fast(a->d[3], b->d[3]);
+ extract_fast(l[6]);
+ VERIFY_CHECK(c1 == 0);
+ l[7] = c0;
+#endif
+}
+
+static void secp256k1_scalar_sqr_512(uint64_t l[8], const secp256k1_scalar *a) {
+#ifdef USE_ASM_X86_64
+ __asm__ __volatile__(
+ /* Preload */
+ "movq 0(%%rdi), %%r11\n"
+ "movq 8(%%rdi), %%r12\n"
+ "movq 16(%%rdi), %%r13\n"
+ "movq 24(%%rdi), %%r14\n"
+ /* (rax,rdx) = a0 * a0 */
+ "movq %%r11, %%rax\n"
+ "mulq %%r11\n"
+ /* Extract l0 */
+ "movq %%rax, 0(%%rsi)\n"
+ /* (r8,r9,r10) = (rdx,0) */
+ "movq %%rdx, %%r8\n"
+ "xorq %%r9, %%r9\n"
+ "xorq %%r10, %%r10\n"
+ /* (r8,r9,r10) += 2 * a0 * a1 */
+ "movq %%r11, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* Extract l1 */
+ "movq %%r8, 8(%%rsi)\n"
+ "xorq %%r8, %%r8\n"
+ /* (r9,r10,r8) += 2 * a0 * a2 */
+ "movq %%r11, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* (r9,r10,r8) += a1 * a1 */
+ "movq %%r12, %%rax\n"
+ "mulq %%r12\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* Extract l2 */
+ "movq %%r9, 16(%%rsi)\n"
+ "xorq %%r9, %%r9\n"
+ /* (r10,r8,r9) += 2 * a0 * a3 */
+ "movq %%r11, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* (r10,r8,r9) += 2 * a1 * a2 */
+ "movq %%r12, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ "adcq $0, %%r9\n"
+ /* Extract l3 */
+ "movq %%r10, 24(%%rsi)\n"
+ "xorq %%r10, %%r10\n"
+ /* (r8,r9,r10) += 2 * a1 * a3 */
+ "movq %%r12, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* (r8,r9,r10) += a2 * a2 */
+ "movq %%r13, %%rax\n"
+ "mulq %%r13\n"
+ "addq %%rax, %%r8\n"
+ "adcq %%rdx, %%r9\n"
+ "adcq $0, %%r10\n"
+ /* Extract l4 */
+ "movq %%r8, 32(%%rsi)\n"
+ "xorq %%r8, %%r8\n"
+ /* (r9,r10,r8) += 2 * a2 * a3 */
+ "movq %%r13, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ "addq %%rax, %%r9\n"
+ "adcq %%rdx, %%r10\n"
+ "adcq $0, %%r8\n"
+ /* Extract l5 */
+ "movq %%r9, 40(%%rsi)\n"
+ /* (r10,r8) += a3 * a3 */
+ "movq %%r14, %%rax\n"
+ "mulq %%r14\n"
+ "addq %%rax, %%r10\n"
+ "adcq %%rdx, %%r8\n"
+ /* Extract l6 */
+ "movq %%r10, 48(%%rsi)\n"
+ /* Extract l7 */
+ "movq %%r8, 56(%%rsi)\n"
+ :
+ : "S"(l), "D"(a->d)
+ : "rax", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "cc", "memory");
+#else
+ /* 160 bit accumulator. */
+ uint64_t c0 = 0, c1 = 0;
+ uint32_t c2 = 0;
+
+ /* l[0..7] = a[0..3]^2. */
+ muladd_fast(a->d[0], a->d[0]);
+ extract_fast(l[0]);
+ muladd2(a->d[0], a->d[1]);
+ extract(l[1]);
+ muladd2(a->d[0], a->d[2]);
+ muladd(a->d[1], a->d[1]);
+ extract(l[2]);
+ muladd2(a->d[0], a->d[3]);
+ muladd2(a->d[1], a->d[2]);
+ extract(l[3]);
+ muladd2(a->d[1], a->d[3]);
+ muladd(a->d[2], a->d[2]);
+ extract(l[4]);
+ muladd2(a->d[2], a->d[3]);
+ extract(l[5]);
+ muladd_fast(a->d[3], a->d[3]);
+ extract_fast(l[6]);
+ VERIFY_CHECK(c1 == 0);
+ l[7] = c0;
+#endif
+}
+
+#undef sumadd
+#undef sumadd_fast
+#undef muladd
+#undef muladd_fast
+#undef muladd2
+#undef extract
+#undef extract_fast
+
+static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ uint64_t l[8];
+ secp256k1_scalar_mul_512(l, a, b);
+ secp256k1_scalar_reduce_512(r, l);
+}
+
+static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
+ int ret;
+ VERIFY_CHECK(n > 0);
+ VERIFY_CHECK(n < 16);
+ ret = r->d[0] & ((1 << n) - 1);
+ r->d[0] = (r->d[0] >> n) + (r->d[1] << (64 - n));
+ r->d[1] = (r->d[1] >> n) + (r->d[2] << (64 - n));
+ r->d[2] = (r->d[2] >> n) + (r->d[3] << (64 - n));
+ r->d[3] = (r->d[3] >> n);
+ return ret;
+}
+
+static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) {
+ uint64_t l[8];
+ secp256k1_scalar_sqr_512(l, a);
+ secp256k1_scalar_reduce_512(r, l);
+}
+
+#ifdef USE_ENDOMORPHISM
+static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
+ r1->d[0] = a->d[0];
+ r1->d[1] = a->d[1];
+ r1->d[2] = 0;
+ r1->d[3] = 0;
+ r2->d[0] = a->d[2];
+ r2->d[1] = a->d[3];
+ r2->d[2] = 0;
+ r2->d[3] = 0;
+}
+#endif
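split_128 simply writes a = r1 + r2*2^128 with both halves below 2^128; elsewhere in the library the generator multiplier is split this way so that precomputed multiples of G and of 2^128*G can be combined, roughly halving the number of doublings in that multiplication.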
+
+SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3])) == 0;
+}
+
+SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift) {
+ uint64_t l[8];
+ unsigned int shiftlimbs;
+ unsigned int shiftlow;
+ unsigned int shifthigh;
+ VERIFY_CHECK(shift >= 256);
+ secp256k1_scalar_mul_512(l, a, b);
+ shiftlimbs = shift >> 6;
+ shiftlow = shift & 0x3F;
+ shifthigh = 64 - shiftlow;
+ r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[1] = shift < 448 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[2] = shift < 384 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[3] = shift < 320 ? (l[3 + shiftlimbs] >> shiftlow) : 0;
+ secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 6] >> ((shift - 1) & 0x3f)) & 1);
+}
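The final cadd_bit rounds to nearest rather than truncating: bit (shift - 1) of the 512-bit product sits at the half-ulp position, so the result is

    r = floor(a*b / 2^shift) + ((a*b >> (shift - 1)) & 1) = round(a*b / 2^shift),

which is the behaviour the endomorphism scalar decomposition expects from this helper.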
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/scalar_8x32.h b/crypto/secp256k1/libsecp256k1/src/scalar_8x32.h
new file mode 100644
index 000000000..1319664f6
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/scalar_8x32.h
@@ -0,0 +1,19 @@
+/**********************************************************************
+ * Copyright (c) 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_SCALAR_REPR_
+#define _SECP256K1_SCALAR_REPR_
+
+#include <stdint.h>
+
+/** A scalar modulo the group order of the secp256k1 curve. */
+typedef struct {
+ uint32_t d[8];
+} secp256k1_scalar;
+
+#define SECP256K1_SCALAR_CONST(d7, d6, d5, d4, d3, d2, d1, d0) {{(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7)}}
+
+#endif
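Both backends take the same eight 32-bit words d7..d0 in this macro: the 8x32 version stores them directly, while the 4x64 version earlier in this patch merges adjacent pairs into 64-bit limbs, so a constant written once, for example SECP256K1_SCALAR_CONST(0,0,0,0,0,0,0,1) for the scalar 1, expands correctly for either representation.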
diff --git a/crypto/secp256k1/libsecp256k1/src/scalar_8x32_impl.h b/crypto/secp256k1/libsecp256k1/src/scalar_8x32_impl.h
new file mode 100644
index 000000000..aae4f35c0
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/scalar_8x32_impl.h
@@ -0,0 +1,721 @@
+/**********************************************************************
+ * Copyright (c) 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_SCALAR_REPR_IMPL_H_
+#define _SECP256K1_SCALAR_REPR_IMPL_H_
+
+/* Limbs of the secp256k1 order. */
+#define SECP256K1_N_0 ((uint32_t)0xD0364141UL)
+#define SECP256K1_N_1 ((uint32_t)0xBFD25E8CUL)
+#define SECP256K1_N_2 ((uint32_t)0xAF48A03BUL)
+#define SECP256K1_N_3 ((uint32_t)0xBAAEDCE6UL)
+#define SECP256K1_N_4 ((uint32_t)0xFFFFFFFEUL)
+#define SECP256K1_N_5 ((uint32_t)0xFFFFFFFFUL)
+#define SECP256K1_N_6 ((uint32_t)0xFFFFFFFFUL)
+#define SECP256K1_N_7 ((uint32_t)0xFFFFFFFFUL)
+
+/* Limbs of 2^256 minus the secp256k1 order. */
+#define SECP256K1_N_C_0 (~SECP256K1_N_0 + 1)
+#define SECP256K1_N_C_1 (~SECP256K1_N_1)
+#define SECP256K1_N_C_2 (~SECP256K1_N_2)
+#define SECP256K1_N_C_3 (~SECP256K1_N_3)
+#define SECP256K1_N_C_4 (1)
+
+/* Limbs of half the secp256k1 order. */
+#define SECP256K1_N_H_0 ((uint32_t)0x681B20A0UL)
+#define SECP256K1_N_H_1 ((uint32_t)0xDFE92F46UL)
+#define SECP256K1_N_H_2 ((uint32_t)0x57A4501DUL)
+#define SECP256K1_N_H_3 ((uint32_t)0x5D576E73UL)
+#define SECP256K1_N_H_4 ((uint32_t)0xFFFFFFFFUL)
+#define SECP256K1_N_H_5 ((uint32_t)0xFFFFFFFFUL)
+#define SECP256K1_N_H_6 ((uint32_t)0xFFFFFFFFUL)
+#define SECP256K1_N_H_7 ((uint32_t)0x7FFFFFFFUL)
+
+SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) {
+ r->d[0] = 0;
+ r->d[1] = 0;
+ r->d[2] = 0;
+ r->d[3] = 0;
+ r->d[4] = 0;
+ r->d[5] = 0;
+ r->d[6] = 0;
+ r->d[7] = 0;
+}
+
+SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) {
+ r->d[0] = v;
+ r->d[1] = 0;
+ r->d[2] = 0;
+ r->d[3] = 0;
+ r->d[4] = 0;
+ r->d[5] = 0;
+ r->d[6] = 0;
+ r->d[7] = 0;
+}
+
+SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+ VERIFY_CHECK((offset + count - 1) >> 5 == offset >> 5);
+ return (a->d[offset >> 5] >> (offset & 0x1F)) & ((1 << count) - 1);
+}
+
+SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+ VERIFY_CHECK(count < 32);
+ VERIFY_CHECK(offset + count <= 256);
+ if ((offset + count - 1) >> 5 == offset >> 5) {
+ return secp256k1_scalar_get_bits(a, offset, count);
+ } else {
+ VERIFY_CHECK((offset >> 5) + 1 < 8);
+ return ((a->d[offset >> 5] >> (offset & 0x1F)) | (a->d[(offset >> 5) + 1] << (32 - (offset & 0x1F)))) & ((((uint32_t)1) << count) - 1);
+ }
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) {
+ int yes = 0;
+ int no = 0;
+ no |= (a->d[7] < SECP256K1_N_7); /* No need for a > check. */
+ no |= (a->d[6] < SECP256K1_N_6); /* No need for a > check. */
+ no |= (a->d[5] < SECP256K1_N_5); /* No need for a > check. */
+ no |= (a->d[4] < SECP256K1_N_4);
+ yes |= (a->d[4] > SECP256K1_N_4) & ~no;
+ no |= (a->d[3] < SECP256K1_N_3) & ~yes;
+ yes |= (a->d[3] > SECP256K1_N_3) & ~no;
+ no |= (a->d[2] < SECP256K1_N_2) & ~yes;
+ yes |= (a->d[2] > SECP256K1_N_2) & ~no;
+ no |= (a->d[1] < SECP256K1_N_1) & ~yes;
+ yes |= (a->d[1] > SECP256K1_N_1) & ~no;
+ yes |= (a->d[0] >= SECP256K1_N_0) & ~no;
+ return yes;
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_reduce(secp256k1_scalar *r, uint32_t overflow) {
+ uint64_t t;
+ VERIFY_CHECK(overflow <= 1);
+ t = (uint64_t)r->d[0] + overflow * SECP256K1_N_C_0;
+ r->d[0] = t & 0xFFFFFFFFUL; t >>= 32;
+ t += (uint64_t)r->d[1] + overflow * SECP256K1_N_C_1;
+ r->d[1] = t & 0xFFFFFFFFUL; t >>= 32;
+ t += (uint64_t)r->d[2] + overflow * SECP256K1_N_C_2;
+ r->d[2] = t & 0xFFFFFFFFUL; t >>= 32;
+ t += (uint64_t)r->d[3] + overflow * SECP256K1_N_C_3;
+ r->d[3] = t & 0xFFFFFFFFUL; t >>= 32;
+ t += (uint64_t)r->d[4] + overflow * SECP256K1_N_C_4;
+ r->d[4] = t & 0xFFFFFFFFUL; t >>= 32;
+ t += (uint64_t)r->d[5];
+ r->d[5] = t & 0xFFFFFFFFUL; t >>= 32;
+ t += (uint64_t)r->d[6];
+ r->d[6] = t & 0xFFFFFFFFUL; t >>= 32;
+ t += (uint64_t)r->d[7];
+ r->d[7] = t & 0xFFFFFFFFUL;
+ return overflow;
+}
+
+static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ int overflow;
+ uint64_t t = (uint64_t)a->d[0] + b->d[0];
+ r->d[0] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)a->d[1] + b->d[1];
+ r->d[1] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)a->d[2] + b->d[2];
+ r->d[2] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)a->d[3] + b->d[3];
+ r->d[3] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)a->d[4] + b->d[4];
+ r->d[4] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)a->d[5] + b->d[5];
+ r->d[5] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)a->d[6] + b->d[6];
+ r->d[6] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)a->d[7] + b->d[7];
+ r->d[7] = t & 0xFFFFFFFFULL; t >>= 32;
+ overflow = t + secp256k1_scalar_check_overflow(r);
+ VERIFY_CHECK(overflow == 0 || overflow == 1);
+ secp256k1_scalar_reduce(r, overflow);
+ return overflow;
+}
+
+static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
+ uint64_t t;
+ VERIFY_CHECK(bit < 256);
+ bit += ((uint32_t) flag - 1) & 0x100; /* forcing (bit >> 5) > 7 makes this a noop */
+ t = (uint64_t)r->d[0] + (((uint32_t)((bit >> 5) == 0)) << (bit & 0x1F));
+ r->d[0] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)r->d[1] + (((uint32_t)((bit >> 5) == 1)) << (bit & 0x1F));
+ r->d[1] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)r->d[2] + (((uint32_t)((bit >> 5) == 2)) << (bit & 0x1F));
+ r->d[2] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)r->d[3] + (((uint32_t)((bit >> 5) == 3)) << (bit & 0x1F));
+ r->d[3] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)r->d[4] + (((uint32_t)((bit >> 5) == 4)) << (bit & 0x1F));
+ r->d[4] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)r->d[5] + (((uint32_t)((bit >> 5) == 5)) << (bit & 0x1F));
+ r->d[5] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)r->d[6] + (((uint32_t)((bit >> 5) == 6)) << (bit & 0x1F));
+ r->d[6] = t & 0xFFFFFFFFULL; t >>= 32;
+ t += (uint64_t)r->d[7] + (((uint32_t)((bit >> 5) == 7)) << (bit & 0x1F));
+ r->d[7] = t & 0xFFFFFFFFULL;
+#ifdef VERIFY
+ VERIFY_CHECK((t >> 32) == 0);
+ VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
+#endif
+}
+
+static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
+ int over;
+ r->d[0] = (uint32_t)b32[31] | (uint32_t)b32[30] << 8 | (uint32_t)b32[29] << 16 | (uint32_t)b32[28] << 24;
+ r->d[1] = (uint32_t)b32[27] | (uint32_t)b32[26] << 8 | (uint32_t)b32[25] << 16 | (uint32_t)b32[24] << 24;
+ r->d[2] = (uint32_t)b32[23] | (uint32_t)b32[22] << 8 | (uint32_t)b32[21] << 16 | (uint32_t)b32[20] << 24;
+ r->d[3] = (uint32_t)b32[19] | (uint32_t)b32[18] << 8 | (uint32_t)b32[17] << 16 | (uint32_t)b32[16] << 24;
+ r->d[4] = (uint32_t)b32[15] | (uint32_t)b32[14] << 8 | (uint32_t)b32[13] << 16 | (uint32_t)b32[12] << 24;
+ r->d[5] = (uint32_t)b32[11] | (uint32_t)b32[10] << 8 | (uint32_t)b32[9] << 16 | (uint32_t)b32[8] << 24;
+ r->d[6] = (uint32_t)b32[7] | (uint32_t)b32[6] << 8 | (uint32_t)b32[5] << 16 | (uint32_t)b32[4] << 24;
+ r->d[7] = (uint32_t)b32[3] | (uint32_t)b32[2] << 8 | (uint32_t)b32[1] << 16 | (uint32_t)b32[0] << 24;
+ over = secp256k1_scalar_reduce(r, secp256k1_scalar_check_overflow(r));
+ if (overflow) {
+ *overflow = over;
+ }
+}
+
+static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
+ bin[0] = a->d[7] >> 24; bin[1] = a->d[7] >> 16; bin[2] = a->d[7] >> 8; bin[3] = a->d[7];
+ bin[4] = a->d[6] >> 24; bin[5] = a->d[6] >> 16; bin[6] = a->d[6] >> 8; bin[7] = a->d[6];
+ bin[8] = a->d[5] >> 24; bin[9] = a->d[5] >> 16; bin[10] = a->d[5] >> 8; bin[11] = a->d[5];
+ bin[12] = a->d[4] >> 24; bin[13] = a->d[4] >> 16; bin[14] = a->d[4] >> 8; bin[15] = a->d[4];
+ bin[16] = a->d[3] >> 24; bin[17] = a->d[3] >> 16; bin[18] = a->d[3] >> 8; bin[19] = a->d[3];
+ bin[20] = a->d[2] >> 24; bin[21] = a->d[2] >> 16; bin[22] = a->d[2] >> 8; bin[23] = a->d[2];
+ bin[24] = a->d[1] >> 24; bin[25] = a->d[1] >> 16; bin[26] = a->d[1] >> 8; bin[27] = a->d[1];
+ bin[28] = a->d[0] >> 24; bin[29] = a->d[0] >> 16; bin[30] = a->d[0] >> 8; bin[31] = a->d[0];
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
+ return (a->d[0] | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0;
+}
+
+static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
+ uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(a) == 0);
+ uint64_t t = (uint64_t)(~a->d[0]) + SECP256K1_N_0 + 1;
+ r->d[0] = t & nonzero; t >>= 32;
+ t += (uint64_t)(~a->d[1]) + SECP256K1_N_1;
+ r->d[1] = t & nonzero; t >>= 32;
+ t += (uint64_t)(~a->d[2]) + SECP256K1_N_2;
+ r->d[2] = t & nonzero; t >>= 32;
+ t += (uint64_t)(~a->d[3]) + SECP256K1_N_3;
+ r->d[3] = t & nonzero; t >>= 32;
+ t += (uint64_t)(~a->d[4]) + SECP256K1_N_4;
+ r->d[4] = t & nonzero; t >>= 32;
+ t += (uint64_t)(~a->d[5]) + SECP256K1_N_5;
+ r->d[5] = t & nonzero; t >>= 32;
+ t += (uint64_t)(~a->d[6]) + SECP256K1_N_6;
+ r->d[6] = t & nonzero; t >>= 32;
+ t += (uint64_t)(~a->d[7]) + SECP256K1_N_7;
+ r->d[7] = t & nonzero;
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
+ return ((a->d[0] ^ 1) | a->d[1] | a->d[2] | a->d[3] | a->d[4] | a->d[5] | a->d[6] | a->d[7]) == 0;
+}
+
+static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
+ int yes = 0;
+ int no = 0;
+ no |= (a->d[7] < SECP256K1_N_H_7);
+ yes |= (a->d[7] > SECP256K1_N_H_7) & ~no;
+ no |= (a->d[6] < SECP256K1_N_H_6) & ~yes; /* No need for a > check. */
+ no |= (a->d[5] < SECP256K1_N_H_5) & ~yes; /* No need for a > check. */
+ no |= (a->d[4] < SECP256K1_N_H_4) & ~yes; /* No need for a > check. */
+ no |= (a->d[3] < SECP256K1_N_H_3) & ~yes;
+ yes |= (a->d[3] > SECP256K1_N_H_3) & ~no;
+ no |= (a->d[2] < SECP256K1_N_H_2) & ~yes;
+ yes |= (a->d[2] > SECP256K1_N_H_2) & ~no;
+ no |= (a->d[1] < SECP256K1_N_H_1) & ~yes;
+ yes |= (a->d[1] > SECP256K1_N_H_1) & ~no;
+ yes |= (a->d[0] > SECP256K1_N_H_0) & ~no;
+ return yes;
+}
+
+static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
+ /* If flag = 0, mask = 00...00 and this is a no-op;
+ * if flag = 1, mask = 11...11 and this is identical to secp256k1_scalar_negate. */
+ uint32_t mask = !flag - 1;
+ uint32_t nonzero = 0xFFFFFFFFUL * (secp256k1_scalar_is_zero(r) == 0);
+ uint64_t t = (uint64_t)(r->d[0] ^ mask) + ((SECP256K1_N_0 + 1) & mask);
+ r->d[0] = t & nonzero; t >>= 32;
+ t += (uint64_t)(r->d[1] ^ mask) + (SECP256K1_N_1 & mask);
+ r->d[1] = t & nonzero; t >>= 32;
+ t += (uint64_t)(r->d[2] ^ mask) + (SECP256K1_N_2 & mask);
+ r->d[2] = t & nonzero; t >>= 32;
+ t += (uint64_t)(r->d[3] ^ mask) + (SECP256K1_N_3 & mask);
+ r->d[3] = t & nonzero; t >>= 32;
+ t += (uint64_t)(r->d[4] ^ mask) + (SECP256K1_N_4 & mask);
+ r->d[4] = t & nonzero; t >>= 32;
+ t += (uint64_t)(r->d[5] ^ mask) + (SECP256K1_N_5 & mask);
+ r->d[5] = t & nonzero; t >>= 32;
+ t += (uint64_t)(r->d[6] ^ mask) + (SECP256K1_N_6 & mask);
+ r->d[6] = t & nonzero; t >>= 32;
+ t += (uint64_t)(r->d[7] ^ mask) + (SECP256K1_N_7 & mask);
+ r->d[7] = t & nonzero;
+ return 2 * (mask == 0) - 1;
+}
+
+
+/* Inspired by the macros in OpenSSL's crypto/bn/asm/x86_64-gcc.c. */
+
+/** Add a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
+#define muladd(a,b) { \
+ uint32_t tl, th; \
+ { \
+ uint64_t t = (uint64_t)a * b; \
+ th = t >> 32; /* at most 0xFFFFFFFE */ \
+ tl = t; \
+ } \
+ c0 += tl; /* overflow is handled on the next line */ \
+ th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFF */ \
+ c1 += th; /* overflow is handled on the next line */ \
+ c2 += (c1 < th) ? 1 : 0; /* never overflows by contract (verified in the next line) */ \
+ VERIFY_CHECK((c1 >= th) || (c2 != 0)); \
+}
+
+/** Add a*b to the number defined by (c0,c1). c1 must never overflow. */
+#define muladd_fast(a,b) { \
+ uint32_t tl, th; \
+ { \
+ uint64_t t = (uint64_t)a * b; \
+ th = t >> 32; /* at most 0xFFFFFFFE */ \
+ tl = t; \
+ } \
+ c0 += tl; /* overflow is handled on the next line */ \
+ th += (c0 < tl) ? 1 : 0; /* at most 0xFFFFFFFF */ \
+ c1 += th; /* never overflows by contract (verified in the next line) */ \
+ VERIFY_CHECK(c1 >= th); \
+}
+
+/** Add 2*a*b to the number defined by (c0,c1,c2). c2 must never overflow. */
+#define muladd2(a,b) { \
+ uint32_t tl, th, th2, tl2; \
+ { \
+ uint64_t t = (uint64_t)a * b; \
+ th = t >> 32; /* at most 0xFFFFFFFE */ \
+ tl = t; \
+ } \
+ th2 = th + th; /* at most 0xFFFFFFFE (in case th was 0x7FFFFFFF) */ \
+ c2 += (th2 < th) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ VERIFY_CHECK((th2 >= th) || (c2 != 0)); \
+ tl2 = tl + tl; /* at most 0xFFFFFFFE (in case the lowest 31 bits of tl were 0x7FFFFFFF) */ \
+ th2 += (tl2 < tl) ? 1 : 0; /* at most 0xFFFFFFFF */ \
+ c0 += tl2; /* overflow is handled on the next line */ \
+ th2 += (c0 < tl2) ? 1 : 0; /* second overflow is handled on the next line */ \
+ c2 += (c0 < tl2) & (th2 == 0); /* never overflows by contract (verified the next line) */ \
+ VERIFY_CHECK((c0 >= tl2) || (th2 != 0) || (c2 != 0)); \
+ c1 += th2; /* overflow is handled on the next line */ \
+ c2 += (c1 < th2) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ VERIFY_CHECK((c1 >= th2) || (c2 != 0)); \
+}
+
+/** Add a to the number defined by (c0,c1,c2). c2 must never overflow. */
+#define sumadd(a) { \
+ unsigned int over; \
+ c0 += (a); /* overflow is handled on the next line */ \
+ over = (c0 < (a)) ? 1 : 0; \
+ c1 += over; /* overflow is handled on the next line */ \
+ c2 += (c1 < over) ? 1 : 0; /* never overflows by contract */ \
+}
+
+/** Add a to the number defined by (c0,c1). c1 must never overflow, c2 must be zero. */
+#define sumadd_fast(a) { \
+ c0 += (a); /* overflow is handled on the next line */ \
+ c1 += (c0 < (a)) ? 1 : 0; /* never overflows by contract (verified the next line) */ \
+ VERIFY_CHECK((c1 != 0) | (c0 >= (a))); \
+ VERIFY_CHECK(c2 == 0); \
+}
+
+/** Extract the lowest 32 bits of (c0,c1,c2) into n, and shift the number right by 32 bits. */
+#define extract(n) { \
+ (n) = c0; \
+ c0 = c1; \
+ c1 = c2; \
+ c2 = 0; \
+}
+
+/** Extract the lowest 32 bits of (c0,c1,c2) into n, and shift the number right by 32 bits. c2 is required to be zero. */
+#define extract_fast(n) { \
+ (n) = c0; \
+ c0 = c1; \
+ c1 = 0; \
+ VERIFY_CHECK(c2 == 0); \
+}
+
+static void secp256k1_scalar_reduce_512(secp256k1_scalar *r, const uint32_t *l) {
+ uint64_t c;
+ uint32_t n0 = l[8], n1 = l[9], n2 = l[10], n3 = l[11], n4 = l[12], n5 = l[13], n6 = l[14], n7 = l[15];
+ uint32_t m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12;
+ uint32_t p0, p1, p2, p3, p4, p5, p6, p7, p8;
+
+ /* 96 bit accumulator. */
+ uint32_t c0, c1, c2;
+
+ /* Reduce 512 bits into 385. */
+ /* m[0..12] = l[0..7] + n[0..7] * SECP256K1_N_C. */
+ c0 = l[0]; c1 = 0; c2 = 0;
+ muladd_fast(n0, SECP256K1_N_C_0);
+ extract_fast(m0);
+ sumadd_fast(l[1]);
+ muladd(n1, SECP256K1_N_C_0);
+ muladd(n0, SECP256K1_N_C_1);
+ extract(m1);
+ sumadd(l[2]);
+ muladd(n2, SECP256K1_N_C_0);
+ muladd(n1, SECP256K1_N_C_1);
+ muladd(n0, SECP256K1_N_C_2);
+ extract(m2);
+ sumadd(l[3]);
+ muladd(n3, SECP256K1_N_C_0);
+ muladd(n2, SECP256K1_N_C_1);
+ muladd(n1, SECP256K1_N_C_2);
+ muladd(n0, SECP256K1_N_C_3);
+ extract(m3);
+ sumadd(l[4]);
+ muladd(n4, SECP256K1_N_C_0);
+ muladd(n3, SECP256K1_N_C_1);
+ muladd(n2, SECP256K1_N_C_2);
+ muladd(n1, SECP256K1_N_C_3);
+ sumadd(n0);
+ extract(m4);
+ sumadd(l[5]);
+ muladd(n5, SECP256K1_N_C_0);
+ muladd(n4, SECP256K1_N_C_1);
+ muladd(n3, SECP256K1_N_C_2);
+ muladd(n2, SECP256K1_N_C_3);
+ sumadd(n1);
+ extract(m5);
+ sumadd(l[6]);
+ muladd(n6, SECP256K1_N_C_0);
+ muladd(n5, SECP256K1_N_C_1);
+ muladd(n4, SECP256K1_N_C_2);
+ muladd(n3, SECP256K1_N_C_3);
+ sumadd(n2);
+ extract(m6);
+ sumadd(l[7]);
+ muladd(n7, SECP256K1_N_C_0);
+ muladd(n6, SECP256K1_N_C_1);
+ muladd(n5, SECP256K1_N_C_2);
+ muladd(n4, SECP256K1_N_C_3);
+ sumadd(n3);
+ extract(m7);
+ muladd(n7, SECP256K1_N_C_1);
+ muladd(n6, SECP256K1_N_C_2);
+ muladd(n5, SECP256K1_N_C_3);
+ sumadd(n4);
+ extract(m8);
+ muladd(n7, SECP256K1_N_C_2);
+ muladd(n6, SECP256K1_N_C_3);
+ sumadd(n5);
+ extract(m9);
+ muladd(n7, SECP256K1_N_C_3);
+ sumadd(n6);
+ extract(m10);
+ sumadd_fast(n7);
+ extract_fast(m11);
+ VERIFY_CHECK(c0 <= 1);
+ m12 = c0;
+
+ /* Reduce 385 bits into 258. */
+ /* p[0..8] = m[0..7] + m[8..12] * SECP256K1_N_C. */
+ c0 = m0; c1 = 0; c2 = 0;
+ muladd_fast(m8, SECP256K1_N_C_0);
+ extract_fast(p0);
+ sumadd_fast(m1);
+ muladd(m9, SECP256K1_N_C_0);
+ muladd(m8, SECP256K1_N_C_1);
+ extract(p1);
+ sumadd(m2);
+ muladd(m10, SECP256K1_N_C_0);
+ muladd(m9, SECP256K1_N_C_1);
+ muladd(m8, SECP256K1_N_C_2);
+ extract(p2);
+ sumadd(m3);
+ muladd(m11, SECP256K1_N_C_0);
+ muladd(m10, SECP256K1_N_C_1);
+ muladd(m9, SECP256K1_N_C_2);
+ muladd(m8, SECP256K1_N_C_3);
+ extract(p3);
+ sumadd(m4);
+ muladd(m12, SECP256K1_N_C_0);
+ muladd(m11, SECP256K1_N_C_1);
+ muladd(m10, SECP256K1_N_C_2);
+ muladd(m9, SECP256K1_N_C_3);
+ sumadd(m8);
+ extract(p4);
+ sumadd(m5);
+ muladd(m12, SECP256K1_N_C_1);
+ muladd(m11, SECP256K1_N_C_2);
+ muladd(m10, SECP256K1_N_C_3);
+ sumadd(m9);
+ extract(p5);
+ sumadd(m6);
+ muladd(m12, SECP256K1_N_C_2);
+ muladd(m11, SECP256K1_N_C_3);
+ sumadd(m10);
+ extract(p6);
+ sumadd_fast(m7);
+ muladd_fast(m12, SECP256K1_N_C_3);
+ sumadd_fast(m11);
+ extract_fast(p7);
+ p8 = c0 + m12;
+ VERIFY_CHECK(p8 <= 2);
+
+ /* Reduce 258 bits into 256. */
+ /* r[0..7] = p[0..7] + p[8] * SECP256K1_N_C. */
+ c = p0 + (uint64_t)SECP256K1_N_C_0 * p8;
+ r->d[0] = c & 0xFFFFFFFFUL; c >>= 32;
+ c += p1 + (uint64_t)SECP256K1_N_C_1 * p8;
+ r->d[1] = c & 0xFFFFFFFFUL; c >>= 32;
+ c += p2 + (uint64_t)SECP256K1_N_C_2 * p8;
+ r->d[2] = c & 0xFFFFFFFFUL; c >>= 32;
+ c += p3 + (uint64_t)SECP256K1_N_C_3 * p8;
+ r->d[3] = c & 0xFFFFFFFFUL; c >>= 32;
+ c += p4 + (uint64_t)p8;
+ r->d[4] = c & 0xFFFFFFFFUL; c >>= 32;
+ c += p5;
+ r->d[5] = c & 0xFFFFFFFFUL; c >>= 32;
+ c += p6;
+ r->d[6] = c & 0xFFFFFFFFUL; c >>= 32;
+ c += p7;
+ r->d[7] = c & 0xFFFFFFFFUL; c >>= 32;
+
+ /* Final reduction of r. */
+ secp256k1_scalar_reduce(r, c + secp256k1_scalar_check_overflow(r));
+}
+
+static void secp256k1_scalar_mul_512(uint32_t *l, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ /* 96 bit accumulator. */
+ uint32_t c0 = 0, c1 = 0, c2 = 0;
+
+ /* l[0..15] = a[0..7] * b[0..7]. */
+ muladd_fast(a->d[0], b->d[0]);
+ extract_fast(l[0]);
+ muladd(a->d[0], b->d[1]);
+ muladd(a->d[1], b->d[0]);
+ extract(l[1]);
+ muladd(a->d[0], b->d[2]);
+ muladd(a->d[1], b->d[1]);
+ muladd(a->d[2], b->d[0]);
+ extract(l[2]);
+ muladd(a->d[0], b->d[3]);
+ muladd(a->d[1], b->d[2]);
+ muladd(a->d[2], b->d[1]);
+ muladd(a->d[3], b->d[0]);
+ extract(l[3]);
+ muladd(a->d[0], b->d[4]);
+ muladd(a->d[1], b->d[3]);
+ muladd(a->d[2], b->d[2]);
+ muladd(a->d[3], b->d[1]);
+ muladd(a->d[4], b->d[0]);
+ extract(l[4]);
+ muladd(a->d[0], b->d[5]);
+ muladd(a->d[1], b->d[4]);
+ muladd(a->d[2], b->d[3]);
+ muladd(a->d[3], b->d[2]);
+ muladd(a->d[4], b->d[1]);
+ muladd(a->d[5], b->d[0]);
+ extract(l[5]);
+ muladd(a->d[0], b->d[6]);
+ muladd(a->d[1], b->d[5]);
+ muladd(a->d[2], b->d[4]);
+ muladd(a->d[3], b->d[3]);
+ muladd(a->d[4], b->d[2]);
+ muladd(a->d[5], b->d[1]);
+ muladd(a->d[6], b->d[0]);
+ extract(l[6]);
+ muladd(a->d[0], b->d[7]);
+ muladd(a->d[1], b->d[6]);
+ muladd(a->d[2], b->d[5]);
+ muladd(a->d[3], b->d[4]);
+ muladd(a->d[4], b->d[3]);
+ muladd(a->d[5], b->d[2]);
+ muladd(a->d[6], b->d[1]);
+ muladd(a->d[7], b->d[0]);
+ extract(l[7]);
+ muladd(a->d[1], b->d[7]);
+ muladd(a->d[2], b->d[6]);
+ muladd(a->d[3], b->d[5]);
+ muladd(a->d[4], b->d[4]);
+ muladd(a->d[5], b->d[3]);
+ muladd(a->d[6], b->d[2]);
+ muladd(a->d[7], b->d[1]);
+ extract(l[8]);
+ muladd(a->d[2], b->d[7]);
+ muladd(a->d[3], b->d[6]);
+ muladd(a->d[4], b->d[5]);
+ muladd(a->d[5], b->d[4]);
+ muladd(a->d[6], b->d[3]);
+ muladd(a->d[7], b->d[2]);
+ extract(l[9]);
+ muladd(a->d[3], b->d[7]);
+ muladd(a->d[4], b->d[6]);
+ muladd(a->d[5], b->d[5]);
+ muladd(a->d[6], b->d[4]);
+ muladd(a->d[7], b->d[3]);
+ extract(l[10]);
+ muladd(a->d[4], b->d[7]);
+ muladd(a->d[5], b->d[6]);
+ muladd(a->d[6], b->d[5]);
+ muladd(a->d[7], b->d[4]);
+ extract(l[11]);
+ muladd(a->d[5], b->d[7]);
+ muladd(a->d[6], b->d[6]);
+ muladd(a->d[7], b->d[5]);
+ extract(l[12]);
+ muladd(a->d[6], b->d[7]);
+ muladd(a->d[7], b->d[6]);
+ extract(l[13]);
+ muladd_fast(a->d[7], b->d[7]);
+ extract_fast(l[14]);
+ VERIFY_CHECK(c1 == 0);
+ l[15] = c0;
+}
+
+static void secp256k1_scalar_sqr_512(uint32_t *l, const secp256k1_scalar *a) {
+ /* 96 bit accumulator. */
+ uint32_t c0 = 0, c1 = 0, c2 = 0;
+
+ /* l[0..15] = a[0..7]^2. */
+ muladd_fast(a->d[0], a->d[0]);
+ extract_fast(l[0]);
+ muladd2(a->d[0], a->d[1]);
+ extract(l[1]);
+ muladd2(a->d[0], a->d[2]);
+ muladd(a->d[1], a->d[1]);
+ extract(l[2]);
+ muladd2(a->d[0], a->d[3]);
+ muladd2(a->d[1], a->d[2]);
+ extract(l[3]);
+ muladd2(a->d[0], a->d[4]);
+ muladd2(a->d[1], a->d[3]);
+ muladd(a->d[2], a->d[2]);
+ extract(l[4]);
+ muladd2(a->d[0], a->d[5]);
+ muladd2(a->d[1], a->d[4]);
+ muladd2(a->d[2], a->d[3]);
+ extract(l[5]);
+ muladd2(a->d[0], a->d[6]);
+ muladd2(a->d[1], a->d[5]);
+ muladd2(a->d[2], a->d[4]);
+ muladd(a->d[3], a->d[3]);
+ extract(l[6]);
+ muladd2(a->d[0], a->d[7]);
+ muladd2(a->d[1], a->d[6]);
+ muladd2(a->d[2], a->d[5]);
+ muladd2(a->d[3], a->d[4]);
+ extract(l[7]);
+ muladd2(a->d[1], a->d[7]);
+ muladd2(a->d[2], a->d[6]);
+ muladd2(a->d[3], a->d[5]);
+ muladd(a->d[4], a->d[4]);
+ extract(l[8]);
+ muladd2(a->d[2], a->d[7]);
+ muladd2(a->d[3], a->d[6]);
+ muladd2(a->d[4], a->d[5]);
+ extract(l[9]);
+ muladd2(a->d[3], a->d[7]);
+ muladd2(a->d[4], a->d[6]);
+ muladd(a->d[5], a->d[5]);
+ extract(l[10]);
+ muladd2(a->d[4], a->d[7]);
+ muladd2(a->d[5], a->d[6]);
+ extract(l[11]);
+ muladd2(a->d[5], a->d[7]);
+ muladd(a->d[6], a->d[6]);
+ extract(l[12]);
+ muladd2(a->d[6], a->d[7]);
+ extract(l[13]);
+ muladd_fast(a->d[7], a->d[7]);
+ extract_fast(l[14]);
+ VERIFY_CHECK(c1 == 0);
+ l[15] = c0;
+}
+
+#undef sumadd
+#undef sumadd_fast
+#undef muladd
+#undef muladd_fast
+#undef muladd2
+#undef extract
+#undef extract_fast
+
+static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ uint32_t l[16];
+ secp256k1_scalar_mul_512(l, a, b);
+ secp256k1_scalar_reduce_512(r, l);
+}
+
+static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
+ int ret;
+ VERIFY_CHECK(n > 0);
+ VERIFY_CHECK(n < 16);
+ ret = r->d[0] & ((1 << n) - 1);
+ r->d[0] = (r->d[0] >> n) + (r->d[1] << (32 - n));
+ r->d[1] = (r->d[1] >> n) + (r->d[2] << (32 - n));
+ r->d[2] = (r->d[2] >> n) + (r->d[3] << (32 - n));
+ r->d[3] = (r->d[3] >> n) + (r->d[4] << (32 - n));
+ r->d[4] = (r->d[4] >> n) + (r->d[5] << (32 - n));
+ r->d[5] = (r->d[5] >> n) + (r->d[6] << (32 - n));
+ r->d[6] = (r->d[6] >> n) + (r->d[7] << (32 - n));
+ r->d[7] = (r->d[7] >> n);
+ return ret;
+}
+
+static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) {
+ uint32_t l[16];
+ secp256k1_scalar_sqr_512(l, a);
+ secp256k1_scalar_reduce_512(r, l);
+}
+
+#ifdef USE_ENDOMORPHISM
+static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
+ r1->d[0] = a->d[0];
+ r1->d[1] = a->d[1];
+ r1->d[2] = a->d[2];
+ r1->d[3] = a->d[3];
+ r1->d[4] = 0;
+ r1->d[5] = 0;
+ r1->d[6] = 0;
+ r1->d[7] = 0;
+ r2->d[0] = a->d[4];
+ r2->d[1] = a->d[5];
+ r2->d[2] = a->d[6];
+ r2->d[3] = a->d[7];
+ r2->d[4] = 0;
+ r2->d[5] = 0;
+ r2->d[6] = 0;
+ r2->d[7] = 0;
+}
+#endif
+
+SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ return ((a->d[0] ^ b->d[0]) | (a->d[1] ^ b->d[1]) | (a->d[2] ^ b->d[2]) | (a->d[3] ^ b->d[3]) | (a->d[4] ^ b->d[4]) | (a->d[5] ^ b->d[5]) | (a->d[6] ^ b->d[6]) | (a->d[7] ^ b->d[7])) == 0;
+}
+
+SECP256K1_INLINE static void secp256k1_scalar_mul_shift_var(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b, unsigned int shift) {
+ uint32_t l[16];
+ unsigned int shiftlimbs;
+ unsigned int shiftlow;
+ unsigned int shifthigh;
+ VERIFY_CHECK(shift >= 256);
+ secp256k1_scalar_mul_512(l, a, b);
+ shiftlimbs = shift >> 5;
+ shiftlow = shift & 0x1F;
+ shifthigh = 32 - shiftlow;
+ r->d[0] = shift < 512 ? (l[0 + shiftlimbs] >> shiftlow | (shift < 480 && shiftlow ? (l[1 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[1] = shift < 480 ? (l[1 + shiftlimbs] >> shiftlow | (shift < 448 && shiftlow ? (l[2 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[2] = shift < 448 ? (l[2 + shiftlimbs] >> shiftlow | (shift < 416 && shiftlow ? (l[3 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[3] = shift < 416 ? (l[3 + shiftlimbs] >> shiftlow | (shift < 384 && shiftlow ? (l[4 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[4] = shift < 384 ? (l[4 + shiftlimbs] >> shiftlow | (shift < 352 && shiftlow ? (l[5 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[5] = shift < 352 ? (l[5 + shiftlimbs] >> shiftlow | (shift < 320 && shiftlow ? (l[6 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[6] = shift < 320 ? (l[6 + shiftlimbs] >> shiftlow | (shift < 288 && shiftlow ? (l[7 + shiftlimbs] << shifthigh) : 0)) : 0;
+ r->d[7] = shift < 288 ? (l[7 + shiftlimbs] >> shiftlow) : 0;
+ secp256k1_scalar_cadd_bit(r, 0, (l[(shift - 1) >> 5] >> ((shift - 1) & 0x1f)) & 1);
+}
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/scalar_impl.h b/crypto/secp256k1/libsecp256k1/src/scalar_impl.h
new file mode 100644
index 000000000..f5b237640
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/scalar_impl.h
@@ -0,0 +1,370 @@
+/**********************************************************************
+ * Copyright (c) 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_SCALAR_IMPL_H_
+#define _SECP256K1_SCALAR_IMPL_H_
+
+#include "group.h"
+#include "scalar.h"
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+#if defined(EXHAUSTIVE_TEST_ORDER)
+#include "scalar_low_impl.h"
+#elif defined(USE_SCALAR_4X64)
+#include "scalar_4x64_impl.h"
+#elif defined(USE_SCALAR_8X32)
+#include "scalar_8x32_impl.h"
+#else
+#error "Please select scalar implementation"
+#endif
+
+#ifndef USE_NUM_NONE
+static void secp256k1_scalar_get_num(secp256k1_num *r, const secp256k1_scalar *a) {
+ unsigned char c[32];
+ secp256k1_scalar_get_b32(c, a);
+ secp256k1_num_set_bin(r, c, 32);
+}
+
+/** secp256k1 curve order, see secp256k1_ecdsa_const_order_as_fe in ecdsa_impl.h */
+static void secp256k1_scalar_order_get_num(secp256k1_num *r) {
+#if defined(EXHAUSTIVE_TEST_ORDER)
+ static const unsigned char order[32] = {
+ 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,EXHAUSTIVE_TEST_ORDER
+ };
+#else
+ static const unsigned char order[32] = {
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,
+ 0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B,
+ 0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41
+ };
+#endif
+ secp256k1_num_set_bin(r, order, 32);
+}
+#endif
+
+static void secp256k1_scalar_inverse(secp256k1_scalar *r, const secp256k1_scalar *x) {
+#if defined(EXHAUSTIVE_TEST_ORDER)
+ int i;
+ *r = 0;
+ for (i = 0; i < EXHAUSTIVE_TEST_ORDER; i++)
+ if ((i * *x) % EXHAUSTIVE_TEST_ORDER == 1)
+ *r = i;
+ /* If this VERIFY_CHECK triggers we were given a noninvertible scalar (and thus
+ * have a composite group order; fix it in exhaustive_tests.c). */
+ VERIFY_CHECK(*r != 0);
+}
+#else
+ secp256k1_scalar *t;
+ int i;
+ /* First compute x ^ (2^N - 1) for some values of N. */
+ secp256k1_scalar x2, x3, x4, x6, x7, x8, x15, x30, x60, x120, x127;
+
+ secp256k1_scalar_sqr(&x2, x);
+ secp256k1_scalar_mul(&x2, &x2, x);
+
+ secp256k1_scalar_sqr(&x3, &x2);
+ secp256k1_scalar_mul(&x3, &x3, x);
+
+ secp256k1_scalar_sqr(&x4, &x3);
+ secp256k1_scalar_mul(&x4, &x4, x);
+
+ secp256k1_scalar_sqr(&x6, &x4);
+ secp256k1_scalar_sqr(&x6, &x6);
+ secp256k1_scalar_mul(&x6, &x6, &x2);
+
+ secp256k1_scalar_sqr(&x7, &x6);
+ secp256k1_scalar_mul(&x7, &x7, x);
+
+ secp256k1_scalar_sqr(&x8, &x7);
+ secp256k1_scalar_mul(&x8, &x8, x);
+
+ secp256k1_scalar_sqr(&x15, &x8);
+ for (i = 0; i < 6; i++) {
+ secp256k1_scalar_sqr(&x15, &x15);
+ }
+ secp256k1_scalar_mul(&x15, &x15, &x7);
+
+ secp256k1_scalar_sqr(&x30, &x15);
+ for (i = 0; i < 14; i++) {
+ secp256k1_scalar_sqr(&x30, &x30);
+ }
+ secp256k1_scalar_mul(&x30, &x30, &x15);
+
+ secp256k1_scalar_sqr(&x60, &x30);
+ for (i = 0; i < 29; i++) {
+ secp256k1_scalar_sqr(&x60, &x60);
+ }
+ secp256k1_scalar_mul(&x60, &x60, &x30);
+
+ secp256k1_scalar_sqr(&x120, &x60);
+ for (i = 0; i < 59; i++) {
+ secp256k1_scalar_sqr(&x120, &x120);
+ }
+ secp256k1_scalar_mul(&x120, &x120, &x60);
+
+ secp256k1_scalar_sqr(&x127, &x120);
+ for (i = 0; i < 6; i++) {
+ secp256k1_scalar_sqr(&x127, &x127);
+ }
+ secp256k1_scalar_mul(&x127, &x127, &x7);
+
+ /* Then accumulate the final result (t starts at x127). */
+ t = &x127;
+ for (i = 0; i < 2; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 4; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x3); /* 111 */
+ for (i = 0; i < 2; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 2; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 2; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 4; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x3); /* 111 */
+ for (i = 0; i < 3; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x2); /* 11 */
+ for (i = 0; i < 4; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x3); /* 111 */
+ for (i = 0; i < 5; i++) { /* 00 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x3); /* 111 */
+ for (i = 0; i < 4; i++) { /* 00 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x2); /* 11 */
+ for (i = 0; i < 2; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 2; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 5; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x4); /* 1111 */
+ for (i = 0; i < 2; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 3; i++) { /* 00 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 4; i++) { /* 000 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 2; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 10; i++) { /* 0000000 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x3); /* 111 */
+ for (i = 0; i < 4; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x3); /* 111 */
+ for (i = 0; i < 9; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x8); /* 11111111 */
+ for (i = 0; i < 2; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 3; i++) { /* 00 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 3; i++) { /* 00 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 5; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x4); /* 1111 */
+ for (i = 0; i < 2; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 5; i++) { /* 000 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x2); /* 11 */
+ for (i = 0; i < 4; i++) { /* 00 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x2); /* 11 */
+ for (i = 0; i < 2; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 8; i++) { /* 000000 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x2); /* 11 */
+ for (i = 0; i < 3; i++) { /* 0 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, &x2); /* 11 */
+ for (i = 0; i < 3; i++) { /* 00 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 6; i++) { /* 00000 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(t, t, x); /* 1 */
+ for (i = 0; i < 8; i++) { /* 00 */
+ secp256k1_scalar_sqr(t, t);
+ }
+ secp256k1_scalar_mul(r, t, &x6); /* 111111 */
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
+ return !(a->d[0] & 1);
+}
+#endif
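+
+/* Illustrative sketch, not part of the library: the addition chain above
+ * evaluates x^(n-2) mod the prime group order n, which by Fermat's little
+ * theorem is the inverse of x. The same idea, as plain square-and-multiply
+ * over a small prime with hypothetical uint64_t values x and n:
+ *
+ * uint64_t inv = 1, base = x % n, e = n - 2;
+ * while (e) {
+ * if (e & 1) inv = (inv * base) % n; // multiply when the exponent bit is set
+ * base = (base * base) % n; // square once per exponent bit
+ * e >>= 1;
+ * }
+ * // afterwards (inv * x) % n == 1, provided x % n != 0
+ */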
+
+static void secp256k1_scalar_inverse_var(secp256k1_scalar *r, const secp256k1_scalar *x) {
+#if defined(USE_SCALAR_INV_BUILTIN)
+ secp256k1_scalar_inverse(r, x);
+#elif defined(USE_SCALAR_INV_NUM)
+ unsigned char b[32];
+ secp256k1_num n, m;
+ secp256k1_scalar t = *x;
+ secp256k1_scalar_get_b32(b, &t);
+ secp256k1_num_set_bin(&n, b, 32);
+ secp256k1_scalar_order_get_num(&m);
+ secp256k1_num_mod_inverse(&n, &n, &m);
+ secp256k1_num_get_bin(b, 32, &n);
+ secp256k1_scalar_set_b32(r, b, NULL);
+ /* Verify that the inverse was computed correctly, without GMP code. */
+ secp256k1_scalar_mul(&t, &t, r);
+ CHECK(secp256k1_scalar_is_one(&t));
+#else
+#error "Please select scalar inverse implementation"
+#endif
+}
+
+#ifdef USE_ENDOMORPHISM
+#if defined(EXHAUSTIVE_TEST_ORDER)
+/**
+ * Find k1 and k2 given k, such that k1 + k2 * lambda == k mod n; unlike in the
+ * full case we don't bother making k1 and k2 be small, we just want them to be
+ * nontrivial to get full test coverage for the exhaustive tests. We therefore
+ * (arbitrarily) set k2 = k + 5 and k1 = k - k2 * lambda.
+ */
+static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
+ *r2 = (*a + 5) % EXHAUSTIVE_TEST_ORDER;
+ *r1 = (*a + (EXHAUSTIVE_TEST_ORDER - *r2) * EXHAUSTIVE_TEST_LAMBDA) % EXHAUSTIVE_TEST_ORDER;
+}
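+
+/* Illustrative only: with the small hypothetical parameters
+ * EXHAUSTIVE_TEST_ORDER = 13 and EXHAUSTIVE_TEST_LAMBDA = 9 (9^3 == 1 mod 13),
+ * k = 4 gives k2 = (4 + 5) % 13 = 9 and k1 = (4 + (13 - 9) * 9) % 13 = 1;
+ * indeed k1 + k2 * lambda = 1 + 81 = 82 == 4 (mod 13). */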
+#else
+/**
+ * The Secp256k1 curve has an endomorphism, where lambda * (x, y) = (beta * x, y), where
+ * lambda is {0x53,0x63,0xad,0x4c,0xc0,0x5c,0x30,0xe0,0xa5,0x26,0x1c,0x02,0x88,0x12,0x64,0x5a,
+ * 0x12,0x2e,0x22,0xea,0x20,0x81,0x66,0x78,0xdf,0x02,0x96,0x7c,0x1b,0x23,0xbd,0x72}
+ *
+ * "Guide to Elliptic Curve Cryptography" (Hankerson, Menezes, Vanstone) gives an algorithm
+ * (algorithm 3.74) to find k1 and k2 given k, such that k1 + k2 * lambda == k mod n, and k1
+ * and k2 have a small size.
+ * It relies on constants a1, b1, a2, b2. These constants for the value of lambda above are:
+ *
+ * - a1 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15}
+ * - b1 = -{0xe4,0x43,0x7e,0xd6,0x01,0x0e,0x88,0x28,0x6f,0x54,0x7f,0xa9,0x0a,0xbf,0xe4,0xc3}
+ * - a2 = {0x01,0x14,0xca,0x50,0xf7,0xa8,0xe2,0xf3,0xf6,0x57,0xc1,0x10,0x8d,0x9d,0x44,0xcf,0xd8}
+ * - b2 = {0x30,0x86,0xd2,0x21,0xa7,0xd4,0x6b,0xcd,0xe8,0x6c,0x90,0xe4,0x92,0x84,0xeb,0x15}
+ *
+ * The algorithm then computes c1 = round(b1 * k / n) and c2 = round(b2 * k / n), and gives
+ * k1 = k - (c1*a1 + c2*a2) and k2 = -(c1*b1 + c2*b2). Instead, we use modular arithmetic, and
+ * compute k1 as k - k2 * lambda, avoiding the need for constants a1 and a2.
+ *
+ * g1, g2 are precomputed constants used to replace division with a rounded multiplication
+ * when decomposing the scalar for an endomorphism-based point multiplication.
+ *
+ * The possibility of using precomputed estimates is mentioned in "Guide to Elliptic Curve
+ * Cryptography" (Hankerson, Menezes, Vanstone) in section 3.5.
+ *
+ * The derivation is described in the paper "Efficient Software Implementation of Public-Key
+ * Cryptography on Sensor Networks Using the MSP430X Microcontroller" (Gouvea, Oliveira, Lopez),
+ * Section 4.3 (here we use a somewhat higher-precision estimate):
+ * d = a1*b2 - b1*a2
+ * g1 = round((2^272)*b2/d)
+ * g2 = round((2^272)*b1/d)
+ *
+ * (Note that 'd' is also equal to the curve order here because [a1,b1] and [a2,b2] are found
+ * as outputs of the Extended Euclidean Algorithm on inputs 'order' and 'lambda').
+ *
+ * The function below splits a in r1 and r2, such that r1 + lambda * r2 == a (mod order).
+ */
+
+static void secp256k1_scalar_split_lambda(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
+ secp256k1_scalar c1, c2;
+ static const secp256k1_scalar minus_lambda = SECP256K1_SCALAR_CONST(
+ 0xAC9C52B3UL, 0x3FA3CF1FUL, 0x5AD9E3FDUL, 0x77ED9BA4UL,
+ 0xA880B9FCUL, 0x8EC739C2UL, 0xE0CFC810UL, 0xB51283CFUL
+ );
+ static const secp256k1_scalar minus_b1 = SECP256K1_SCALAR_CONST(
+ 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00000000UL,
+ 0xE4437ED6UL, 0x010E8828UL, 0x6F547FA9UL, 0x0ABFE4C3UL
+ );
+ static const secp256k1_scalar minus_b2 = SECP256K1_SCALAR_CONST(
+ 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFEUL,
+ 0x8A280AC5UL, 0x0774346DUL, 0xD765CDA8UL, 0x3DB1562CUL
+ );
+ static const secp256k1_scalar g1 = SECP256K1_SCALAR_CONST(
+ 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x00003086UL,
+ 0xD221A7D4UL, 0x6BCDE86CUL, 0x90E49284UL, 0xEB153DABUL
+ );
+ static const secp256k1_scalar g2 = SECP256K1_SCALAR_CONST(
+ 0x00000000UL, 0x00000000UL, 0x00000000UL, 0x0000E443UL,
+ 0x7ED6010EUL, 0x88286F54UL, 0x7FA90ABFUL, 0xE4C42212UL
+ );
+ VERIFY_CHECK(r1 != a);
+ VERIFY_CHECK(r2 != a);
+ /* these _var calls are constant time since the shift amount is constant */
+ secp256k1_scalar_mul_shift_var(&c1, a, &g1, 272);
+ secp256k1_scalar_mul_shift_var(&c2, a, &g2, 272);
+ secp256k1_scalar_mul(&c1, &c1, &minus_b1);
+ secp256k1_scalar_mul(&c2, &c2, &minus_b2);
+ secp256k1_scalar_add(r2, &c1, &c2);
+ secp256k1_scalar_mul(r1, r2, &minus_lambda);
+ secp256k1_scalar_add(r1, r1, a);
+}
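+
+/* Minimal sanity-check sketch, not part of the library: assuming a scalar
+ * constant 'lambda' holding the value documented above and some scalar a,
+ * the postcondition r1 + lambda * r2 == a (mod order) could be checked as:
+ *
+ * secp256k1_scalar r1, r2, chk;
+ * secp256k1_scalar_split_lambda(&r1, &r2, &a);
+ * secp256k1_scalar_mul(&chk, &r2, &lambda);
+ * secp256k1_scalar_add(&chk, &chk, &r1);
+ * VERIFY_CHECK(secp256k1_scalar_eq(&chk, &a));
+ */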
+#endif
+#endif
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/scalar_low.h b/crypto/secp256k1/libsecp256k1/src/scalar_low.h
new file mode 100644
index 000000000..5574c44c7
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/scalar_low.h
@@ -0,0 +1,15 @@
+/**********************************************************************
+ * Copyright (c) 2015 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_SCALAR_REPR_
+#define _SECP256K1_SCALAR_REPR_
+
+#include <stdint.h>
+
+/** A scalar modulo the group order of the secp256k1 curve. */
+typedef uint32_t secp256k1_scalar;
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/scalar_low_impl.h b/crypto/secp256k1/libsecp256k1/src/scalar_low_impl.h
new file mode 100644
index 000000000..4f94441f4
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/scalar_low_impl.h
@@ -0,0 +1,114 @@
+/**********************************************************************
+ * Copyright (c) 2015 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_SCALAR_REPR_IMPL_H_
+#define _SECP256K1_SCALAR_REPR_IMPL_H_
+
+#include "scalar.h"
+
+#include <string.h>
+
+SECP256K1_INLINE static int secp256k1_scalar_is_even(const secp256k1_scalar *a) {
+ return !(*a & 1);
+}
+
+SECP256K1_INLINE static void secp256k1_scalar_clear(secp256k1_scalar *r) { *r = 0; }
+SECP256K1_INLINE static void secp256k1_scalar_set_int(secp256k1_scalar *r, unsigned int v) { *r = v; }
+
+SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+ if (offset < 32)
+ return ((*a >> offset) & ((((uint32_t)1) << count) - 1));
+ else
+ return 0;
+}
+
+SECP256K1_INLINE static unsigned int secp256k1_scalar_get_bits_var(const secp256k1_scalar *a, unsigned int offset, unsigned int count) {
+ return secp256k1_scalar_get_bits(a, offset, count);
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_check_overflow(const secp256k1_scalar *a) { return *a >= EXHAUSTIVE_TEST_ORDER; }
+
+static int secp256k1_scalar_add(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ *r = (*a + *b) % EXHAUSTIVE_TEST_ORDER;
+ return *r < *b;
+}
+
+static void secp256k1_scalar_cadd_bit(secp256k1_scalar *r, unsigned int bit, int flag) {
+ if (flag && bit < 32)
+ *r += (1 << bit);
+#ifdef VERIFY
+ VERIFY_CHECK(secp256k1_scalar_check_overflow(r) == 0);
+#endif
+}
+
+static void secp256k1_scalar_set_b32(secp256k1_scalar *r, const unsigned char *b32, int *overflow) {
+ const int base = 0x100 % EXHAUSTIVE_TEST_ORDER;
+ int i;
+ *r = 0;
+ for (i = 0; i < 32; i++) {
+ *r = ((*r * base) + b32[i]) % EXHAUSTIVE_TEST_ORDER;
+ }
+ /* just report no overflow, since with such a small order it would almost always happen */
+ if (overflow) *overflow = 0;
+}
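+
+/* Worked example, illustrative only: with a hypothetical EXHAUSTIVE_TEST_ORDER
+ * of 13, base = 0x100 % 13 = 9, so folding in two bytes b0, b1 yields
+ * ((b0 * 9) + b1) % 13, which equals (b0 * 256 + b1) % 13 because 256 == 9 (mod 13). */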
+
+static void secp256k1_scalar_get_b32(unsigned char *bin, const secp256k1_scalar* a) {
+ memset(bin, 0, 32);
+ bin[28] = *a >> 24; bin[29] = *a >> 16; bin[30] = *a >> 8; bin[31] = *a;
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_is_zero(const secp256k1_scalar *a) {
+ return *a == 0;
+}
+
+static void secp256k1_scalar_negate(secp256k1_scalar *r, const secp256k1_scalar *a) {
+ if (*a == 0) {
+ *r = 0;
+ } else {
+ *r = EXHAUSTIVE_TEST_ORDER - *a;
+ }
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_is_one(const secp256k1_scalar *a) {
+ return *a == 1;
+}
+
+static int secp256k1_scalar_is_high(const secp256k1_scalar *a) {
+ return *a > EXHAUSTIVE_TEST_ORDER / 2;
+}
+
+static int secp256k1_scalar_cond_negate(secp256k1_scalar *r, int flag) {
+ if (flag) secp256k1_scalar_negate(r, r);
+ return flag ? -1 : 1;
+}
+
+static void secp256k1_scalar_mul(secp256k1_scalar *r, const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ *r = (*a * *b) % EXHAUSTIVE_TEST_ORDER;
+}
+
+static int secp256k1_scalar_shr_int(secp256k1_scalar *r, int n) {
+ int ret;
+ VERIFY_CHECK(n > 0);
+ VERIFY_CHECK(n < 16);
+ ret = *r & ((1 << n) - 1);
+ *r >>= n;
+ return ret;
+}
+
+static void secp256k1_scalar_sqr(secp256k1_scalar *r, const secp256k1_scalar *a) {
+ *r = (*a * *a) % EXHAUSTIVE_TEST_ORDER;
+}
+
+static void secp256k1_scalar_split_128(secp256k1_scalar *r1, secp256k1_scalar *r2, const secp256k1_scalar *a) {
+ *r1 = *a;
+ *r2 = 0;
+}
+
+SECP256K1_INLINE static int secp256k1_scalar_eq(const secp256k1_scalar *a, const secp256k1_scalar *b) {
+ return *a == *b;
+}
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/secp256k1.c b/crypto/secp256k1/libsecp256k1/src/secp256k1.c
new file mode 100755
index 000000000..7d637bfad
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/secp256k1.c
@@ -0,0 +1,559 @@
+/**********************************************************************
+ * Copyright (c) 2013-2015 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#include "include/secp256k1.h"
+
+#include "util.h"
+#include "num_impl.h"
+#include "field_impl.h"
+#include "scalar_impl.h"
+#include "group_impl.h"
+#include "ecmult_impl.h"
+#include "ecmult_const_impl.h"
+#include "ecmult_gen_impl.h"
+#include "ecdsa_impl.h"
+#include "eckey_impl.h"
+#include "hash_impl.h"
+
+#define ARG_CHECK(cond) do { \
+ if (EXPECT(!(cond), 0)) { \
+ secp256k1_callback_call(&ctx->illegal_callback, #cond); \
+ return 0; \
+ } \
+} while(0)
+
+static void default_illegal_callback_fn(const char* str, void* data) {
+ fprintf(stderr, "[libsecp256k1] illegal argument: %s\n", str);
+ abort();
+}
+
+static const secp256k1_callback default_illegal_callback = {
+ default_illegal_callback_fn,
+ NULL
+};
+
+static void default_error_callback_fn(const char* str, void* data) {
+ fprintf(stderr, "[libsecp256k1] internal consistency check failed: %s\n", str);
+ abort();
+}
+
+static const secp256k1_callback default_error_callback = {
+ default_error_callback_fn,
+ NULL
+};
+
+
+struct secp256k1_context_struct {
+ secp256k1_ecmult_context ecmult_ctx;
+ secp256k1_ecmult_gen_context ecmult_gen_ctx;
+ secp256k1_callback illegal_callback;
+ secp256k1_callback error_callback;
+};
+
+secp256k1_context* secp256k1_context_create(unsigned int flags) {
+ secp256k1_context* ret = (secp256k1_context*)checked_malloc(&default_error_callback, sizeof(secp256k1_context));
+ ret->illegal_callback = default_illegal_callback;
+ ret->error_callback = default_error_callback;
+
+ if (EXPECT((flags & SECP256K1_FLAGS_TYPE_MASK) != SECP256K1_FLAGS_TYPE_CONTEXT, 0)) {
+ secp256k1_callback_call(&ret->illegal_callback,
+ "Invalid flags");
+ free(ret);
+ return NULL;
+ }
+
+ secp256k1_ecmult_context_init(&ret->ecmult_ctx);
+ secp256k1_ecmult_gen_context_init(&ret->ecmult_gen_ctx);
+
+ if (flags & SECP256K1_FLAGS_BIT_CONTEXT_SIGN) {
+ secp256k1_ecmult_gen_context_build(&ret->ecmult_gen_ctx, &ret->error_callback);
+ }
+ if (flags & SECP256K1_FLAGS_BIT_CONTEXT_VERIFY) {
+ secp256k1_ecmult_context_build(&ret->ecmult_ctx, &ret->error_callback);
+ }
+
+ return ret;
+}
+
+secp256k1_context* secp256k1_context_clone(const secp256k1_context* ctx) {
+ secp256k1_context* ret = (secp256k1_context*)checked_malloc(&ctx->error_callback, sizeof(secp256k1_context));
+ ret->illegal_callback = ctx->illegal_callback;
+ ret->error_callback = ctx->error_callback;
+ secp256k1_ecmult_context_clone(&ret->ecmult_ctx, &ctx->ecmult_ctx, &ctx->error_callback);
+ secp256k1_ecmult_gen_context_clone(&ret->ecmult_gen_ctx, &ctx->ecmult_gen_ctx, &ctx->error_callback);
+ return ret;
+}
+
+void secp256k1_context_destroy(secp256k1_context* ctx) {
+ if (ctx != NULL) {
+ secp256k1_ecmult_context_clear(&ctx->ecmult_ctx);
+ secp256k1_ecmult_gen_context_clear(&ctx->ecmult_gen_ctx);
+
+ free(ctx);
+ }
+}
+
+void secp256k1_context_set_illegal_callback(secp256k1_context* ctx, void (*fun)(const char* message, void* data), const void* data) {
+ if (fun == NULL) {
+ fun = default_illegal_callback_fn;
+ }
+ ctx->illegal_callback.fn = fun;
+ ctx->illegal_callback.data = data;
+}
+
+void secp256k1_context_set_error_callback(secp256k1_context* ctx, void (*fun)(const char* message, void* data), const void* data) {
+ if (fun == NULL) {
+ fun = default_error_callback_fn;
+ }
+ ctx->error_callback.fn = fun;
+ ctx->error_callback.data = data;
+}
+
+static int secp256k1_pubkey_load(const secp256k1_context* ctx, secp256k1_ge* ge, const secp256k1_pubkey* pubkey) {
+ if (sizeof(secp256k1_ge_storage) == 64) {
+ /* When the secp256k1_ge_storage type is exactly 64 bytes, use its
+ * representation inside secp256k1_pubkey, as conversion is very fast.
+ * Note that secp256k1_pubkey_save must use the same representation. */
+ secp256k1_ge_storage s;
+ memcpy(&s, &pubkey->data[0], 64);
+ secp256k1_ge_from_storage(ge, &s);
+ } else {
+ /* Otherwise, fall back to 32-byte big endian for X and Y. */
+ secp256k1_fe x, y;
+ secp256k1_fe_set_b32(&x, pubkey->data);
+ secp256k1_fe_set_b32(&y, pubkey->data + 32);
+ secp256k1_ge_set_xy(ge, &x, &y);
+ }
+ ARG_CHECK(!secp256k1_fe_is_zero(&ge->x));
+ return 1;
+}
+
+static void secp256k1_pubkey_save(secp256k1_pubkey* pubkey, secp256k1_ge* ge) {
+ if (sizeof(secp256k1_ge_storage) == 64) {
+ secp256k1_ge_storage s;
+ secp256k1_ge_to_storage(&s, ge);
+ memcpy(&pubkey->data[0], &s, 64);
+ } else {
+ VERIFY_CHECK(!secp256k1_ge_is_infinity(ge));
+ secp256k1_fe_normalize_var(&ge->x);
+ secp256k1_fe_normalize_var(&ge->y);
+ secp256k1_fe_get_b32(pubkey->data, &ge->x);
+ secp256k1_fe_get_b32(pubkey->data + 32, &ge->y);
+ }
+}
+
+int secp256k1_ec_pubkey_parse(const secp256k1_context* ctx, secp256k1_pubkey* pubkey, const unsigned char *input, size_t inputlen) {
+ secp256k1_ge Q;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(pubkey != NULL);
+ memset(pubkey, 0, sizeof(*pubkey));
+ ARG_CHECK(input != NULL);
+ if (!secp256k1_eckey_pubkey_parse(&Q, input, inputlen)) {
+ return 0;
+ }
+ secp256k1_pubkey_save(pubkey, &Q);
+ secp256k1_ge_clear(&Q);
+ return 1;
+}
+
+int secp256k1_ec_pubkey_serialize(const secp256k1_context* ctx, unsigned char *output, size_t *outputlen, const secp256k1_pubkey* pubkey, unsigned int flags) {
+ secp256k1_ge Q;
+ size_t len;
+ int ret = 0;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(outputlen != NULL);
+ ARG_CHECK(*outputlen >= ((flags & SECP256K1_FLAGS_BIT_COMPRESSION) ? 33 : 65));
+ len = *outputlen;
+ *outputlen = 0;
+ ARG_CHECK(output != NULL);
+ memset(output, 0, len);
+ ARG_CHECK(pubkey != NULL);
+ ARG_CHECK((flags & SECP256K1_FLAGS_TYPE_MASK) == SECP256K1_FLAGS_TYPE_COMPRESSION);
+ if (secp256k1_pubkey_load(ctx, &Q, pubkey)) {
+ ret = secp256k1_eckey_pubkey_serialize(&Q, output, &len, flags & SECP256K1_FLAGS_BIT_COMPRESSION);
+ if (ret) {
+ *outputlen = len;
+ }
+ }
+ return ret;
+}
+
+static void secp256k1_ecdsa_signature_load(const secp256k1_context* ctx, secp256k1_scalar* r, secp256k1_scalar* s, const secp256k1_ecdsa_signature* sig) {
+ (void)ctx;
+ if (sizeof(secp256k1_scalar) == 32) {
+ /* When the secp256k1_scalar type is exactly 32 bytes, use its
+ * representation inside secp256k1_ecdsa_signature, as conversion is very fast.
+ * Note that secp256k1_ecdsa_signature_save must use the same representation. */
+ memcpy(r, &sig->data[0], 32);
+ memcpy(s, &sig->data[32], 32);
+ } else {
+ secp256k1_scalar_set_b32(r, &sig->data[0], NULL);
+ secp256k1_scalar_set_b32(s, &sig->data[32], NULL);
+ }
+}
+
+static void secp256k1_ecdsa_signature_save(secp256k1_ecdsa_signature* sig, const secp256k1_scalar* r, const secp256k1_scalar* s) {
+ if (sizeof(secp256k1_scalar) == 32) {
+ memcpy(&sig->data[0], r, 32);
+ memcpy(&sig->data[32], s, 32);
+ } else {
+ secp256k1_scalar_get_b32(&sig->data[0], r);
+ secp256k1_scalar_get_b32(&sig->data[32], s);
+ }
+}
+
+int secp256k1_ecdsa_signature_parse_der(const secp256k1_context* ctx, secp256k1_ecdsa_signature* sig, const unsigned char *input, size_t inputlen) {
+ secp256k1_scalar r, s;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(sig != NULL);
+ ARG_CHECK(input != NULL);
+
+ if (secp256k1_ecdsa_sig_parse(&r, &s, input, inputlen)) {
+ secp256k1_ecdsa_signature_save(sig, &r, &s);
+ return 1;
+ } else {
+ memset(sig, 0, sizeof(*sig));
+ return 0;
+ }
+}
+
+int secp256k1_ecdsa_signature_parse_compact(const secp256k1_context* ctx, secp256k1_ecdsa_signature* sig, const unsigned char *input64) {
+ secp256k1_scalar r, s;
+ int ret = 1;
+ int overflow = 0;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(sig != NULL);
+ ARG_CHECK(input64 != NULL);
+
+ secp256k1_scalar_set_b32(&r, &input64[0], &overflow);
+ ret &= !overflow;
+ secp256k1_scalar_set_b32(&s, &input64[32], &overflow);
+ ret &= !overflow;
+ if (ret) {
+ secp256k1_ecdsa_signature_save(sig, &r, &s);
+ } else {
+ memset(sig, 0, sizeof(*sig));
+ }
+ return ret;
+}
+
+int secp256k1_ecdsa_signature_serialize_der(const secp256k1_context* ctx, unsigned char *output, size_t *outputlen, const secp256k1_ecdsa_signature* sig) {
+ secp256k1_scalar r, s;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(output != NULL);
+ ARG_CHECK(outputlen != NULL);
+ ARG_CHECK(sig != NULL);
+
+ secp256k1_ecdsa_signature_load(ctx, &r, &s, sig);
+ return secp256k1_ecdsa_sig_serialize(output, outputlen, &r, &s);
+}
+
+int secp256k1_ecdsa_signature_serialize_compact(const secp256k1_context* ctx, unsigned char *output64, const secp256k1_ecdsa_signature* sig) {
+ secp256k1_scalar r, s;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(output64 != NULL);
+ ARG_CHECK(sig != NULL);
+
+ secp256k1_ecdsa_signature_load(ctx, &r, &s, sig);
+ secp256k1_scalar_get_b32(&output64[0], &r);
+ secp256k1_scalar_get_b32(&output64[32], &s);
+ return 1;
+}
+
+int secp256k1_ecdsa_signature_normalize(const secp256k1_context* ctx, secp256k1_ecdsa_signature *sigout, const secp256k1_ecdsa_signature *sigin) {
+ secp256k1_scalar r, s;
+ int ret = 0;
+
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(sigin != NULL);
+
+ secp256k1_ecdsa_signature_load(ctx, &r, &s, sigin);
+ ret = secp256k1_scalar_is_high(&s);
+ if (sigout != NULL) {
+ if (ret) {
+ secp256k1_scalar_negate(&s, &s);
+ }
+ secp256k1_ecdsa_signature_save(sigout, &r, &s);
+ }
+
+ return ret;
+}
+
+int secp256k1_ecdsa_verify(const secp256k1_context* ctx, const secp256k1_ecdsa_signature *sig, const unsigned char *msg32, const secp256k1_pubkey *pubkey) {
+ secp256k1_ge q;
+ secp256k1_scalar r, s;
+ secp256k1_scalar m;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
+ ARG_CHECK(msg32 != NULL);
+ ARG_CHECK(sig != NULL);
+ ARG_CHECK(pubkey != NULL);
+
+ secp256k1_scalar_set_b32(&m, msg32, NULL);
+ secp256k1_ecdsa_signature_load(ctx, &r, &s, sig);
+ return (!secp256k1_scalar_is_high(&s) &&
+ secp256k1_pubkey_load(ctx, &q, pubkey) &&
+ secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &r, &s, &q, &m));
+}
+
+static int nonce_function_rfc6979(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) {
+ unsigned char keydata[112];
+ int keylen = 64;
+ secp256k1_rfc6979_hmac_sha256_t rng;
+ unsigned int i;
+ /* We feed a byte array to the PRNG as input, consisting of:
+ * - the private key (32 bytes) and message (32 bytes), see RFC 6979 3.2d.
+ * - optionally 32 extra bytes of data, see RFC 6979 3.6 Additional Data.
+ * - optionally 16 extra bytes with the algorithm name.
+ * Because the arguments have distinct fixed lengths it is not possible for
+ * different argument mixtures to emulate each other and result in the same
+ * nonces.
+ */
+ memcpy(keydata, key32, 32);
+ memcpy(keydata + 32, msg32, 32);
+ if (data != NULL) {
+ memcpy(keydata + 64, data, 32);
+ keylen = 96;
+ }
+ if (algo16 != NULL) {
+ memcpy(keydata + keylen, algo16, 16);
+ keylen += 16;
+ }
+ secp256k1_rfc6979_hmac_sha256_initialize(&rng, keydata, keylen);
+ memset(keydata, 0, sizeof(keydata));
+ for (i = 0; i <= counter; i++) {
+ secp256k1_rfc6979_hmac_sha256_generate(&rng, nonce32, 32);
+ }
+ secp256k1_rfc6979_hmac_sha256_finalize(&rng);
+ return 1;
+}
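+
+/* For reference (derived from the code above): depending on which optional
+ * arguments are present, keylen is one of
+ * 64 (key32 || msg32),
+ * 80 (key32 || msg32 || algo16),
+ * 96 (key32 || msg32 || data32), or
+ * 112 (key32 || msg32 || data32 || algo16) bytes. */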
+
+const secp256k1_nonce_function secp256k1_nonce_function_rfc6979 = nonce_function_rfc6979;
+const secp256k1_nonce_function secp256k1_nonce_function_default = nonce_function_rfc6979;
+
+int secp256k1_ecdsa_sign(const secp256k1_context* ctx, secp256k1_ecdsa_signature *signature, const unsigned char *msg32, const unsigned char *seckey, secp256k1_nonce_function noncefp, const void* noncedata) {
+ secp256k1_scalar r, s;
+ secp256k1_scalar sec, non, msg;
+ int ret = 0;
+ int overflow = 0;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
+ ARG_CHECK(msg32 != NULL);
+ ARG_CHECK(signature != NULL);
+ ARG_CHECK(seckey != NULL);
+ if (noncefp == NULL) {
+ noncefp = secp256k1_nonce_function_default;
+ }
+
+ secp256k1_scalar_set_b32(&sec, seckey, &overflow);
+ /* Fail if the secret key is invalid. */
+ if (!overflow && !secp256k1_scalar_is_zero(&sec)) {
+ unsigned char nonce32[32];
+ unsigned int count = 0;
+ secp256k1_scalar_set_b32(&msg, msg32, NULL);
+ while (1) {
+ ret = noncefp(nonce32, msg32, seckey, NULL, (void*)noncedata, count);
+ if (!ret) {
+ break;
+ }
+ secp256k1_scalar_set_b32(&non, nonce32, &overflow);
+ if (!overflow && !secp256k1_scalar_is_zero(&non)) {
+ if (secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, &r, &s, &sec, &msg, &non, NULL)) {
+ break;
+ }
+ }
+ count++;
+ }
+ memset(nonce32, 0, 32);
+ secp256k1_scalar_clear(&msg);
+ secp256k1_scalar_clear(&non);
+ secp256k1_scalar_clear(&sec);
+ }
+ if (ret) {
+ secp256k1_ecdsa_signature_save(signature, &r, &s);
+ } else {
+ memset(signature, 0, sizeof(*signature));
+ }
+ return ret;
+}
+
+int secp256k1_ec_seckey_verify(const secp256k1_context* ctx, const unsigned char *seckey) {
+ secp256k1_scalar sec;
+ int ret;
+ int overflow;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(seckey != NULL);
+
+ secp256k1_scalar_set_b32(&sec, seckey, &overflow);
+ ret = !overflow && !secp256k1_scalar_is_zero(&sec);
+ secp256k1_scalar_clear(&sec);
+ return ret;
+}
+
+int secp256k1_ec_pubkey_create(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *seckey) {
+ secp256k1_gej pj;
+ secp256k1_ge p;
+ secp256k1_scalar sec;
+ int overflow;
+ int ret = 0;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(pubkey != NULL);
+ memset(pubkey, 0, sizeof(*pubkey));
+ ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
+ ARG_CHECK(seckey != NULL);
+
+ secp256k1_scalar_set_b32(&sec, seckey, &overflow);
+ ret = (!overflow) & (!secp256k1_scalar_is_zero(&sec));
+ if (ret) {
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pj, &sec);
+ secp256k1_ge_set_gej(&p, &pj);
+ secp256k1_pubkey_save(pubkey, &p);
+ }
+ secp256k1_scalar_clear(&sec);
+ return ret;
+}
+
+int secp256k1_ec_privkey_tweak_add(const secp256k1_context* ctx, unsigned char *seckey, const unsigned char *tweak) {
+ secp256k1_scalar term;
+ secp256k1_scalar sec;
+ int ret = 0;
+ int overflow = 0;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(seckey != NULL);
+ ARG_CHECK(tweak != NULL);
+
+ secp256k1_scalar_set_b32(&term, tweak, &overflow);
+ secp256k1_scalar_set_b32(&sec, seckey, NULL);
+
+ ret = !overflow && secp256k1_eckey_privkey_tweak_add(&sec, &term);
+ memset(seckey, 0, 32);
+ if (ret) {
+ secp256k1_scalar_get_b32(seckey, &sec);
+ }
+
+ secp256k1_scalar_clear(&sec);
+ secp256k1_scalar_clear(&term);
+ return ret;
+}
+
+int secp256k1_ec_pubkey_tweak_add(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *tweak) {
+ secp256k1_ge p;
+ secp256k1_scalar term;
+ int ret = 0;
+ int overflow = 0;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
+ ARG_CHECK(pubkey != NULL);
+ ARG_CHECK(tweak != NULL);
+
+ secp256k1_scalar_set_b32(&term, tweak, &overflow);
+ ret = !overflow && secp256k1_pubkey_load(ctx, &p, pubkey);
+ memset(pubkey, 0, sizeof(*pubkey));
+ if (ret) {
+ if (secp256k1_eckey_pubkey_tweak_add(&ctx->ecmult_ctx, &p, &term)) {
+ secp256k1_pubkey_save(pubkey, &p);
+ } else {
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
+int secp256k1_ec_privkey_tweak_mul(const secp256k1_context* ctx, unsigned char *seckey, const unsigned char *tweak) {
+ secp256k1_scalar factor;
+ secp256k1_scalar sec;
+ int ret = 0;
+ int overflow = 0;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(seckey != NULL);
+ ARG_CHECK(tweak != NULL);
+
+ secp256k1_scalar_set_b32(&factor, tweak, &overflow);
+ secp256k1_scalar_set_b32(&sec, seckey, NULL);
+ ret = !overflow && secp256k1_eckey_privkey_tweak_mul(&sec, &factor);
+ memset(seckey, 0, 32);
+ if (ret) {
+ secp256k1_scalar_get_b32(seckey, &sec);
+ }
+
+ secp256k1_scalar_clear(&sec);
+ secp256k1_scalar_clear(&factor);
+ return ret;
+}
+
+int secp256k1_ec_pubkey_tweak_mul(const secp256k1_context* ctx, secp256k1_pubkey *pubkey, const unsigned char *tweak) {
+ secp256k1_ge p;
+ secp256k1_scalar factor;
+ int ret = 0;
+ int overflow = 0;
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_context_is_built(&ctx->ecmult_ctx));
+ ARG_CHECK(pubkey != NULL);
+ ARG_CHECK(tweak != NULL);
+
+ secp256k1_scalar_set_b32(&factor, tweak, &overflow);
+ ret = !overflow && secp256k1_pubkey_load(ctx, &p, pubkey);
+ memset(pubkey, 0, sizeof(*pubkey));
+ if (ret) {
+ if (secp256k1_eckey_pubkey_tweak_mul(&ctx->ecmult_ctx, &p, &factor)) {
+ secp256k1_pubkey_save(pubkey, &p);
+ } else {
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
+int secp256k1_context_randomize(secp256k1_context* ctx, const unsigned char *seed32) {
+ VERIFY_CHECK(ctx != NULL);
+ ARG_CHECK(secp256k1_ecmult_gen_context_is_built(&ctx->ecmult_gen_ctx));
+ secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32);
+ return 1;
+}
+
+int secp256k1_ec_pubkey_combine(const secp256k1_context* ctx, secp256k1_pubkey *pubnonce, const secp256k1_pubkey * const *pubnonces, size_t n) {
+ size_t i;
+ secp256k1_gej Qj;
+ secp256k1_ge Q;
+
+ ARG_CHECK(pubnonce != NULL);
+ memset(pubnonce, 0, sizeof(*pubnonce));
+ ARG_CHECK(n >= 1);
+ ARG_CHECK(pubnonces != NULL);
+
+ secp256k1_gej_set_infinity(&Qj);
+
+ for (i = 0; i < n; i++) {
+ secp256k1_pubkey_load(ctx, &Q, pubnonces[i]);
+ secp256k1_gej_add_ge(&Qj, &Qj, &Q);
+ }
+ if (secp256k1_gej_is_infinity(&Qj)) {
+ return 0;
+ }
+ secp256k1_ge_set_gej(&Q, &Qj);
+ secp256k1_pubkey_save(pubnonce, &Q);
+ return 1;
+}
+
+#ifdef ENABLE_MODULE_ECDH
+# include "modules/ecdh/main_impl.h"
+#endif
+
+#ifdef ENABLE_MODULE_SCHNORR
+# include "modules/schnorr/main_impl.h"
+#endif
+
+#ifdef ENABLE_MODULE_RECOVERY
+# include "modules/recovery/main_impl.h"
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/testrand.h b/crypto/secp256k1/libsecp256k1/src/testrand.h
new file mode 100644
index 000000000..f8efa93c7
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/testrand.h
@@ -0,0 +1,38 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_TESTRAND_H_
+#define _SECP256K1_TESTRAND_H_
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+/* A non-cryptographic RNG used only for test infrastructure. */
+
+/** Seed the pseudorandom number generator for testing. */
+SECP256K1_INLINE static void secp256k1_rand_seed(const unsigned char *seed16);
+
+/** Generate a pseudorandom number in the range [0..2**32-1]. */
+static uint32_t secp256k1_rand32(void);
+
+/** Generate a pseudorandom number in the range [0..2**bits-1]. Bits must be 1 or
+ * more. */
+static uint32_t secp256k1_rand_bits(int bits);
+
+/** Generate a pseudorandom number in the range [0..range-1]. */
+static uint32_t secp256k1_rand_int(uint32_t range);
+
+/** Generate a pseudorandom 32-byte array. */
+static void secp256k1_rand256(unsigned char *b32);
+
+/** Generate a pseudorandom 32-byte array with long sequences of zero and one bits. */
+static void secp256k1_rand256_test(unsigned char *b32);
+
+/** Generate pseudorandom bytes with long sequences of zero and one bits. */
+static void secp256k1_rand_bytes_test(unsigned char *bytes, size_t len);
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/testrand_impl.h b/crypto/secp256k1/libsecp256k1/src/testrand_impl.h
new file mode 100644
index 000000000..15c7b9f12
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/testrand_impl.h
@@ -0,0 +1,110 @@
+/**********************************************************************
+ * Copyright (c) 2013-2015 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_TESTRAND_IMPL_H_
+#define _SECP256K1_TESTRAND_IMPL_H_
+
+#include <stdint.h>
+#include <string.h>
+
+#include "testrand.h"
+#include "hash.h"
+
+static secp256k1_rfc6979_hmac_sha256_t secp256k1_test_rng;
+static uint32_t secp256k1_test_rng_precomputed[8];
+static int secp256k1_test_rng_precomputed_used = 8;
+static uint64_t secp256k1_test_rng_integer;
+static int secp256k1_test_rng_integer_bits_left = 0;
+
+SECP256K1_INLINE static void secp256k1_rand_seed(const unsigned char *seed16) {
+ secp256k1_rfc6979_hmac_sha256_initialize(&secp256k1_test_rng, seed16, 16);
+}
+
+SECP256K1_INLINE static uint32_t secp256k1_rand32(void) {
+ if (secp256k1_test_rng_precomputed_used == 8) {
+ secp256k1_rfc6979_hmac_sha256_generate(&secp256k1_test_rng, (unsigned char*)(&secp256k1_test_rng_precomputed[0]), sizeof(secp256k1_test_rng_precomputed));
+ secp256k1_test_rng_precomputed_used = 0;
+ }
+ return secp256k1_test_rng_precomputed[secp256k1_test_rng_precomputed_used++];
+}
+
+static uint32_t secp256k1_rand_bits(int bits) {
+ uint32_t ret;
+ if (secp256k1_test_rng_integer_bits_left < bits) {
+ secp256k1_test_rng_integer |= (((uint64_t)secp256k1_rand32()) << secp256k1_test_rng_integer_bits_left);
+ secp256k1_test_rng_integer_bits_left += 32;
+ }
+ ret = secp256k1_test_rng_integer;
+ secp256k1_test_rng_integer >>= bits;
+ secp256k1_test_rng_integer_bits_left -= bits;
+ ret &= ((~((uint32_t)0)) >> (32 - bits));
+ return ret;
+}
+
+static uint32_t secp256k1_rand_int(uint32_t range) {
+ /* We want a uniform integer between 0 and range-1, inclusive.
+ * B is the smallest number such that range <= 2**B.
+ * Two mechanisms are implemented here:
+ * - generate B-bit numbers until one below range is found, and return it
+ * - find the largest multiple M of range that is <= 2**(B+A), generate
+ * (B+A)-bit numbers until one below M is found, and return it modulo range
+ * The second mechanism consumes A more bits of entropy in every iteration,
+ * but may need fewer iterations due to M being closer to 2**(B+A) than
+ * range is to 2**B. The array below (indexed by B) contains a 0 when the
+ * first mechanism is to be used, and the number A otherwise.
+ */
+ static const int addbits[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 0};
+ uint32_t trange, mult;
+ int bits = 0;
+ if (range <= 1) {
+ return 0;
+ }
+ trange = range - 1;
+ while (trange > 0) {
+ trange >>= 1;
+ bits++;
+ }
+ if (addbits[bits]) {
+ bits = bits + addbits[bits];
+ mult = ((~((uint32_t)0)) >> (32 - bits)) / range;
+ trange = range * mult;
+ } else {
+ trange = range;
+ mult = 1;
+ }
+ while(1) {
+ uint32_t x = secp256k1_rand_bits(bits);
+ if (x < trange) {
+ return (mult == 1) ? x : (x % range);
+ }
+ }
+}
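+
+/* Worked examples, illustrative only: for range = 6, bits = 3 and
+ * addbits[3] = 0, so 3-bit draws are rejected until one below 6 appears.
+ * For range = 1000, bits = 10 and addbits[10] = 2, so 12-bit draws are used
+ * with mult = 4095 / 1000 = 4 and trange = 4000; a draw x < 4000 is accepted
+ * and x % 1000 is returned. */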
+
+static void secp256k1_rand256(unsigned char *b32) {
+ secp256k1_rfc6979_hmac_sha256_generate(&secp256k1_test_rng, b32, 32);
+}
+
+static void secp256k1_rand_bytes_test(unsigned char *bytes, size_t len) {
+ size_t bits = 0;
+ memset(bytes, 0, len);
+ while (bits < len * 8) {
+ int now;
+ uint32_t val;
+ now = 1 + (secp256k1_rand_bits(6) * secp256k1_rand_bits(5) + 16) / 31;
+ val = secp256k1_rand_bits(1);
+ while (now > 0 && bits < len * 8) {
+ bytes[bits / 8] |= val << (bits % 8);
+ now--;
+ bits++;
+ }
+ }
+}
+
+static void secp256k1_rand256_test(unsigned char *b32) {
+ secp256k1_rand_bytes_test(b32, 32);
+}
+
+#endif
diff --git a/crypto/secp256k1/libsecp256k1/src/tests.c b/crypto/secp256k1/libsecp256k1/src/tests.c
new file mode 100644
index 000000000..9ae7d3028
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/tests.c
@@ -0,0 +1,4525 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014, 2015 Pieter Wuille, Gregory Maxwell *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <time.h>
+
+#include "secp256k1.c"
+#include "include/secp256k1.h"
+#include "testrand_impl.h"
+
+#ifdef ENABLE_OPENSSL_TESTS
+#include "openssl/bn.h"
+#include "openssl/ec.h"
+#include "openssl/ecdsa.h"
+#include "openssl/obj_mac.h"
+#endif
+
+#include "contrib/lax_der_parsing.c"
+#include "contrib/lax_der_privatekey_parsing.c"
+
+#if !defined(VG_CHECK)
+# if defined(VALGRIND)
+# include <valgrind/memcheck.h>
+# define VG_UNDEF(x,y) VALGRIND_MAKE_MEM_UNDEFINED((x),(y))
+# define VG_CHECK(x,y) VALGRIND_CHECK_MEM_IS_DEFINED((x),(y))
+# else
+# define VG_UNDEF(x,y)
+# define VG_CHECK(x,y)
+# endif
+#endif
+
+static int count = 64;
+static secp256k1_context *ctx = NULL;
+
+static void counting_illegal_callback_fn(const char* str, void* data) {
+ /* Dummy callback function that just counts. */
+ int32_t *p;
+ (void)str;
+ p = data;
+ (*p)++;
+}
+
+static void uncounting_illegal_callback_fn(const char* str, void* data) {
+ /* Dummy callback function that just counts (backwards). */
+ int32_t *p;
+ (void)str;
+ p = data;
+ (*p)--;
+}
+
+void random_field_element_test(secp256k1_fe *fe) {
+ do {
+ unsigned char b32[32];
+ secp256k1_rand256_test(b32);
+ if (secp256k1_fe_set_b32(fe, b32)) {
+ break;
+ }
+ } while(1);
+}
+
+void random_field_element_magnitude(secp256k1_fe *fe) {
+ secp256k1_fe zero;
+ int n = secp256k1_rand_int(9);
+ secp256k1_fe_normalize(fe);
+ if (n == 0) {
+ return;
+ }
+ secp256k1_fe_clear(&zero);
+ secp256k1_fe_negate(&zero, &zero, 0);
+ secp256k1_fe_mul_int(&zero, n - 1);
+ secp256k1_fe_add(fe, &zero);
+ VERIFY_CHECK(fe->magnitude == n);
+}
+
+void random_group_element_test(secp256k1_ge *ge) {
+ secp256k1_fe fe;
+ do {
+ random_field_element_test(&fe);
+ if (secp256k1_ge_set_xo_var(ge, &fe, secp256k1_rand_bits(1))) {
+ secp256k1_fe_normalize(&ge->y);
+ break;
+ }
+ } while(1);
+}
+
+void random_group_element_jacobian_test(secp256k1_gej *gej, const secp256k1_ge *ge) {
+ secp256k1_fe z2, z3;
+ do {
+ random_field_element_test(&gej->z);
+ if (!secp256k1_fe_is_zero(&gej->z)) {
+ break;
+ }
+ } while(1);
+ secp256k1_fe_sqr(&z2, &gej->z);
+ secp256k1_fe_mul(&z3, &z2, &gej->z);
+ secp256k1_fe_mul(&gej->x, &ge->x, &z2);
+ secp256k1_fe_mul(&gej->y, &ge->y, &z3);
+ gej->infinity = ge->infinity;
+}
+
+void random_scalar_order_test(secp256k1_scalar *num) {
+ do {
+ unsigned char b32[32];
+ int overflow = 0;
+ secp256k1_rand256_test(b32);
+ secp256k1_scalar_set_b32(num, b32, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(num)) {
+ continue;
+ }
+ break;
+ } while(1);
+}
+
+void random_scalar_order(secp256k1_scalar *num) {
+ do {
+ unsigned char b32[32];
+ int overflow = 0;
+ secp256k1_rand256(b32);
+ secp256k1_scalar_set_b32(num, b32, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(num)) {
+ continue;
+ }
+ break;
+ } while(1);
+}
+
+void run_context_tests(void) {
+ secp256k1_pubkey pubkey;
+ secp256k1_ecdsa_signature sig;
+ unsigned char ctmp[32];
+ int32_t ecount;
+ int32_t ecount2;
+ secp256k1_context *none = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
+ secp256k1_context *sign = secp256k1_context_create(SECP256K1_CONTEXT_SIGN);
+ secp256k1_context *vrfy = secp256k1_context_create(SECP256K1_CONTEXT_VERIFY);
+ secp256k1_context *both = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+
+ secp256k1_gej pubj;
+ secp256k1_ge pub;
+ secp256k1_scalar msg, key, nonce;
+ secp256k1_scalar sigr, sigs;
+
+ ecount = 0;
+ ecount2 = 10;
+ secp256k1_context_set_illegal_callback(vrfy, counting_illegal_callback_fn, &ecount);
+ secp256k1_context_set_illegal_callback(sign, counting_illegal_callback_fn, &ecount2);
+ secp256k1_context_set_error_callback(sign, counting_illegal_callback_fn, NULL);
+ CHECK(vrfy->error_callback.fn != sign->error_callback.fn);
+
+ /*** clone and destroy all of them to make sure cloning was complete ***/
+ {
+ secp256k1_context *ctx_tmp;
+
+ ctx_tmp = none; none = secp256k1_context_clone(none); secp256k1_context_destroy(ctx_tmp);
+ ctx_tmp = sign; sign = secp256k1_context_clone(sign); secp256k1_context_destroy(ctx_tmp);
+ ctx_tmp = vrfy; vrfy = secp256k1_context_clone(vrfy); secp256k1_context_destroy(ctx_tmp);
+ ctx_tmp = both; both = secp256k1_context_clone(both); secp256k1_context_destroy(ctx_tmp);
+ }
+
+ /* Verify that the error callback makes it across the clone. */
+ CHECK(vrfy->error_callback.fn != sign->error_callback.fn);
+ /* And that it resets back to default. */
+ secp256k1_context_set_error_callback(sign, NULL, NULL);
+ CHECK(vrfy->error_callback.fn == sign->error_callback.fn);
+
+ /*** attempt to use them ***/
+ random_scalar_order_test(&msg);
+ random_scalar_order_test(&key);
+ secp256k1_ecmult_gen(&both->ecmult_gen_ctx, &pubj, &key);
+ secp256k1_ge_set_gej(&pub, &pubj);
+
+ /* Verify context-type checking illegal-argument errors. */
+ memset(ctmp, 1, 32);
+ CHECK(secp256k1_ec_pubkey_create(vrfy, &pubkey, ctmp) == 0);
+ CHECK(ecount == 1);
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_create(sign, &pubkey, ctmp) == 1);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ CHECK(secp256k1_ecdsa_sign(vrfy, &sig, ctmp, ctmp, NULL, NULL) == 0);
+ CHECK(ecount == 2);
+ VG_UNDEF(&sig, sizeof(sig));
+ CHECK(secp256k1_ecdsa_sign(sign, &sig, ctmp, ctmp, NULL, NULL) == 1);
+ VG_CHECK(&sig, sizeof(sig));
+ CHECK(ecount2 == 10);
+ CHECK(secp256k1_ecdsa_verify(sign, &sig, ctmp, &pubkey) == 0);
+ CHECK(ecount2 == 11);
+ CHECK(secp256k1_ecdsa_verify(vrfy, &sig, ctmp, &pubkey) == 1);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_ec_pubkey_tweak_add(sign, &pubkey, ctmp) == 0);
+ CHECK(ecount2 == 12);
+ CHECK(secp256k1_ec_pubkey_tweak_add(vrfy, &pubkey, ctmp) == 1);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_ec_pubkey_tweak_mul(sign, &pubkey, ctmp) == 0);
+ CHECK(ecount2 == 13);
+ CHECK(secp256k1_ec_pubkey_tweak_mul(vrfy, &pubkey, ctmp) == 1);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_context_randomize(vrfy, ctmp) == 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_context_randomize(sign, NULL) == 1);
+ CHECK(ecount2 == 13);
+ secp256k1_context_set_illegal_callback(vrfy, NULL, NULL);
+ secp256k1_context_set_illegal_callback(sign, NULL, NULL);
+
+ /* This shouldn't leak memory, due to already-set tests. */
+ secp256k1_ecmult_gen_context_build(&sign->ecmult_gen_ctx, NULL);
+ secp256k1_ecmult_context_build(&vrfy->ecmult_ctx, NULL);
+
+ /* obtain a working nonce */
+ do {
+ random_scalar_order_test(&nonce);
+ } while(!secp256k1_ecdsa_sig_sign(&both->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL));
+
+ /* try signing */
+ CHECK(secp256k1_ecdsa_sig_sign(&sign->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL));
+ CHECK(secp256k1_ecdsa_sig_sign(&both->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL));
+
+ /* try verifying */
+ CHECK(secp256k1_ecdsa_sig_verify(&vrfy->ecmult_ctx, &sigr, &sigs, &pub, &msg));
+ CHECK(secp256k1_ecdsa_sig_verify(&both->ecmult_ctx, &sigr, &sigs, &pub, &msg));
+
+ /* cleanup */
+ secp256k1_context_destroy(none);
+ secp256k1_context_destroy(sign);
+ secp256k1_context_destroy(vrfy);
+ secp256k1_context_destroy(both);
+ /* Defined as no-op. */
+ secp256k1_context_destroy(NULL);
+}
+
+/***** HASH TESTS *****/
+
+void run_sha256_tests(void) {
+ static const char *inputs[8] = {
+ "", "abc", "message digest", "secure hash algorithm", "SHA256 is considered to be safe",
+ "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
+ "For this sample, this 63-byte string will be used as input data",
+ "This is exactly 64 bytes long, not counting the terminating byte"
+ };
+ static const unsigned char outputs[8][32] = {
+ {0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55},
+ {0xba, 0x78, 0x16, 0xbf, 0x8f, 0x01, 0xcf, 0xea, 0x41, 0x41, 0x40, 0xde, 0x5d, 0xae, 0x22, 0x23, 0xb0, 0x03, 0x61, 0xa3, 0x96, 0x17, 0x7a, 0x9c, 0xb4, 0x10, 0xff, 0x61, 0xf2, 0x00, 0x15, 0xad},
+ {0xf7, 0x84, 0x6f, 0x55, 0xcf, 0x23, 0xe1, 0x4e, 0xeb, 0xea, 0xb5, 0xb4, 0xe1, 0x55, 0x0c, 0xad, 0x5b, 0x50, 0x9e, 0x33, 0x48, 0xfb, 0xc4, 0xef, 0xa3, 0xa1, 0x41, 0x3d, 0x39, 0x3c, 0xb6, 0x50},
+ {0xf3, 0x0c, 0xeb, 0x2b, 0xb2, 0x82, 0x9e, 0x79, 0xe4, 0xca, 0x97, 0x53, 0xd3, 0x5a, 0x8e, 0xcc, 0x00, 0x26, 0x2d, 0x16, 0x4c, 0xc0, 0x77, 0x08, 0x02, 0x95, 0x38, 0x1c, 0xbd, 0x64, 0x3f, 0x0d},
+ {0x68, 0x19, 0xd9, 0x15, 0xc7, 0x3f, 0x4d, 0x1e, 0x77, 0xe4, 0xe1, 0xb5, 0x2d, 0x1f, 0xa0, 0xf9, 0xcf, 0x9b, 0xea, 0xea, 0xd3, 0x93, 0x9f, 0x15, 0x87, 0x4b, 0xd9, 0x88, 0xe2, 0xa2, 0x36, 0x30},
+ {0x24, 0x8d, 0x6a, 0x61, 0xd2, 0x06, 0x38, 0xb8, 0xe5, 0xc0, 0x26, 0x93, 0x0c, 0x3e, 0x60, 0x39, 0xa3, 0x3c, 0xe4, 0x59, 0x64, 0xff, 0x21, 0x67, 0xf6, 0xec, 0xed, 0xd4, 0x19, 0xdb, 0x06, 0xc1},
+ {0xf0, 0x8a, 0x78, 0xcb, 0xba, 0xee, 0x08, 0x2b, 0x05, 0x2a, 0xe0, 0x70, 0x8f, 0x32, 0xfa, 0x1e, 0x50, 0xc5, 0xc4, 0x21, 0xaa, 0x77, 0x2b, 0xa5, 0xdb, 0xb4, 0x06, 0xa2, 0xea, 0x6b, 0xe3, 0x42},
+ {0xab, 0x64, 0xef, 0xf7, 0xe8, 0x8e, 0x2e, 0x46, 0x16, 0x5e, 0x29, 0xf2, 0xbc, 0xe4, 0x18, 0x26, 0xbd, 0x4c, 0x7b, 0x35, 0x52, 0xf6, 0xb3, 0x82, 0xa9, 0xe7, 0xd3, 0xaf, 0x47, 0xc2, 0x45, 0xf8}
+ };
+ int i;
+ for (i = 0; i < 8; i++) {
+ unsigned char out[32];
+ secp256k1_sha256_t hasher;
+ secp256k1_sha256_initialize(&hasher);
+ secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i]));
+ secp256k1_sha256_finalize(&hasher, out);
+ CHECK(memcmp(out, outputs[i], 32) == 0);
+ if (strlen(inputs[i]) > 0) {
+ int split = secp256k1_rand_int(strlen(inputs[i]));
+ secp256k1_sha256_initialize(&hasher);
+ secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split);
+ secp256k1_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split);
+ secp256k1_sha256_finalize(&hasher, out);
+ CHECK(memcmp(out, outputs[i], 32) == 0);
+ }
+ }
+}
+
+void run_hmac_sha256_tests(void) {
+ static const char *keys[6] = {
+ "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
+ "\x4a\x65\x66\x65",
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
+ "\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19",
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
+ "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+ };
+ static const char *inputs[6] = {
+ "\x48\x69\x20\x54\x68\x65\x72\x65",
+ "\x77\x68\x61\x74\x20\x64\x6f\x20\x79\x61\x20\x77\x61\x6e\x74\x20\x66\x6f\x72\x20\x6e\x6f\x74\x68\x69\x6e\x67\x3f",
+ "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd",
+ "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
+ "\x54\x65\x73\x74\x20\x55\x73\x69\x6e\x67\x20\x4c\x61\x72\x67\x65\x72\x20\x54\x68\x61\x6e\x20\x42\x6c\x6f\x63\x6b\x2d\x53\x69\x7a\x65\x20\x4b\x65\x79\x20\x2d\x20\x48\x61\x73\x68\x20\x4b\x65\x79\x20\x46\x69\x72\x73\x74",
+ "\x54\x68\x69\x73\x20\x69\x73\x20\x61\x20\x74\x65\x73\x74\x20\x75\x73\x69\x6e\x67\x20\x61\x20\x6c\x61\x72\x67\x65\x72\x20\x74\x68\x61\x6e\x20\x62\x6c\x6f\x63\x6b\x2d\x73\x69\x7a\x65\x20\x6b\x65\x79\x20\x61\x6e\x64\x20\x61\x20\x6c\x61\x72\x67\x65\x72\x20\x74\x68\x61\x6e\x20\x62\x6c\x6f\x63\x6b\x2d\x73\x69\x7a\x65\x20\x64\x61\x74\x61\x2e\x20\x54\x68\x65\x20\x6b\x65\x79\x20\x6e\x65\x65\x64\x73\x20\x74\x6f\x20\x62\x65\x20\x68\x61\x73\x68\x65\x64\x20\x62\x65\x66\x6f\x72\x65\x20\x62\x65\x69\x6e\x67\x20\x75\x73\x65\x64\x20\x62\x79\x20\x74\x68\x65\x20\x48\x4d\x41\x43\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d\x2e"
+ };
+ static const unsigned char outputs[6][32] = {
+ {0xb0, 0x34, 0x4c, 0x61, 0xd8, 0xdb, 0x38, 0x53, 0x5c, 0xa8, 0xaf, 0xce, 0xaf, 0x0b, 0xf1, 0x2b, 0x88, 0x1d, 0xc2, 0x00, 0xc9, 0x83, 0x3d, 0xa7, 0x26, 0xe9, 0x37, 0x6c, 0x2e, 0x32, 0xcf, 0xf7},
+ {0x5b, 0xdc, 0xc1, 0x46, 0xbf, 0x60, 0x75, 0x4e, 0x6a, 0x04, 0x24, 0x26, 0x08, 0x95, 0x75, 0xc7, 0x5a, 0x00, 0x3f, 0x08, 0x9d, 0x27, 0x39, 0x83, 0x9d, 0xec, 0x58, 0xb9, 0x64, 0xec, 0x38, 0x43},
+ {0x77, 0x3e, 0xa9, 0x1e, 0x36, 0x80, 0x0e, 0x46, 0x85, 0x4d, 0xb8, 0xeb, 0xd0, 0x91, 0x81, 0xa7, 0x29, 0x59, 0x09, 0x8b, 0x3e, 0xf8, 0xc1, 0x22, 0xd9, 0x63, 0x55, 0x14, 0xce, 0xd5, 0x65, 0xfe},
+ {0x82, 0x55, 0x8a, 0x38, 0x9a, 0x44, 0x3c, 0x0e, 0xa4, 0xcc, 0x81, 0x98, 0x99, 0xf2, 0x08, 0x3a, 0x85, 0xf0, 0xfa, 0xa3, 0xe5, 0x78, 0xf8, 0x07, 0x7a, 0x2e, 0x3f, 0xf4, 0x67, 0x29, 0x66, 0x5b},
+ {0x60, 0xe4, 0x31, 0x59, 0x1e, 0xe0, 0xb6, 0x7f, 0x0d, 0x8a, 0x26, 0xaa, 0xcb, 0xf5, 0xb7, 0x7f, 0x8e, 0x0b, 0xc6, 0x21, 0x37, 0x28, 0xc5, 0x14, 0x05, 0x46, 0x04, 0x0f, 0x0e, 0xe3, 0x7f, 0x54},
+ {0x9b, 0x09, 0xff, 0xa7, 0x1b, 0x94, 0x2f, 0xcb, 0x27, 0x63, 0x5f, 0xbc, 0xd5, 0xb0, 0xe9, 0x44, 0xbf, 0xdc, 0x63, 0x64, 0x4f, 0x07, 0x13, 0x93, 0x8a, 0x7f, 0x51, 0x53, 0x5c, 0x3a, 0x35, 0xe2}
+ };
+ int i;
+ for (i = 0; i < 6; i++) {
+ secp256k1_hmac_sha256_t hasher;
+ unsigned char out[32];
+ secp256k1_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i]));
+ secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), strlen(inputs[i]));
+ secp256k1_hmac_sha256_finalize(&hasher, out);
+ CHECK(memcmp(out, outputs[i], 32) == 0);
+ if (strlen(inputs[i]) > 0) {
+ int split = secp256k1_rand_int(strlen(inputs[i]));
+ secp256k1_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i]));
+ secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split);
+ secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split);
+ secp256k1_hmac_sha256_finalize(&hasher, out);
+ CHECK(memcmp(out, outputs[i], 32) == 0);
+ }
+ }
+}
+
+void run_rfc6979_hmac_sha256_tests(void) {
+ static const unsigned char key1[65] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x00, 0x4b, 0xf5, 0x12, 0x2f, 0x34, 0x45, 0x54, 0xc5, 0x3b, 0xde, 0x2e, 0xbb, 0x8c, 0xd2, 0xb7, 0xe3, 0xd1, 0x60, 0x0a, 0xd6, 0x31, 0xc3, 0x85, 0xa5, 0xd7, 0xcc, 0xe2, 0x3c, 0x77, 0x85, 0x45, 0x9a, 0};
+ static const unsigned char out1[3][32] = {
+ {0x4f, 0xe2, 0x95, 0x25, 0xb2, 0x08, 0x68, 0x09, 0x15, 0x9a, 0xcd, 0xf0, 0x50, 0x6e, 0xfb, 0x86, 0xb0, 0xec, 0x93, 0x2c, 0x7b, 0xa4, 0x42, 0x56, 0xab, 0x32, 0x1e, 0x42, 0x1e, 0x67, 0xe9, 0xfb},
+ {0x2b, 0xf0, 0xff, 0xf1, 0xd3, 0xc3, 0x78, 0xa2, 0x2d, 0xc5, 0xde, 0x1d, 0x85, 0x65, 0x22, 0x32, 0x5c, 0x65, 0xb5, 0x04, 0x49, 0x1a, 0x0c, 0xbd, 0x01, 0xcb, 0x8f, 0x3a, 0xa6, 0x7f, 0xfd, 0x4a},
+ {0xf5, 0x28, 0xb4, 0x10, 0xcb, 0x54, 0x1f, 0x77, 0x00, 0x0d, 0x7a, 0xfb, 0x6c, 0x5b, 0x53, 0xc5, 0xc4, 0x71, 0xea, 0xb4, 0x3e, 0x46, 0x6d, 0x9a, 0xc5, 0x19, 0x0c, 0x39, 0xc8, 0x2f, 0xd8, 0x2e}
+ };
+
+ static const unsigned char key2[64] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55};
+ static const unsigned char out2[3][32] = {
+ {0x9c, 0x23, 0x6c, 0x16, 0x5b, 0x82, 0xae, 0x0c, 0xd5, 0x90, 0x65, 0x9e, 0x10, 0x0b, 0x6b, 0xab, 0x30, 0x36, 0xe7, 0xba, 0x8b, 0x06, 0x74, 0x9b, 0xaf, 0x69, 0x81, 0xe1, 0x6f, 0x1a, 0x2b, 0x95},
+ {0xdf, 0x47, 0x10, 0x61, 0x62, 0x5b, 0xc0, 0xea, 0x14, 0xb6, 0x82, 0xfe, 0xee, 0x2c, 0x9c, 0x02, 0xf2, 0x35, 0xda, 0x04, 0x20, 0x4c, 0x1d, 0x62, 0xa1, 0x53, 0x6c, 0x6e, 0x17, 0xae, 0xd7, 0xa9},
+ {0x75, 0x97, 0x88, 0x7c, 0xbd, 0x76, 0x32, 0x1f, 0x32, 0xe3, 0x04, 0x40, 0x67, 0x9a, 0x22, 0xcf, 0x7f, 0x8d, 0x9d, 0x2e, 0xac, 0x39, 0x0e, 0x58, 0x1f, 0xea, 0x09, 0x1c, 0xe2, 0x02, 0xba, 0x94}
+ };
+
+ secp256k1_rfc6979_hmac_sha256_t rng;
+ unsigned char out[32];
+ int i;
+
+ secp256k1_rfc6979_hmac_sha256_initialize(&rng, key1, 64);
+ for (i = 0; i < 3; i++) {
+ secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32);
+ CHECK(memcmp(out, out1[i], 32) == 0);
+ }
+ secp256k1_rfc6979_hmac_sha256_finalize(&rng);
+
+ secp256k1_rfc6979_hmac_sha256_initialize(&rng, key1, 65);
+ for (i = 0; i < 3; i++) {
+ secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32);
+ CHECK(memcmp(out, out1[i], 32) != 0);
+ }
+ secp256k1_rfc6979_hmac_sha256_finalize(&rng);
+
+ secp256k1_rfc6979_hmac_sha256_initialize(&rng, key2, 64);
+ for (i = 0; i < 3; i++) {
+ secp256k1_rfc6979_hmac_sha256_generate(&rng, out, 32);
+ CHECK(memcmp(out, out2[i], 32) == 0);
+ }
+ secp256k1_rfc6979_hmac_sha256_finalize(&rng);
+}
+
+/***** RANDOM TESTS *****/
+
+void test_rand_bits(int rand32, int bits) {
+ /* (1-1/2^B)^rounds[B] < 1/10^9, so rounds is the number of iterations to
+ * get a false negative chance below once in a billion */
+ static const unsigned int rounds[7] = {1, 30, 73, 156, 322, 653, 1316};
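+ /* e.g. for usebits == 6: (1 - 1/64)^1316 ~= e^(-20.7) ~= 1e-9 */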
+ /* We try multiplying the results with various odd numbers, which shouldn't
+ * influence the uniform distribution modulo a power of 2. */
+ static const uint32_t mults[6] = {1, 3, 21, 289, 0x9999, 0x80402011};
+ /* We only select up to 6 bits from the output to analyse */
+ unsigned int usebits = bits > 6 ? 6 : bits;
+ unsigned int maxshift = bits - usebits;
+ /* For each of the maxshift+1 usebits-bit sequences inside a bits-bit
+ number, track all observed outcomes, one per bit in a uint64_t. */
+ uint64_t x[6][27] = {{0}};
+ unsigned int i, shift, m;
+ /* Multiply the output of all rand calls with the odd number m, which
+ should not change the uniformity of its distribution. */
+ for (i = 0; i < rounds[usebits]; i++) {
+ uint32_t r = (rand32 ? secp256k1_rand32() : secp256k1_rand_bits(bits));
+ CHECK((((uint64_t)r) >> bits) == 0);
+ for (m = 0; m < sizeof(mults) / sizeof(mults[0]); m++) {
+ uint32_t rm = r * mults[m];
+ for (shift = 0; shift <= maxshift; shift++) {
+ x[m][shift] |= (((uint64_t)1) << ((rm >> shift) & ((1 << usebits) - 1)));
+ }
+ }
+ }
+ for (m = 0; m < sizeof(mults) / sizeof(mults[0]); m++) {
+ for (shift = 0; shift <= maxshift; shift++) {
+ /* Test that the lower usebits bits of x[shift] are 1 */
+ CHECK(((~x[m][shift]) << (64 - (1 << usebits))) == 0);
+ }
+ }
+}
+
+/* Subrange must be a whole divisor of range, and at most 64 */
+void test_rand_int(uint32_t range, uint32_t subrange) {
+ /* (1-1/subrange)^rounds < 1/10^9 */
+ int rounds = (subrange * 2073) / 100;
+ int i;
+ uint64_t x = 0;
+ CHECK((range % subrange) == 0);
+ for (i = 0; i < rounds; i++) {
+ uint32_t r = secp256k1_rand_int(range);
+ CHECK(r < range);
+ r = r % subrange;
+ x |= (((uint64_t)1) << r);
+ }
+ /* Test that the lower subrange bits of x are 1. */
+ CHECK(((~x) << (64 - subrange)) == 0);
+}
+
+void run_rand_bits(void) {
+ size_t b;
+ test_rand_bits(1, 32);
+ for (b = 1; b <= 32; b++) {
+ test_rand_bits(0, b);
+ }
+}
+
+void run_rand_int(void) {
+ static const uint32_t ms[] = {1, 3, 17, 1000, 13771, 999999, 33554432};
+ static const uint32_t ss[] = {1, 3, 6, 9, 13, 31, 64};
+ unsigned int m, s;
+ for (m = 0; m < sizeof(ms) / sizeof(ms[0]); m++) {
+ for (s = 0; s < sizeof(ss) / sizeof(ss[0]); s++) {
+ test_rand_int(ms[m] * ss[s], ss[s]);
+ }
+ }
+}
+
+/***** NUM TESTS *****/
+
+#ifndef USE_NUM_NONE
+void random_num_negate(secp256k1_num *num) {
+ if (secp256k1_rand_bits(1)) {
+ secp256k1_num_negate(num);
+ }
+}
+
+void random_num_order_test(secp256k1_num *num) {
+ secp256k1_scalar sc;
+ random_scalar_order_test(&sc);
+ secp256k1_scalar_get_num(num, &sc);
+}
+
+void random_num_order(secp256k1_num *num) {
+ secp256k1_scalar sc;
+ random_scalar_order(&sc);
+ secp256k1_scalar_get_num(num, &sc);
+}
+
+void test_num_negate(void) {
+ secp256k1_num n1;
+ secp256k1_num n2;
+ random_num_order_test(&n1); /* n1 = R */
+ random_num_negate(&n1);
+ secp256k1_num_copy(&n2, &n1); /* n2 = R */
+ secp256k1_num_sub(&n1, &n2, &n1); /* n1 = n2-n1 = 0 */
+ CHECK(secp256k1_num_is_zero(&n1));
+ secp256k1_num_copy(&n1, &n2); /* n1 = R */
+ secp256k1_num_negate(&n1); /* n1 = -R */
+ CHECK(!secp256k1_num_is_zero(&n1));
+ secp256k1_num_add(&n1, &n2, &n1); /* n1 = n2+n1 = 0 */
+ CHECK(secp256k1_num_is_zero(&n1));
+ secp256k1_num_copy(&n1, &n2); /* n1 = R */
+ secp256k1_num_negate(&n1); /* n1 = -R */
+ CHECK(secp256k1_num_is_neg(&n1) != secp256k1_num_is_neg(&n2));
+ secp256k1_num_negate(&n1); /* n1 = R */
+ CHECK(secp256k1_num_eq(&n1, &n2));
+}
+
+void test_num_add_sub(void) {
+ int i;
+ secp256k1_scalar s;
+ secp256k1_num n1;
+ secp256k1_num n2;
+ secp256k1_num n1p2, n2p1, n1m2, n2m1;
+ random_num_order_test(&n1); /* n1 = R1 */
+ if (secp256k1_rand_bits(1)) {
+ random_num_negate(&n1);
+ }
+ random_num_order_test(&n2); /* n2 = R2 */
+ if (secp256k1_rand_bits(1)) {
+ random_num_negate(&n2);
+ }
+ secp256k1_num_add(&n1p2, &n1, &n2); /* n1p2 = R1 + R2 */
+ secp256k1_num_add(&n2p1, &n2, &n1); /* n2p1 = R2 + R1 */
+ secp256k1_num_sub(&n1m2, &n1, &n2); /* n1m2 = R1 - R2 */
+ secp256k1_num_sub(&n2m1, &n2, &n1); /* n2m1 = R2 - R1 */
+ CHECK(secp256k1_num_eq(&n1p2, &n2p1));
+ CHECK(!secp256k1_num_eq(&n1p2, &n1m2));
+ secp256k1_num_negate(&n2m1); /* n2m1 = -R2 + R1 */
+ CHECK(secp256k1_num_eq(&n2m1, &n1m2));
+ CHECK(!secp256k1_num_eq(&n2m1, &n1));
+ secp256k1_num_add(&n2m1, &n2m1, &n2); /* n2m1 = -R2 + R1 + R2 = R1 */
+ CHECK(secp256k1_num_eq(&n2m1, &n1));
+ CHECK(!secp256k1_num_eq(&n2p1, &n1));
+ secp256k1_num_sub(&n2p1, &n2p1, &n2); /* n2p1 = R2 + R1 - R2 = R1 */
+ CHECK(secp256k1_num_eq(&n2p1, &n1));
+
+ /* check is_one */
+ secp256k1_scalar_set_int(&s, 1);
+ secp256k1_scalar_get_num(&n1, &s);
+ CHECK(secp256k1_num_is_one(&n1));
+ /* check that 2^n + 1 is never 1 */
+ secp256k1_scalar_get_num(&n2, &s);
+ for (i = 0; i < 250; ++i) {
+ secp256k1_num_add(&n1, &n1, &n1); /* n1 *= 2 */
+ secp256k1_num_add(&n1p2, &n1, &n2); /* n1p2 = n1 + 1 */
+ CHECK(!secp256k1_num_is_one(&n1p2));
+ }
+}
+
+void test_num_mod(void) {
+ int i;
+ secp256k1_scalar s;
+ secp256k1_num order, n;
+
+ /* check that 0 mod anything is 0 */
+ random_scalar_order_test(&s);
+ secp256k1_scalar_get_num(&order, &s);
+ secp256k1_scalar_set_int(&s, 0);
+ secp256k1_scalar_get_num(&n, &s);
+ secp256k1_num_mod(&n, &order);
+ CHECK(secp256k1_num_is_zero(&n));
+
+ /* check that anything mod 1 is 0 */
+ secp256k1_scalar_set_int(&s, 1);
+ secp256k1_scalar_get_num(&order, &s);
+ secp256k1_scalar_get_num(&n, &s);
+ secp256k1_num_mod(&n, &order);
+ CHECK(secp256k1_num_is_zero(&n));
+
+ /* check that increasing the number past 2^256 does not break this */
+ random_scalar_order_test(&s);
+ secp256k1_scalar_get_num(&n, &s);
+ /* multiply by 2^8, which'll test this case with high probability */
+ for (i = 0; i < 8; ++i) {
+ secp256k1_num_add(&n, &n, &n);
+ }
+ secp256k1_num_mod(&n, &order);
+ CHECK(secp256k1_num_is_zero(&n));
+}
+
+void test_num_jacobi(void) {
+ secp256k1_scalar sqr;
+ secp256k1_scalar small;
+ secp256k1_scalar five; /* five is not a quadratic residue mod the group order */
+ secp256k1_num order, n;
+ int i;
+ /* squares mod 5 are 1, 4 */
+ const int jacobi5[10] = { 0, 1, -1, -1, 1, 0, 1, -1, -1, 1 };
+
+ /* check some small values with 5 as the order */
+ secp256k1_scalar_set_int(&five, 5);
+ secp256k1_scalar_get_num(&order, &five);
+ for (i = 0; i < 10; ++i) {
+ secp256k1_scalar_set_int(&small, i);
+ secp256k1_scalar_get_num(&n, &small);
+ CHECK(secp256k1_num_jacobi(&n, &order) == jacobi5[i]);
+ }
+
+ /** test large values with 5 as group order */
+ secp256k1_scalar_get_num(&order, &five);
+ /* we first need a scalar which is not a multiple of 5 */
+ do {
+ secp256k1_num fiven;
+ random_scalar_order_test(&sqr);
+ secp256k1_scalar_get_num(&fiven, &five);
+ secp256k1_scalar_get_num(&n, &sqr);
+ secp256k1_num_mod(&n, &fiven);
+ } while (secp256k1_num_is_zero(&n));
+ /* next force it to be a residue. 2 is a nonresidue mod 5 so we can
+ * just multiply by two, i.e. add the number to itself */
+ if (secp256k1_num_jacobi(&n, &order) == -1) {
+ secp256k1_num_add(&n, &n, &n);
+ }
+
+ /* test residue */
+ CHECK(secp256k1_num_jacobi(&n, &order) == 1);
+ /* test nonresidue */
+ secp256k1_num_add(&n, &n, &n);
+ CHECK(secp256k1_num_jacobi(&n, &order) == -1);
+
+ /** test with secp group order as order */
+ secp256k1_scalar_order_get_num(&order);
+ random_scalar_order_test(&sqr);
+ secp256k1_scalar_sqr(&sqr, &sqr);
+ /* test residue */
+ secp256k1_scalar_get_num(&n, &sqr);
+ CHECK(secp256k1_num_jacobi(&n, &order) == 1);
+ /* test nonresidue */
+ secp256k1_scalar_mul(&sqr, &sqr, &five);
+ secp256k1_scalar_get_num(&n, &sqr);
+ CHECK(secp256k1_num_jacobi(&n, &order) == -1);
+ /* test multiple of the order */
+ CHECK(secp256k1_num_jacobi(&order, &order) == 0);
+
+ /* check one less than the order */
+ secp256k1_scalar_set_int(&small, 1);
+ secp256k1_scalar_get_num(&n, &small);
+ secp256k1_num_sub(&n, &order, &n);
+ CHECK(secp256k1_num_jacobi(&n, &order) == 1); /* sage confirms this is 1 */
+}
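+
+/* Illustrative sketch, not part of the original tests (the helper name below is
+ * made up): the jacobi5[] table in test_num_jacobi can be reproduced by brute
+ * force, since a value not divisible by 5 is a quadratic residue mod 5 exactly
+ * when some square equals it mod 5. */
+ static int jacobi_mod5_bruteforce(int a) {
+ int j;
+ if (a % 5 == 0) {
+ return 0;
+ }
+ for (j = 1; j < 5; j++) {
+ if ((j * j) % 5 == a % 5) {
+ return 1;
+ }
+ }
+ return -1;
+}
+/* For a = 0..9 this reproduces { 0, 1, -1, -1, 1, 0, 1, -1, -1, 1 }; shown for
+ * reference only and never called. */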
+
+void run_num_smalltests(void) {
+ int i;
+ for (i = 0; i < 100*count; i++) {
+ test_num_negate();
+ test_num_add_sub();
+ test_num_mod();
+ test_num_jacobi();
+ }
+}
+#endif
+
+/***** SCALAR TESTS *****/
+
+void scalar_test(void) {
+ secp256k1_scalar s;
+ secp256k1_scalar s1;
+ secp256k1_scalar s2;
+#ifndef USE_NUM_NONE
+ secp256k1_num snum, s1num, s2num;
+ secp256k1_num order, half_order;
+#endif
+ unsigned char c[32];
+
+ /* Set 's' to a random scalar, with value 'snum'. */
+ random_scalar_order_test(&s);
+
+ /* Set 's1' to a random scalar, with value 's1num'. */
+ random_scalar_order_test(&s1);
+
+ /* Set 's2' to a random scalar, with value 's2num', and byte array representation 'c'. */
+ random_scalar_order_test(&s2);
+ secp256k1_scalar_get_b32(c, &s2);
+
+#ifndef USE_NUM_NONE
+ secp256k1_scalar_get_num(&snum, &s);
+ secp256k1_scalar_get_num(&s1num, &s1);
+ secp256k1_scalar_get_num(&s2num, &s2);
+
+ secp256k1_scalar_order_get_num(&order);
+ half_order = order;
+ secp256k1_num_shift(&half_order, 1);
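+ /* secp256k1_num_shift shifts right, so half_order = floor(order/2); it is used by the is_high checks below. */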
+#endif
+
+ {
+ int i;
+ /* Test that fetching groups of 4 bits from a scalar and recursing n(i)=16*n(i-1)+p(i) reconstructs it. */
+ secp256k1_scalar n;
+ secp256k1_scalar_set_int(&n, 0);
+ for (i = 0; i < 256; i += 4) {
+ secp256k1_scalar t;
+ int j;
+ secp256k1_scalar_set_int(&t, secp256k1_scalar_get_bits(&s, 256 - 4 - i, 4));
+ for (j = 0; j < 4; j++) {
+ secp256k1_scalar_add(&n, &n, &n);
+ }
+ secp256k1_scalar_add(&n, &n, &t);
+ }
+ CHECK(secp256k1_scalar_eq(&n, &s));
+ }
+
+ {
+ /* Test that fetching groups of randomly-sized bits from a scalar and recursing n(i)=b*n(i-1)+p(i) reconstructs it. */
+ secp256k1_scalar n;
+ int i = 0;
+ secp256k1_scalar_set_int(&n, 0);
+ while (i < 256) {
+ secp256k1_scalar t;
+ int j;
+ int now = secp256k1_rand_int(15) + 1;
+ if (now + i > 256) {
+ now = 256 - i;
+ }
+ secp256k1_scalar_set_int(&t, secp256k1_scalar_get_bits_var(&s, 256 - now - i, now));
+ for (j = 0; j < now; j++) {
+ secp256k1_scalar_add(&n, &n, &n);
+ }
+ secp256k1_scalar_add(&n, &n, &t);
+ i += now;
+ }
+ CHECK(secp256k1_scalar_eq(&n, &s));
+ }
+
+#ifndef USE_NUM_NONE
+ {
+ /* Test that adding the scalars together is equal to adding their numbers together modulo the order. */
+ secp256k1_num rnum;
+ secp256k1_num r2num;
+ secp256k1_scalar r;
+ secp256k1_num_add(&rnum, &snum, &s2num);
+ secp256k1_num_mod(&rnum, &order);
+ secp256k1_scalar_add(&r, &s, &s2);
+ secp256k1_scalar_get_num(&r2num, &r);
+ CHECK(secp256k1_num_eq(&rnum, &r2num));
+ }
+
+ {
+ /* Test that multiplying the scalars is equal to multiplying their numbers modulo the order. */
+ secp256k1_scalar r;
+ secp256k1_num r2num;
+ secp256k1_num rnum;
+ secp256k1_num_mul(&rnum, &snum, &s2num);
+ secp256k1_num_mod(&rnum, &order);
+ secp256k1_scalar_mul(&r, &s, &s2);
+ secp256k1_scalar_get_num(&r2num, &r);
+ CHECK(secp256k1_num_eq(&rnum, &r2num));
+ /* The result can only be zero if at least one of the factors was zero. */
+ CHECK(secp256k1_scalar_is_zero(&r) == (secp256k1_scalar_is_zero(&s) || secp256k1_scalar_is_zero(&s2)));
+ /* The result can only be equal to one of the factors if that factor was zero, or the other factor was one. */
+ CHECK(secp256k1_num_eq(&rnum, &snum) == (secp256k1_scalar_is_zero(&s) || secp256k1_scalar_is_one(&s2)));
+ CHECK(secp256k1_num_eq(&rnum, &s2num) == (secp256k1_scalar_is_zero(&s2) || secp256k1_scalar_is_one(&s)));
+ }
+
+ {
+ secp256k1_scalar neg;
+ secp256k1_num negnum;
+ secp256k1_num negnum2;
+ /* Check that comparison with zero matches comparison with zero on the number. */
+ CHECK(secp256k1_num_is_zero(&snum) == secp256k1_scalar_is_zero(&s));
+ /* Check that comparison with the half order is equal to testing for high scalar. */
+ CHECK(secp256k1_scalar_is_high(&s) == (secp256k1_num_cmp(&snum, &half_order) > 0));
+ secp256k1_scalar_negate(&neg, &s);
+ secp256k1_num_sub(&negnum, &order, &snum);
+ secp256k1_num_mod(&negnum, &order);
+ /* Check that comparison with the half order is equal to testing for high scalar after negation. */
+ CHECK(secp256k1_scalar_is_high(&neg) == (secp256k1_num_cmp(&negnum, &half_order) > 0));
+ /* Negating should change the high property, unless the value was already zero. */
+ CHECK((secp256k1_scalar_is_high(&s) == secp256k1_scalar_is_high(&neg)) == secp256k1_scalar_is_zero(&s));
+ secp256k1_scalar_get_num(&negnum2, &neg);
+ /* Negating a scalar should be equal to (order - n) mod order on the number. */
+ CHECK(secp256k1_num_eq(&negnum, &negnum2));
+ secp256k1_scalar_add(&neg, &neg, &s);
+ /* Adding a number to its negation should result in zero. */
+ CHECK(secp256k1_scalar_is_zero(&neg));
+ secp256k1_scalar_negate(&neg, &neg);
+ /* Negating zero should still result in zero. */
+ CHECK(secp256k1_scalar_is_zero(&neg));
+ }
+
+ {
+ /* Test secp256k1_scalar_mul_shift_var. */
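+ /* The reference value below is (s1*s2)/2^shift rounded to nearest (ties up): shift right by (shift-1), add one, then halve. */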
+ secp256k1_scalar r;
+ secp256k1_num one;
+ secp256k1_num rnum;
+ secp256k1_num rnum2;
+ unsigned char cone[1] = {0x01};
+ unsigned int shift = 256 + secp256k1_rand_int(257);
+ secp256k1_scalar_mul_shift_var(&r, &s1, &s2, shift);
+ secp256k1_num_mul(&rnum, &s1num, &s2num);
+ secp256k1_num_shift(&rnum, shift - 1);
+ secp256k1_num_set_bin(&one, cone, 1);
+ secp256k1_num_add(&rnum, &rnum, &one);
+ secp256k1_num_shift(&rnum, 1);
+ secp256k1_scalar_get_num(&rnum2, &r);
+ CHECK(secp256k1_num_eq(&rnum, &rnum2));
+ }
+
+ {
+ /* test secp256k1_scalar_shr_int */
+ secp256k1_scalar r;
+ int i;
+ random_scalar_order_test(&r);
+ for (i = 0; i < 100; ++i) {
+ int low;
+ int shift = 1 + secp256k1_rand_int(15);
+ int expected = r.d[0] % (1 << shift);
+ low = secp256k1_scalar_shr_int(&r, shift);
+ CHECK(expected == low);
+ }
+ }
+#endif
+
+ {
+ /* Test that scalar inverses are equal to the inverse of their number modulo the order. */
+ if (!secp256k1_scalar_is_zero(&s)) {
+ secp256k1_scalar inv;
+#ifndef USE_NUM_NONE
+ secp256k1_num invnum;
+ secp256k1_num invnum2;
+#endif
+ secp256k1_scalar_inverse(&inv, &s);
+#ifndef USE_NUM_NONE
+ secp256k1_num_mod_inverse(&invnum, &snum, &order);
+ secp256k1_scalar_get_num(&invnum2, &inv);
+ CHECK(secp256k1_num_eq(&invnum, &invnum2));
+#endif
+ secp256k1_scalar_mul(&inv, &inv, &s);
+ /* Multiplying a scalar with its inverse must result in one. */
+ CHECK(secp256k1_scalar_is_one(&inv));
+ secp256k1_scalar_inverse(&inv, &inv);
+ /* Inverting one must result in one. */
+ CHECK(secp256k1_scalar_is_one(&inv));
+#ifndef USE_NUM_NONE
+ secp256k1_scalar_get_num(&invnum, &inv);
+ CHECK(secp256k1_num_is_one(&invnum));
+#endif
+ }
+ }
+
+ {
+ /* Test commutativity of add. */
+ secp256k1_scalar r1, r2;
+ secp256k1_scalar_add(&r1, &s1, &s2);
+ secp256k1_scalar_add(&r2, &s2, &s1);
+ CHECK(secp256k1_scalar_eq(&r1, &r2));
+ }
+
+ {
+ secp256k1_scalar r1, r2;
+ secp256k1_scalar b;
+ int i;
+ /* Test secp256k1_scalar_cadd_bit. */
+ int bit = secp256k1_rand_bits(8);
+ secp256k1_scalar_set_int(&b, 1);
+ CHECK(secp256k1_scalar_is_one(&b));
+ for (i = 0; i < bit; i++) {
+ secp256k1_scalar_add(&b, &b, &b);
+ }
+ r1 = s1;
+ r2 = s1;
+ if (!secp256k1_scalar_add(&r1, &r1, &b)) {
+ /* No overflow happened. */
+ secp256k1_scalar_cadd_bit(&r2, bit, 1);
+ CHECK(secp256k1_scalar_eq(&r1, &r2));
+ /* cadd is a noop when flag is zero */
+ secp256k1_scalar_cadd_bit(&r2, bit, 0);
+ CHECK(secp256k1_scalar_eq(&r1, &r2));
+ }
+ }
+
+ {
+ /* Test commutativity of mul. */
+ secp256k1_scalar r1, r2;
+ secp256k1_scalar_mul(&r1, &s1, &s2);
+ secp256k1_scalar_mul(&r2, &s2, &s1);
+ CHECK(secp256k1_scalar_eq(&r1, &r2));
+ }
+
+ {
+ /* Test associativity of add. */
+ secp256k1_scalar r1, r2;
+ secp256k1_scalar_add(&r1, &s1, &s2);
+ secp256k1_scalar_add(&r1, &r1, &s);
+ secp256k1_scalar_add(&r2, &s2, &s);
+ secp256k1_scalar_add(&r2, &s1, &r2);
+ CHECK(secp256k1_scalar_eq(&r1, &r2));
+ }
+
+ {
+ /* Test associativity of mul. */
+ secp256k1_scalar r1, r2;
+ secp256k1_scalar_mul(&r1, &s1, &s2);
+ secp256k1_scalar_mul(&r1, &r1, &s);
+ secp256k1_scalar_mul(&r2, &s2, &s);
+ secp256k1_scalar_mul(&r2, &s1, &r2);
+ CHECK(secp256k1_scalar_eq(&r1, &r2));
+ }
+
+ {
+ /* Test distributivity of mul over add. */
+ secp256k1_scalar r1, r2, t;
+ secp256k1_scalar_add(&r1, &s1, &s2);
+ secp256k1_scalar_mul(&r1, &r1, &s);
+ secp256k1_scalar_mul(&r2, &s1, &s);
+ secp256k1_scalar_mul(&t, &s2, &s);
+ secp256k1_scalar_add(&r2, &r2, &t);
+ CHECK(secp256k1_scalar_eq(&r1, &r2));
+ }
+
+ {
+ /* Test square. */
+ secp256k1_scalar r1, r2;
+ secp256k1_scalar_sqr(&r1, &s1);
+ secp256k1_scalar_mul(&r2, &s1, &s1);
+ CHECK(secp256k1_scalar_eq(&r1, &r2));
+ }
+
+ {
+ /* Test multiplicative identity. */
+ secp256k1_scalar r1, v1;
+ secp256k1_scalar_set_int(&v1,1);
+ secp256k1_scalar_mul(&r1, &s1, &v1);
+ CHECK(secp256k1_scalar_eq(&r1, &s1));
+ }
+
+ {
+ /* Test additive identity. */
+ secp256k1_scalar r1, v0;
+ secp256k1_scalar_set_int(&v0,0);
+ secp256k1_scalar_add(&r1, &s1, &v0);
+ CHECK(secp256k1_scalar_eq(&r1, &s1));
+ }
+
+ {
+ /* Test zero product property. */
+ secp256k1_scalar r1, v0;
+ secp256k1_scalar_set_int(&v0,0);
+ secp256k1_scalar_mul(&r1, &s1, &v0);
+ CHECK(secp256k1_scalar_eq(&r1, &v0));
+ }
+
+}
+
+void run_scalar_tests(void) {
+ int i;
+ for (i = 0; i < 128 * count; i++) {
+ scalar_test();
+ }
+
+ {
+ /* (-1)+1 should be zero. */
+ secp256k1_scalar s, o;
+ secp256k1_scalar_set_int(&s, 1);
+ CHECK(secp256k1_scalar_is_one(&s));
+ secp256k1_scalar_negate(&o, &s);
+ secp256k1_scalar_add(&o, &o, &s);
+ CHECK(secp256k1_scalar_is_zero(&o));
+ secp256k1_scalar_negate(&o, &o);
+ CHECK(secp256k1_scalar_is_zero(&o));
+ }
+
+#ifndef USE_NUM_NONE
+ {
+ /* A scalar with value of the curve order should be 0. */
+ secp256k1_num order;
+ secp256k1_scalar zero;
+ unsigned char bin[32];
+ int overflow = 0;
+ secp256k1_scalar_order_get_num(&order);
+ secp256k1_num_get_bin(bin, 32, &order);
+ secp256k1_scalar_set_b32(&zero, bin, &overflow);
+ CHECK(overflow == 1);
+ CHECK(secp256k1_scalar_is_zero(&zero));
+ }
+#endif
+
+ {
+ /* Check that secp256k1_scalar_check_overflow catches the all-ones value. */
+ static const secp256k1_scalar overflowed = SECP256K1_SCALAR_CONST(
+ 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL,
+ 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL, 0xFFFFFFFFUL
+ );
+ CHECK(secp256k1_scalar_check_overflow(&overflowed));
+ }
+
+ {
+ /* Static test vectors.
+ * These were reduced from ~10^12 random vectors based on comparison-decision
+ * and edge-case coverage on 32-bit and 64-bit implementations.
+ * The responses were generated with Sage 5.9.
+ */
+ secp256k1_scalar x;
+ secp256k1_scalar y;
+ secp256k1_scalar z;
+ secp256k1_scalar zz;
+ secp256k1_scalar one;
+ secp256k1_scalar r1;
+ secp256k1_scalar r2;
+#if defined(USE_SCALAR_INV_NUM)
+ secp256k1_scalar zzv;
+#endif
+ int overflow;
+ unsigned char chal[33][2][32] = {
+ {{0xff, 0xff, 0x03, 0x07, 0x00, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xff, 0xff,
+ 0xff, 0xff, 0x03, 0x00, 0xc0, 0xff, 0xff, 0xff},
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0x0f, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x03, 0x00, 0x00, 0x00, 0x00, 0xe0, 0xff}},
+ {{0xef, 0xff, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0,
+ 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, 0x80, 0xff}},
+ {{0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
+ 0x80, 0x00, 0x00, 0x80, 0xff, 0x3f, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xf8, 0xff, 0xff, 0xff, 0x00},
+ {0x00, 0x00, 0xfc, 0xff, 0xff, 0xff, 0xff, 0x80,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f, 0x00, 0xe0,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x7f, 0xff, 0xff, 0xff}},
+ {{0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x00, 0x1e, 0xf8, 0xff, 0xff, 0xff, 0xfd, 0xff},
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f,
+ 0x00, 0x00, 0x00, 0xf8, 0xff, 0x03, 0x00, 0xe0,
+ 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0xf0, 0xff,
+ 0xf3, 0xff, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00}},
+ {{0x80, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0x00,
+ 0x00, 0x1c, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xe0, 0xff, 0xff, 0xff, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xe0, 0xff, 0xff, 0xff},
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0x00,
+ 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x1f, 0x00, 0x00, 0x80, 0xff, 0xff, 0x3f,
+ 0x00, 0xfe, 0xff, 0xff, 0xff, 0xdf, 0xff, 0xff}},
+ {{0xff, 0xff, 0xff, 0xff, 0x00, 0x0f, 0xfc, 0x9f,
+ 0xff, 0xff, 0xff, 0x00, 0x80, 0x00, 0x00, 0x80,
+ 0xff, 0x0f, 0xfc, 0xff, 0x7f, 0x00, 0x00, 0x00,
+ 0x00, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00},
+ {0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
+ 0x00, 0x00, 0xf8, 0xff, 0x0f, 0xc0, 0xff, 0xff,
+ 0xff, 0x1f, 0x00, 0x00, 0x00, 0xc0, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x07, 0x80, 0xff, 0xff, 0xff}},
+ {{0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0x00, 0x00,
+ 0x80, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff,
+ 0xf7, 0xff, 0xff, 0xef, 0xff, 0xff, 0xff, 0x00,
+ 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0xf0},
+ {0x00, 0x00, 0x00, 0x00, 0xf8, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
+ {{0x00, 0xf8, 0xff, 0x03, 0xff, 0xff, 0xff, 0x00,
+ 0x00, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x80, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x03, 0xc0, 0xff, 0x0f, 0xfc, 0xff},
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xe0, 0xff, 0xff,
+ 0xff, 0x01, 0x00, 0x00, 0x00, 0x3f, 0x00, 0xc0,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
+ {{0x8f, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x7f, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00},
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
+ {{0x00, 0x00, 0x00, 0xc0, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x03, 0x00, 0x80, 0x00, 0x00, 0x80,
+ 0xff, 0xff, 0xff, 0x00, 0x00, 0x80, 0xff, 0x7f},
+ {0xff, 0xcf, 0xff, 0xff, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0xc0, 0xff, 0xcf, 0xff, 0xff, 0xff, 0xff,
+ 0xbf, 0xff, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x80, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00}},
+ {{0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xff, 0xff,
+ 0xff, 0xff, 0x00, 0xfc, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x00, 0x80, 0x00, 0x00, 0x80,
+ 0xff, 0x01, 0xfc, 0xff, 0x01, 0x00, 0xfe, 0xff},
+ {0xff, 0xff, 0xff, 0x03, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0x00}},
+ {{0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x00, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x7f, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0xf8, 0xff, 0x01, 0x00, 0xf0, 0xff, 0xff,
+ 0xe0, 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
+ {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xff, 0x00},
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00,
+ 0xfc, 0xff, 0xff, 0x3f, 0xf0, 0xff, 0xff, 0x3f,
+ 0x00, 0x00, 0xf8, 0x07, 0x00, 0x00, 0x00, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x0f, 0x7e, 0x00, 0x00}},
+ {{0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x1f, 0x00, 0x00, 0xfe, 0x07, 0x00},
+ {0x00, 0x00, 0x00, 0xf0, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xfb, 0xff, 0x07, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60}},
+ {{0xff, 0x01, 0x00, 0xff, 0xff, 0xff, 0x0f, 0x00,
+ 0x80, 0x7f, 0xfe, 0xff, 0xff, 0xff, 0xff, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ {0xff, 0xff, 0x1f, 0x00, 0xf0, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x00, 0x00}},
+ {{0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf1, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03,
+ 0x00, 0x00, 0x00, 0xe0, 0xff, 0xff, 0xff, 0xff}},
+ {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xc0, 0xff, 0xff, 0xcf, 0xff, 0x1f, 0x00, 0x00,
+ 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x3f, 0x00, 0x7e,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
+ {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xfc, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0x00},
+ {0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
+ 0xff, 0xff, 0x7f, 0x00, 0x80, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x00, 0x00, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff}},
+ {{0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x80,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00},
+ {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x3f, 0x00, 0x00, 0x80,
+ 0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff,
+ 0xff, 0x7f, 0xf8, 0xff, 0xff, 0x1f, 0x00, 0xfe}},
+ {{0xff, 0xff, 0xff, 0x3f, 0xf8, 0xff, 0xff, 0xff,
+ 0xff, 0x03, 0xfe, 0x01, 0x00, 0x00, 0x00, 0x00,
+ 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07},
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
+ 0xff, 0xff, 0xff, 0xff, 0x01, 0x80, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00}},
+ {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe,
+ 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b,
+ 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x40}},
+ {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
+ {{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ {0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
+ {{0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0xc0,
+ 0xff, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f},
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01, 0x00,
+ 0xf0, 0xff, 0xff, 0xff, 0xff, 0x07, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xfe, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0x01, 0xff, 0xff, 0xff}},
+ {{0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02}},
+ {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe,
+ 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b,
+ 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x40},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}},
+ {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0x7e, 0x00, 0x00, 0xc0, 0xff, 0xff, 0x07, 0x00,
+ 0x80, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00,
+ 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff},
+ {0xff, 0x01, 0x00, 0x00, 0x00, 0xe0, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x80,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x03, 0x00, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}},
+ {{0xff, 0xff, 0xf0, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0xf0, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x00, 0xe0, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01,
+ 0x80, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0xff, 0xff,
+ 0xff, 0xff, 0x3f, 0x00, 0xf8, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0x3f, 0x00, 0x00, 0xc0, 0xf1, 0x7f, 0x00}},
+ {{0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xc0, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x80, 0x00, 0x00, 0x80, 0xff, 0xff, 0xff, 0x00},
+ {0x00, 0xf8, 0xff, 0xff, 0xff, 0xff, 0xff, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xff,
+ 0xff, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x80, 0x1f,
+ 0x00, 0x00, 0xfc, 0xff, 0xff, 0x01, 0xff, 0xff}},
+ {{0x00, 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x80, 0x00, 0x00, 0x80, 0xff, 0x03, 0xe0, 0x01,
+ 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0xfc, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00},
+ {0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00,
+ 0xfe, 0xff, 0xff, 0xf0, 0x07, 0x00, 0x3c, 0x80,
+ 0xff, 0xff, 0xff, 0xff, 0xfc, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0x07, 0xe0, 0xff, 0x00, 0x00, 0x00}},
+ {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0xfc, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x07, 0xf8,
+ 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80},
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x0c, 0x80, 0x00,
+ 0x00, 0x00, 0x00, 0xc0, 0x7f, 0xfe, 0xff, 0x1f,
+ 0x00, 0xfe, 0xff, 0x03, 0x00, 0x00, 0xfe, 0xff}},
+ {{0xff, 0xff, 0x81, 0xff, 0xff, 0xff, 0xff, 0x00,
+ 0x80, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x83,
+ 0xff, 0xff, 0x00, 0x00, 0x80, 0x00, 0x00, 0x80,
+ 0xff, 0xff, 0x7f, 0x00, 0x00, 0x00, 0x00, 0xf0},
+ {0xff, 0x01, 0x00, 0x00, 0x00, 0x00, 0xf8, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x1f, 0x00, 0x00,
+ 0xf8, 0x07, 0x00, 0x80, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xc7, 0xff, 0xff, 0xe0, 0xff, 0xff, 0xff}},
+ {{0x82, 0xc9, 0xfa, 0xb0, 0x68, 0x04, 0xa0, 0x00,
+ 0x82, 0xc9, 0xfa, 0xb0, 0x68, 0x04, 0xa0, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x6f, 0x03, 0xfb,
+ 0xfa, 0x8a, 0x7d, 0xdf, 0x13, 0x86, 0xe2, 0x03},
+ {0x82, 0xc9, 0xfa, 0xb0, 0x68, 0x04, 0xa0, 0x00,
+ 0x82, 0xc9, 0xfa, 0xb0, 0x68, 0x04, 0xa0, 0x00,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x6f, 0x03, 0xfb,
+ 0xfa, 0x8a, 0x7d, 0xdf, 0x13, 0x86, 0xe2, 0x03}}
+ };
+ unsigned char res[33][2][32] = {
+ {{0x0c, 0x3b, 0x0a, 0xca, 0x8d, 0x1a, 0x2f, 0xb9,
+ 0x8a, 0x7b, 0x53, 0x5a, 0x1f, 0xc5, 0x22, 0xa1,
+ 0x07, 0x2a, 0x48, 0xea, 0x02, 0xeb, 0xb3, 0xd6,
+ 0x20, 0x1e, 0x86, 0xd0, 0x95, 0xf6, 0x92, 0x35},
+ {0xdc, 0x90, 0x7a, 0x07, 0x2e, 0x1e, 0x44, 0x6d,
+ 0xf8, 0x15, 0x24, 0x5b, 0x5a, 0x96, 0x37, 0x9c,
+ 0x37, 0x7b, 0x0d, 0xac, 0x1b, 0x65, 0x58, 0x49,
+ 0x43, 0xb7, 0x31, 0xbb, 0xa7, 0xf4, 0x97, 0x15}},
+ {{0xf1, 0xf7, 0x3a, 0x50, 0xe6, 0x10, 0xba, 0x22,
+ 0x43, 0x4d, 0x1f, 0x1f, 0x7c, 0x27, 0xca, 0x9c,
+ 0xb8, 0xb6, 0xa0, 0xfc, 0xd8, 0xc0, 0x05, 0x2f,
+ 0xf7, 0x08, 0xe1, 0x76, 0xdd, 0xd0, 0x80, 0xc8},
+ {0xe3, 0x80, 0x80, 0xb8, 0xdb, 0xe3, 0xa9, 0x77,
+ 0x00, 0xb0, 0xf5, 0x2e, 0x27, 0xe2, 0x68, 0xc4,
+ 0x88, 0xe8, 0x04, 0xc1, 0x12, 0xbf, 0x78, 0x59,
+ 0xe6, 0xa9, 0x7c, 0xe1, 0x81, 0xdd, 0xb9, 0xd5}},
+ {{0x96, 0xe2, 0xee, 0x01, 0xa6, 0x80, 0x31, 0xef,
+ 0x5c, 0xd0, 0x19, 0xb4, 0x7d, 0x5f, 0x79, 0xab,
+ 0xa1, 0x97, 0xd3, 0x7e, 0x33, 0xbb, 0x86, 0x55,
+ 0x60, 0x20, 0x10, 0x0d, 0x94, 0x2d, 0x11, 0x7c},
+ {0xcc, 0xab, 0xe0, 0xe8, 0x98, 0x65, 0x12, 0x96,
+ 0x38, 0x5a, 0x1a, 0xf2, 0x85, 0x23, 0x59, 0x5f,
+ 0xf9, 0xf3, 0xc2, 0x81, 0x70, 0x92, 0x65, 0x12,
+ 0x9c, 0x65, 0x1e, 0x96, 0x00, 0xef, 0xe7, 0x63}},
+ {{0xac, 0x1e, 0x62, 0xc2, 0x59, 0xfc, 0x4e, 0x5c,
+ 0x83, 0xb0, 0xd0, 0x6f, 0xce, 0x19, 0xf6, 0xbf,
+ 0xa4, 0xb0, 0xe0, 0x53, 0x66, 0x1f, 0xbf, 0xc9,
+ 0x33, 0x47, 0x37, 0xa9, 0x3d, 0x5d, 0xb0, 0x48},
+ {0x86, 0xb9, 0x2a, 0x7f, 0x8e, 0xa8, 0x60, 0x42,
+ 0x26, 0x6d, 0x6e, 0x1c, 0xa2, 0xec, 0xe0, 0xe5,
+ 0x3e, 0x0a, 0x33, 0xbb, 0x61, 0x4c, 0x9f, 0x3c,
+ 0xd1, 0xdf, 0x49, 0x33, 0xcd, 0x72, 0x78, 0x18}},
+ {{0xf7, 0xd3, 0xcd, 0x49, 0x5c, 0x13, 0x22, 0xfb,
+ 0x2e, 0xb2, 0x2f, 0x27, 0xf5, 0x8a, 0x5d, 0x74,
+ 0xc1, 0x58, 0xc5, 0xc2, 0x2d, 0x9f, 0x52, 0xc6,
+ 0x63, 0x9f, 0xba, 0x05, 0x76, 0x45, 0x7a, 0x63},
+ {0x8a, 0xfa, 0x55, 0x4d, 0xdd, 0xa3, 0xb2, 0xc3,
+ 0x44, 0xfd, 0xec, 0x72, 0xde, 0xef, 0xc0, 0x99,
+ 0xf5, 0x9f, 0xe2, 0x52, 0xb4, 0x05, 0x32, 0x58,
+ 0x57, 0xc1, 0x8f, 0xea, 0xc3, 0x24, 0x5b, 0x94}},
+ {{0x05, 0x83, 0xee, 0xdd, 0x64, 0xf0, 0x14, 0x3b,
+ 0xa0, 0x14, 0x4a, 0x3a, 0x41, 0x82, 0x7c, 0xa7,
+ 0x2c, 0xaa, 0xb1, 0x76, 0xbb, 0x59, 0x64, 0x5f,
+ 0x52, 0xad, 0x25, 0x29, 0x9d, 0x8f, 0x0b, 0xb0},
+ {0x7e, 0xe3, 0x7c, 0xca, 0xcd, 0x4f, 0xb0, 0x6d,
+ 0x7a, 0xb2, 0x3e, 0xa0, 0x08, 0xb9, 0xa8, 0x2d,
+ 0xc2, 0xf4, 0x99, 0x66, 0xcc, 0xac, 0xd8, 0xb9,
+ 0x72, 0x2a, 0x4a, 0x3e, 0x0f, 0x7b, 0xbf, 0xf4}},
+ {{0x8c, 0x9c, 0x78, 0x2b, 0x39, 0x61, 0x7e, 0xf7,
+ 0x65, 0x37, 0x66, 0x09, 0x38, 0xb9, 0x6f, 0x70,
+ 0x78, 0x87, 0xff, 0xcf, 0x93, 0xca, 0x85, 0x06,
+ 0x44, 0x84, 0xa7, 0xfe, 0xd3, 0xa4, 0xe3, 0x7e},
+ {0xa2, 0x56, 0x49, 0x23, 0x54, 0xa5, 0x50, 0xe9,
+ 0x5f, 0xf0, 0x4d, 0xe7, 0xdc, 0x38, 0x32, 0x79,
+ 0x4f, 0x1c, 0xb7, 0xe4, 0xbb, 0xf8, 0xbb, 0x2e,
+ 0x40, 0x41, 0x4b, 0xcc, 0xe3, 0x1e, 0x16, 0x36}},
+ {{0x0c, 0x1e, 0xd7, 0x09, 0x25, 0x40, 0x97, 0xcb,
+ 0x5c, 0x46, 0xa8, 0xda, 0xef, 0x25, 0xd5, 0xe5,
+ 0x92, 0x4d, 0xcf, 0xa3, 0xc4, 0x5d, 0x35, 0x4a,
+ 0xe4, 0x61, 0x92, 0xf3, 0xbf, 0x0e, 0xcd, 0xbe},
+ {0xe4, 0xaf, 0x0a, 0xb3, 0x30, 0x8b, 0x9b, 0x48,
+ 0x49, 0x43, 0xc7, 0x64, 0x60, 0x4a, 0x2b, 0x9e,
+ 0x95, 0x5f, 0x56, 0xe8, 0x35, 0xdc, 0xeb, 0xdc,
+ 0xc7, 0xc4, 0xfe, 0x30, 0x40, 0xc7, 0xbf, 0xa4}},
+ {{0xd4, 0xa0, 0xf5, 0x81, 0x49, 0x6b, 0xb6, 0x8b,
+ 0x0a, 0x69, 0xf9, 0xfe, 0xa8, 0x32, 0xe5, 0xe0,
+ 0xa5, 0xcd, 0x02, 0x53, 0xf9, 0x2c, 0xe3, 0x53,
+ 0x83, 0x36, 0xc6, 0x02, 0xb5, 0xeb, 0x64, 0xb8},
+ {0x1d, 0x42, 0xb9, 0xf9, 0xe9, 0xe3, 0x93, 0x2c,
+ 0x4c, 0xee, 0x6c, 0x5a, 0x47, 0x9e, 0x62, 0x01,
+ 0x6b, 0x04, 0xfe, 0xa4, 0x30, 0x2b, 0x0d, 0x4f,
+ 0x71, 0x10, 0xd3, 0x55, 0xca, 0xf3, 0x5e, 0x80}},
+ {{0x77, 0x05, 0xf6, 0x0c, 0x15, 0x9b, 0x45, 0xe7,
+ 0xb9, 0x11, 0xb8, 0xf5, 0xd6, 0xda, 0x73, 0x0c,
+ 0xda, 0x92, 0xea, 0xd0, 0x9d, 0xd0, 0x18, 0x92,
+ 0xce, 0x9a, 0xaa, 0xee, 0x0f, 0xef, 0xde, 0x30},
+ {0xf1, 0xf1, 0xd6, 0x9b, 0x51, 0xd7, 0x77, 0x62,
+ 0x52, 0x10, 0xb8, 0x7a, 0x84, 0x9d, 0x15, 0x4e,
+ 0x07, 0xdc, 0x1e, 0x75, 0x0d, 0x0c, 0x3b, 0xdb,
+ 0x74, 0x58, 0x62, 0x02, 0x90, 0x54, 0x8b, 0x43}},
+ {{0xa6, 0xfe, 0x0b, 0x87, 0x80, 0x43, 0x67, 0x25,
+ 0x57, 0x5d, 0xec, 0x40, 0x50, 0x08, 0xd5, 0x5d,
+ 0x43, 0xd7, 0xe0, 0xaa, 0xe0, 0x13, 0xb6, 0xb0,
+ 0xc0, 0xd4, 0xe5, 0x0d, 0x45, 0x83, 0xd6, 0x13},
+ {0x40, 0x45, 0x0a, 0x92, 0x31, 0xea, 0x8c, 0x60,
+ 0x8c, 0x1f, 0xd8, 0x76, 0x45, 0xb9, 0x29, 0x00,
+ 0x26, 0x32, 0xd8, 0xa6, 0x96, 0x88, 0xe2, 0xc4,
+ 0x8b, 0xdb, 0x7f, 0x17, 0x87, 0xcc, 0xc8, 0xf2}},
+ {{0xc2, 0x56, 0xe2, 0xb6, 0x1a, 0x81, 0xe7, 0x31,
+ 0x63, 0x2e, 0xbb, 0x0d, 0x2f, 0x81, 0x67, 0xd4,
+ 0x22, 0xe2, 0x38, 0x02, 0x25, 0x97, 0xc7, 0x88,
+ 0x6e, 0xdf, 0xbe, 0x2a, 0xa5, 0x73, 0x63, 0xaa},
+ {0x50, 0x45, 0xe2, 0xc3, 0xbd, 0x89, 0xfc, 0x57,
+ 0xbd, 0x3c, 0xa3, 0x98, 0x7e, 0x7f, 0x36, 0x38,
+ 0x92, 0x39, 0x1f, 0x0f, 0x81, 0x1a, 0x06, 0x51,
+ 0x1f, 0x8d, 0x6a, 0xff, 0x47, 0x16, 0x06, 0x9c}},
+ {{0x33, 0x95, 0xa2, 0x6f, 0x27, 0x5f, 0x9c, 0x9c,
+ 0x64, 0x45, 0xcb, 0xd1, 0x3c, 0xee, 0x5e, 0x5f,
+ 0x48, 0xa6, 0xaf, 0xe3, 0x79, 0xcf, 0xb1, 0xe2,
+ 0xbf, 0x55, 0x0e, 0xa2, 0x3b, 0x62, 0xf0, 0xe4},
+ {0x14, 0xe8, 0x06, 0xe3, 0xbe, 0x7e, 0x67, 0x01,
+ 0xc5, 0x21, 0x67, 0xd8, 0x54, 0xb5, 0x7f, 0xa4,
+ 0xf9, 0x75, 0x70, 0x1c, 0xfd, 0x79, 0xdb, 0x86,
+ 0xad, 0x37, 0x85, 0x83, 0x56, 0x4e, 0xf0, 0xbf}},
+ {{0xbc, 0xa6, 0xe0, 0x56, 0x4e, 0xef, 0xfa, 0xf5,
+ 0x1d, 0x5d, 0x3f, 0x2a, 0x5b, 0x19, 0xab, 0x51,
+ 0xc5, 0x8b, 0xdd, 0x98, 0x28, 0x35, 0x2f, 0xc3,
+ 0x81, 0x4f, 0x5c, 0xe5, 0x70, 0xb9, 0xeb, 0x62},
+ {0xc4, 0x6d, 0x26, 0xb0, 0x17, 0x6b, 0xfe, 0x6c,
+ 0x12, 0xf8, 0xe7, 0xc1, 0xf5, 0x2f, 0xfa, 0x91,
+ 0x13, 0x27, 0xbd, 0x73, 0xcc, 0x33, 0x31, 0x1c,
+ 0x39, 0xe3, 0x27, 0x6a, 0x95, 0xcf, 0xc5, 0xfb}},
+ {{0x30, 0xb2, 0x99, 0x84, 0xf0, 0x18, 0x2a, 0x6e,
+ 0x1e, 0x27, 0xed, 0xa2, 0x29, 0x99, 0x41, 0x56,
+ 0xe8, 0xd4, 0x0d, 0xef, 0x99, 0x9c, 0xf3, 0x58,
+ 0x29, 0x55, 0x1a, 0xc0, 0x68, 0xd6, 0x74, 0xa4},
+ {0x07, 0x9c, 0xe7, 0xec, 0xf5, 0x36, 0x73, 0x41,
+ 0xa3, 0x1c, 0xe5, 0x93, 0x97, 0x6a, 0xfd, 0xf7,
+ 0x53, 0x18, 0xab, 0xaf, 0xeb, 0x85, 0xbd, 0x92,
+ 0x90, 0xab, 0x3c, 0xbf, 0x30, 0x82, 0xad, 0xf6}},
+ {{0xc6, 0x87, 0x8a, 0x2a, 0xea, 0xc0, 0xa9, 0xec,
+ 0x6d, 0xd3, 0xdc, 0x32, 0x23, 0xce, 0x62, 0x19,
+ 0xa4, 0x7e, 0xa8, 0xdd, 0x1c, 0x33, 0xae, 0xd3,
+ 0x4f, 0x62, 0x9f, 0x52, 0xe7, 0x65, 0x46, 0xf4},
+ {0x97, 0x51, 0x27, 0x67, 0x2d, 0xa2, 0x82, 0x87,
+ 0x98, 0xd3, 0xb6, 0x14, 0x7f, 0x51, 0xd3, 0x9a,
+ 0x0b, 0xd0, 0x76, 0x81, 0xb2, 0x4f, 0x58, 0x92,
+ 0xa4, 0x86, 0xa1, 0xa7, 0x09, 0x1d, 0xef, 0x9b}},
+ {{0xb3, 0x0f, 0x2b, 0x69, 0x0d, 0x06, 0x90, 0x64,
+ 0xbd, 0x43, 0x4c, 0x10, 0xe8, 0x98, 0x1c, 0xa3,
+ 0xe1, 0x68, 0xe9, 0x79, 0x6c, 0x29, 0x51, 0x3f,
+ 0x41, 0xdc, 0xdf, 0x1f, 0xf3, 0x60, 0xbe, 0x33},
+ {0xa1, 0x5f, 0xf7, 0x1d, 0xb4, 0x3e, 0x9b, 0x3c,
+ 0xe7, 0xbd, 0xb6, 0x06, 0xd5, 0x60, 0x06, 0x6d,
+ 0x50, 0xd2, 0xf4, 0x1a, 0x31, 0x08, 0xf2, 0xea,
+ 0x8e, 0xef, 0x5f, 0x7d, 0xb6, 0xd0, 0xc0, 0x27}},
+ {{0x62, 0x9a, 0xd9, 0xbb, 0x38, 0x36, 0xce, 0xf7,
+ 0x5d, 0x2f, 0x13, 0xec, 0xc8, 0x2d, 0x02, 0x8a,
+ 0x2e, 0x72, 0xf0, 0xe5, 0x15, 0x9d, 0x72, 0xae,
+ 0xfc, 0xb3, 0x4f, 0x02, 0xea, 0xe1, 0x09, 0xfe},
+ {0x00, 0x00, 0x00, 0x00, 0xfa, 0x0a, 0x3d, 0xbc,
+ 0xad, 0x16, 0x0c, 0xb6, 0xe7, 0x7c, 0x8b, 0x39,
+ 0x9a, 0x43, 0xbb, 0xe3, 0xc2, 0x55, 0x15, 0x14,
+ 0x75, 0xac, 0x90, 0x9b, 0x7f, 0x9a, 0x92, 0x00}},
+ {{0x8b, 0xac, 0x70, 0x86, 0x29, 0x8f, 0x00, 0x23,
+ 0x7b, 0x45, 0x30, 0xaa, 0xb8, 0x4c, 0xc7, 0x8d,
+ 0x4e, 0x47, 0x85, 0xc6, 0x19, 0xe3, 0x96, 0xc2,
+ 0x9a, 0xa0, 0x12, 0xed, 0x6f, 0xd7, 0x76, 0x16},
+ {0x45, 0xaf, 0x7e, 0x33, 0xc7, 0x7f, 0x10, 0x6c,
+ 0x7c, 0x9f, 0x29, 0xc1, 0xa8, 0x7e, 0x15, 0x84,
+ 0xe7, 0x7d, 0xc0, 0x6d, 0xab, 0x71, 0x5d, 0xd0,
+ 0x6b, 0x9f, 0x97, 0xab, 0xcb, 0x51, 0x0c, 0x9f}},
+ {{0x9e, 0xc3, 0x92, 0xb4, 0x04, 0x9f, 0xc8, 0xbb,
+ 0xdd, 0x9e, 0xc6, 0x05, 0xfd, 0x65, 0xec, 0x94,
+ 0x7f, 0x2c, 0x16, 0xc4, 0x40, 0xac, 0x63, 0x7b,
+ 0x7d, 0xb8, 0x0c, 0xe4, 0x5b, 0xe3, 0xa7, 0x0e},
+ {0x43, 0xf4, 0x44, 0xe8, 0xcc, 0xc8, 0xd4, 0x54,
+ 0x33, 0x37, 0x50, 0xf2, 0x87, 0x42, 0x2e, 0x00,
+ 0x49, 0x60, 0x62, 0x02, 0xfd, 0x1a, 0x7c, 0xdb,
+ 0x29, 0x6c, 0x6d, 0x54, 0x53, 0x08, 0xd1, 0xc8}},
+ {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}},
+ {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}},
+ {{0x27, 0x59, 0xc7, 0x35, 0x60, 0x71, 0xa6, 0xf1,
+ 0x79, 0xa5, 0xfd, 0x79, 0x16, 0xf3, 0x41, 0xf0,
+ 0x57, 0xb4, 0x02, 0x97, 0x32, 0xe7, 0xde, 0x59,
+ 0xe2, 0x2d, 0x9b, 0x11, 0xea, 0x2c, 0x35, 0x92},
+ {0x27, 0x59, 0xc7, 0x35, 0x60, 0x71, 0xa6, 0xf1,
+ 0x79, 0xa5, 0xfd, 0x79, 0x16, 0xf3, 0x41, 0xf0,
+ 0x57, 0xb4, 0x02, 0x97, 0x32, 0xe7, 0xde, 0x59,
+ 0xe2, 0x2d, 0x9b, 0x11, 0xea, 0x2c, 0x35, 0x92}},
+ {{0x28, 0x56, 0xac, 0x0e, 0x4f, 0x98, 0x09, 0xf0,
+ 0x49, 0xfa, 0x7f, 0x84, 0xac, 0x7e, 0x50, 0x5b,
+ 0x17, 0x43, 0x14, 0x89, 0x9c, 0x53, 0xa8, 0x94,
+ 0x30, 0xf2, 0x11, 0x4d, 0x92, 0x14, 0x27, 0xe8},
+ {0x39, 0x7a, 0x84, 0x56, 0x79, 0x9d, 0xec, 0x26,
+ 0x2c, 0x53, 0xc1, 0x94, 0xc9, 0x8d, 0x9e, 0x9d,
+ 0x32, 0x1f, 0xdd, 0x84, 0x04, 0xe8, 0xe2, 0x0a,
+ 0x6b, 0xbe, 0xbb, 0x42, 0x40, 0x67, 0x30, 0x6c}},
+ {{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x45, 0x51, 0x23, 0x19, 0x50, 0xb7, 0x5f, 0xc4,
+ 0x40, 0x2d, 0xa1, 0x73, 0x2f, 0xc9, 0xbe, 0xbd},
+ {0x27, 0x59, 0xc7, 0x35, 0x60, 0x71, 0xa6, 0xf1,
+ 0x79, 0xa5, 0xfd, 0x79, 0x16, 0xf3, 0x41, 0xf0,
+ 0x57, 0xb4, 0x02, 0x97, 0x32, 0xe7, 0xde, 0x59,
+ 0xe2, 0x2d, 0x9b, 0x11, 0xea, 0x2c, 0x35, 0x92}},
+ {{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe,
+ 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b,
+ 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x40},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}},
+ {{0x1c, 0xc4, 0xf7, 0xda, 0x0f, 0x65, 0xca, 0x39,
+ 0x70, 0x52, 0x92, 0x8e, 0xc3, 0xc8, 0x15, 0xea,
+ 0x7f, 0x10, 0x9e, 0x77, 0x4b, 0x6e, 0x2d, 0xdf,
+ 0xe8, 0x30, 0x9d, 0xda, 0xe8, 0x9a, 0x65, 0xae},
+ {0x02, 0xb0, 0x16, 0xb1, 0x1d, 0xc8, 0x57, 0x7b,
+ 0xa2, 0x3a, 0xa2, 0xa3, 0x38, 0x5c, 0x8f, 0xeb,
+ 0x66, 0x37, 0x91, 0xa8, 0x5f, 0xef, 0x04, 0xf6,
+ 0x59, 0x75, 0xe1, 0xee, 0x92, 0xf6, 0x0e, 0x30}},
+ {{0x8d, 0x76, 0x14, 0xa4, 0x14, 0x06, 0x9f, 0x9a,
+ 0xdf, 0x4a, 0x85, 0xa7, 0x6b, 0xbf, 0x29, 0x6f,
+ 0xbc, 0x34, 0x87, 0x5d, 0xeb, 0xbb, 0x2e, 0xa9,
+ 0xc9, 0x1f, 0x58, 0xd6, 0x9a, 0x82, 0xa0, 0x56},
+ {0xd4, 0xb9, 0xdb, 0x88, 0x1d, 0x04, 0xe9, 0x93,
+ 0x8d, 0x3f, 0x20, 0xd5, 0x86, 0xa8, 0x83, 0x07,
+ 0xdb, 0x09, 0xd8, 0x22, 0x1f, 0x7f, 0xf1, 0x71,
+ 0xc8, 0xe7, 0x5d, 0x47, 0xaf, 0x8b, 0x72, 0xe9}},
+ {{0x83, 0xb9, 0x39, 0xb2, 0xa4, 0xdf, 0x46, 0x87,
+ 0xc2, 0xb8, 0xf1, 0xe6, 0x4c, 0xd1, 0xe2, 0xa9,
+ 0xe4, 0x70, 0x30, 0x34, 0xbc, 0x52, 0x7c, 0x55,
+ 0xa6, 0xec, 0x80, 0xa4, 0xe5, 0xd2, 0xdc, 0x73},
+ {0x08, 0xf1, 0x03, 0xcf, 0x16, 0x73, 0xe8, 0x7d,
+ 0xb6, 0x7e, 0x9b, 0xc0, 0xb4, 0xc2, 0xa5, 0x86,
+ 0x02, 0x77, 0xd5, 0x27, 0x86, 0xa5, 0x15, 0xfb,
+ 0xae, 0x9b, 0x8c, 0xa9, 0xf9, 0xf8, 0xa8, 0x4a}},
+ {{0x8b, 0x00, 0x49, 0xdb, 0xfa, 0xf0, 0x1b, 0xa2,
+ 0xed, 0x8a, 0x9a, 0x7a, 0x36, 0x78, 0x4a, 0xc7,
+ 0xf7, 0xad, 0x39, 0xd0, 0x6c, 0x65, 0x7a, 0x41,
+ 0xce, 0xd6, 0xd6, 0x4c, 0x20, 0x21, 0x6b, 0xc7},
+ {0xc6, 0xca, 0x78, 0x1d, 0x32, 0x6c, 0x6c, 0x06,
+ 0x91, 0xf2, 0x1a, 0xe8, 0x43, 0x16, 0xea, 0x04,
+ 0x3c, 0x1f, 0x07, 0x85, 0xf7, 0x09, 0x22, 0x08,
+ 0xba, 0x13, 0xfd, 0x78, 0x1e, 0x3f, 0x6f, 0x62}},
+ {{0x25, 0x9b, 0x7c, 0xb0, 0xac, 0x72, 0x6f, 0xb2,
+ 0xe3, 0x53, 0x84, 0x7a, 0x1a, 0x9a, 0x98, 0x9b,
+ 0x44, 0xd3, 0x59, 0xd0, 0x8e, 0x57, 0x41, 0x40,
+ 0x78, 0xa7, 0x30, 0x2f, 0x4c, 0x9c, 0xb9, 0x68},
+ {0xb7, 0x75, 0x03, 0x63, 0x61, 0xc2, 0x48, 0x6e,
+ 0x12, 0x3d, 0xbf, 0x4b, 0x27, 0xdf, 0xb1, 0x7a,
+ 0xff, 0x4e, 0x31, 0x07, 0x83, 0xf4, 0x62, 0x5b,
+ 0x19, 0xa5, 0xac, 0xa0, 0x32, 0x58, 0x0d, 0xa7}},
+ {{0x43, 0x4f, 0x10, 0xa4, 0xca, 0xdb, 0x38, 0x67,
+ 0xfa, 0xae, 0x96, 0xb5, 0x6d, 0x97, 0xff, 0x1f,
+ 0xb6, 0x83, 0x43, 0xd3, 0xa0, 0x2d, 0x70, 0x7a,
+ 0x64, 0x05, 0x4c, 0xa7, 0xc1, 0xa5, 0x21, 0x51},
+ {0xe4, 0xf1, 0x23, 0x84, 0xe1, 0xb5, 0x9d, 0xf2,
+ 0xb8, 0x73, 0x8b, 0x45, 0x2b, 0x35, 0x46, 0x38,
+ 0x10, 0x2b, 0x50, 0xf8, 0x8b, 0x35, 0xcd, 0x34,
+ 0xc8, 0x0e, 0xf6, 0xdb, 0x09, 0x35, 0xf0, 0xda}},
+ {{0xdb, 0x21, 0x5c, 0x8d, 0x83, 0x1d, 0xb3, 0x34,
+ 0xc7, 0x0e, 0x43, 0xa1, 0x58, 0x79, 0x67, 0x13,
+ 0x1e, 0x86, 0x5d, 0x89, 0x63, 0xe6, 0x0a, 0x46,
+ 0x5c, 0x02, 0x97, 0x1b, 0x62, 0x43, 0x86, 0xf5},
+ {0xdb, 0x21, 0x5c, 0x8d, 0x83, 0x1d, 0xb3, 0x34,
+ 0xc7, 0x0e, 0x43, 0xa1, 0x58, 0x79, 0x67, 0x13,
+ 0x1e, 0x86, 0x5d, 0x89, 0x63, 0xe6, 0x0a, 0x46,
+ 0x5c, 0x02, 0x97, 0x1b, 0x62, 0x43, 0x86, 0xf5}}
+ };
+ secp256k1_scalar_set_int(&one, 1);
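+ /* For each challenge pair (x, y), res[i][0] is x*y and res[i][1] is x^2, both reduced modulo the group order. */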
+ for (i = 0; i < 33; i++) {
+ secp256k1_scalar_set_b32(&x, chal[i][0], &overflow);
+ CHECK(!overflow);
+ secp256k1_scalar_set_b32(&y, chal[i][1], &overflow);
+ CHECK(!overflow);
+ secp256k1_scalar_set_b32(&r1, res[i][0], &overflow);
+ CHECK(!overflow);
+ secp256k1_scalar_set_b32(&r2, res[i][1], &overflow);
+ CHECK(!overflow);
+ secp256k1_scalar_mul(&z, &x, &y);
+ CHECK(!secp256k1_scalar_check_overflow(&z));
+ CHECK(secp256k1_scalar_eq(&r1, &z));
+ if (!secp256k1_scalar_is_zero(&y)) {
+ secp256k1_scalar_inverse(&zz, &y);
+ CHECK(!secp256k1_scalar_check_overflow(&zz));
+#if defined(USE_SCALAR_INV_NUM)
+ secp256k1_scalar_inverse_var(&zzv, &y);
+ CHECK(secp256k1_scalar_eq(&zzv, &zz));
+#endif
+ secp256k1_scalar_mul(&z, &z, &zz);
+ CHECK(!secp256k1_scalar_check_overflow(&z));
+ CHECK(secp256k1_scalar_eq(&x, &z));
+ secp256k1_scalar_mul(&zz, &zz, &y);
+ CHECK(!secp256k1_scalar_check_overflow(&zz));
+ CHECK(secp256k1_scalar_eq(&one, &zz));
+ }
+ secp256k1_scalar_mul(&z, &x, &x);
+ CHECK(!secp256k1_scalar_check_overflow(&z));
+ secp256k1_scalar_sqr(&zz, &x);
+ CHECK(!secp256k1_scalar_check_overflow(&zz));
+ CHECK(secp256k1_scalar_eq(&zz, &z));
+ CHECK(secp256k1_scalar_eq(&r2, &zz));
+ }
+ }
+}
+
+/***** FIELD TESTS *****/
+
+void random_fe(secp256k1_fe *x) {
+ unsigned char bin[32];
+ do {
+ secp256k1_rand256(bin);
+ if (secp256k1_fe_set_b32(x, bin)) {
+ return;
+ }
+ } while(1);
+}
+
+void random_fe_test(secp256k1_fe *x) {
+ unsigned char bin[32];
+ do {
+ secp256k1_rand256_test(bin);
+ if (secp256k1_fe_set_b32(x, bin)) {
+ return;
+ }
+ } while(1);
+}
+
+void random_fe_non_zero(secp256k1_fe *nz) {
+ int tries = 10;
+ while (--tries >= 0) {
+ random_fe(nz);
+ secp256k1_fe_normalize(nz);
+ if (!secp256k1_fe_is_zero(nz)) {
+ break;
+ }
+ }
+ /* Infinitesimal probability of spurious failure here */
+ CHECK(tries >= 0);
+}
+
+void random_fe_non_square(secp256k1_fe *ns) {
+ secp256k1_fe r;
+ random_fe_non_zero(ns);
+ if (secp256k1_fe_sqrt(&r, ns)) {
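+ /* -1 is a quadratic nonresidue modulo the field prime (p = 3 mod 4), so if ns is a square, -ns is not. */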
+ secp256k1_fe_negate(ns, ns, 1);
+ }
+}
+
+int check_fe_equal(const secp256k1_fe *a, const secp256k1_fe *b) {
+ secp256k1_fe an = *a;
+ secp256k1_fe bn = *b;
+ secp256k1_fe_normalize_weak(&an);
+ secp256k1_fe_normalize_var(&bn);
+ return secp256k1_fe_equal_var(&an, &bn);
+}
+
+int check_fe_inverse(const secp256k1_fe *a, const secp256k1_fe *ai) {
+ secp256k1_fe x;
+ secp256k1_fe one = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 1);
+ secp256k1_fe_mul(&x, a, ai);
+ return check_fe_equal(&x, &one);
+}
+
+void run_field_convert(void) {
+ static const unsigned char b32[32] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
+ 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29,
+ 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x40
+ };
+ static const secp256k1_fe_storage fes = SECP256K1_FE_STORAGE_CONST(
+ 0x00010203UL, 0x04050607UL, 0x11121314UL, 0x15161718UL,
+ 0x22232425UL, 0x26272829UL, 0x33343536UL, 0x37383940UL
+ );
+ static const secp256k1_fe fe = SECP256K1_FE_CONST(
+ 0x00010203UL, 0x04050607UL, 0x11121314UL, 0x15161718UL,
+ 0x22232425UL, 0x26272829UL, 0x33343536UL, 0x37383940UL
+ );
+ secp256k1_fe fe2;
+ unsigned char b322[32];
+ secp256k1_fe_storage fes2;
+ /* Check conversions to fe. */
+ CHECK(secp256k1_fe_set_b32(&fe2, b32));
+ CHECK(secp256k1_fe_equal_var(&fe, &fe2));
+ secp256k1_fe_from_storage(&fe2, &fes);
+ CHECK(secp256k1_fe_equal_var(&fe, &fe2));
+ /* Check conversion from fe. */
+ secp256k1_fe_get_b32(b322, &fe);
+ CHECK(memcmp(b322, b32, 32) == 0);
+ secp256k1_fe_to_storage(&fes2, &fe);
+ CHECK(memcmp(&fes2, &fes, sizeof(fes)) == 0);
+}
+
+int fe_memcmp(const secp256k1_fe *a, const secp256k1_fe *b) {
+ secp256k1_fe t = *b;
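+ /* Copy the VERIFY-only bookkeeping fields from a so the raw memcmp below compares only the limb values. */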
+#ifdef VERIFY
+ t.magnitude = a->magnitude;
+ t.normalized = a->normalized;
+#endif
+ return memcmp(a, &t, sizeof(secp256k1_fe));
+}
+
+void run_field_misc(void) {
+ secp256k1_fe x;
+ secp256k1_fe y;
+ secp256k1_fe z;
+ secp256k1_fe q;
+ secp256k1_fe fe5 = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 5);
+ int i, j;
+ for (i = 0; i < 5*count; i++) {
+ secp256k1_fe_storage xs, ys, zs;
+ random_fe(&x);
+ random_fe_non_zero(&y);
+ /* Test the fe equality and comparison operations. */
+ CHECK(secp256k1_fe_cmp_var(&x, &x) == 0);
+ CHECK(secp256k1_fe_equal_var(&x, &x));
+ z = x;
+ secp256k1_fe_add(&z,&y);
+ /* Test fe conditional move; z is not normalized here. */
+ q = x;
+ secp256k1_fe_cmov(&x, &z, 0);
+ VERIFY_CHECK(!x.normalized && x.magnitude == z.magnitude);
+ secp256k1_fe_cmov(&x, &x, 1);
+ CHECK(fe_memcmp(&x, &z) != 0);
+ CHECK(fe_memcmp(&x, &q) == 0);
+ secp256k1_fe_cmov(&q, &z, 1);
+ VERIFY_CHECK(!q.normalized && q.magnitude == z.magnitude);
+ CHECK(fe_memcmp(&q, &z) == 0);
+ secp256k1_fe_normalize_var(&x);
+ secp256k1_fe_normalize_var(&z);
+ CHECK(!secp256k1_fe_equal_var(&x, &z));
+ secp256k1_fe_normalize_var(&q);
+ secp256k1_fe_cmov(&q, &z, (i&1));
+ VERIFY_CHECK(q.normalized && q.magnitude == 1);
+ for (j = 0; j < 6; j++) {
+ secp256k1_fe_negate(&z, &z, j+1);
+ secp256k1_fe_normalize_var(&q);
+ secp256k1_fe_cmov(&q, &z, (j&1));
+ VERIFY_CHECK(!q.normalized && q.magnitude == (j+2));
+ }
+ secp256k1_fe_normalize_var(&z);
+ /* Test storage conversion and conditional moves. */
+ secp256k1_fe_to_storage(&xs, &x);
+ secp256k1_fe_to_storage(&ys, &y);
+ secp256k1_fe_to_storage(&zs, &z);
+ secp256k1_fe_storage_cmov(&zs, &xs, 0);
+ secp256k1_fe_storage_cmov(&zs, &zs, 1);
+ CHECK(memcmp(&xs, &zs, sizeof(xs)) != 0);
+ secp256k1_fe_storage_cmov(&ys, &xs, 1);
+ CHECK(memcmp(&xs, &ys, sizeof(xs)) == 0);
+ secp256k1_fe_from_storage(&x, &xs);
+ secp256k1_fe_from_storage(&y, &ys);
+ secp256k1_fe_from_storage(&z, &zs);
+ /* Test that mul_int, mul, and add agree. */
+ secp256k1_fe_add(&y, &x);
+ secp256k1_fe_add(&y, &x);
+ z = x;
+ secp256k1_fe_mul_int(&z, 3);
+ CHECK(check_fe_equal(&y, &z));
+ secp256k1_fe_add(&y, &x);
+ secp256k1_fe_add(&z, &x);
+ CHECK(check_fe_equal(&z, &y));
+ z = x;
+ secp256k1_fe_mul_int(&z, 5);
+ secp256k1_fe_mul(&q, &x, &fe5);
+ CHECK(check_fe_equal(&z, &q));
+ secp256k1_fe_negate(&x, &x, 1);
+ secp256k1_fe_add(&z, &x);
+ secp256k1_fe_add(&q, &x);
+ CHECK(check_fe_equal(&y, &z));
+ CHECK(check_fe_equal(&q, &y));
+ }
+}
+
+void run_field_inv(void) {
+ secp256k1_fe x, xi, xii;
+ int i;
+ for (i = 0; i < 10*count; i++) {
+ random_fe_non_zero(&x);
+ secp256k1_fe_inv(&xi, &x);
+ CHECK(check_fe_inverse(&x, &xi));
+ secp256k1_fe_inv(&xii, &xi);
+ CHECK(check_fe_equal(&x, &xii));
+ }
+}
+
+void run_field_inv_var(void) {
+ secp256k1_fe x, xi, xii;
+ int i;
+ for (i = 0; i < 10*count; i++) {
+ random_fe_non_zero(&x);
+ secp256k1_fe_inv_var(&xi, &x);
+ CHECK(check_fe_inverse(&x, &xi));
+ secp256k1_fe_inv_var(&xii, &xi);
+ CHECK(check_fe_equal(&x, &xii));
+ }
+}
+
+void run_field_inv_all_var(void) {
+ secp256k1_fe x[16], xi[16], xii[16];
+ int i;
+ /* Check it's safe to call for 0 elements */
+ secp256k1_fe_inv_all_var(xi, x, 0);
+ for (i = 0; i < count; i++) {
+ size_t j;
+ size_t len = secp256k1_rand_int(15) + 1;
+ for (j = 0; j < len; j++) {
+ random_fe_non_zero(&x[j]);
+ }
+ secp256k1_fe_inv_all_var(xi, x, len);
+ for (j = 0; j < len; j++) {
+ CHECK(check_fe_inverse(&x[j], &xi[j]));
+ }
+ secp256k1_fe_inv_all_var(xii, xi, len);
+ for (j = 0; j < len; j++) {
+ CHECK(check_fe_equal(&x[j], &xii[j]));
+ }
+ }
+}
+
+void run_sqr(void) {
+ secp256k1_fe x, s;
+
+ {
+ int i;
+ secp256k1_fe_set_int(&x, 1);
+ secp256k1_fe_negate(&x, &x, 1);
+
+ for (i = 1; i <= 512; ++i) {
+ secp256k1_fe_mul_int(&x, 2);
+ secp256k1_fe_normalize(&x);
+ secp256k1_fe_sqr(&s, &x);
+ }
+ }
+}
+
+void test_sqrt(const secp256k1_fe *a, const secp256k1_fe *k) {
+ secp256k1_fe r1, r2;
+ int v = secp256k1_fe_sqrt(&r1, a);
+ CHECK((v == 0) == (k == NULL));
+
+ if (k != NULL) {
+ /* Check that the returned root is +/- the given known answer */
+ secp256k1_fe_negate(&r2, &r1, 1);
+ secp256k1_fe_add(&r1, k); secp256k1_fe_add(&r2, k);
+ secp256k1_fe_normalize(&r1); secp256k1_fe_normalize(&r2);
+ CHECK(secp256k1_fe_is_zero(&r1) || secp256k1_fe_is_zero(&r2));
+ }
+}
+
+void run_sqrt(void) {
+ secp256k1_fe ns, x, s, t;
+ int i;
+
+ /* Check sqrt(0) is 0 */
+ secp256k1_fe_set_int(&x, 0);
+ secp256k1_fe_sqr(&s, &x);
+ test_sqrt(&s, &x);
+
+ /* Check sqrt of small squares (and their negatives) */
+ for (i = 1; i <= 100; i++) {
+ secp256k1_fe_set_int(&x, i);
+ secp256k1_fe_sqr(&s, &x);
+ test_sqrt(&s, &x);
+ secp256k1_fe_negate(&t, &s, 1);
+ test_sqrt(&t, NULL);
+ }
+
+ /* Consistency checks for large random values */
+ for (i = 0; i < 10; i++) {
+ int j;
+ random_fe_non_square(&ns);
+ for (j = 0; j < count; j++) {
+ random_fe(&x);
+ secp256k1_fe_sqr(&s, &x);
+ test_sqrt(&s, &x);
+ secp256k1_fe_negate(&t, &s, 1);
+ test_sqrt(&t, NULL);
+ secp256k1_fe_mul(&t, &s, &ns);
+ test_sqrt(&t, NULL);
+ }
+ }
+}
+
+/***** GROUP TESTS *****/
+
+void ge_equals_ge(const secp256k1_ge *a, const secp256k1_ge *b) {
+ CHECK(a->infinity == b->infinity);
+ if (a->infinity) {
+ return;
+ }
+ CHECK(secp256k1_fe_equal_var(&a->x, &b->x));
+ CHECK(secp256k1_fe_equal_var(&a->y, &b->y));
+}
+
+/* This compares jacobian points including their Z, not just their geometric meaning. */
+int gej_xyz_equals_gej(const secp256k1_gej *a, const secp256k1_gej *b) {
+ secp256k1_gej a2;
+ secp256k1_gej b2;
+ int ret = 1;
+ ret &= a->infinity == b->infinity;
+ if (ret && !a->infinity) {
+ a2 = *a;
+ b2 = *b;
+ secp256k1_fe_normalize(&a2.x);
+ secp256k1_fe_normalize(&a2.y);
+ secp256k1_fe_normalize(&a2.z);
+ secp256k1_fe_normalize(&b2.x);
+ secp256k1_fe_normalize(&b2.y);
+ secp256k1_fe_normalize(&b2.z);
+ ret &= secp256k1_fe_cmp_var(&a2.x, &b2.x) == 0;
+ ret &= secp256k1_fe_cmp_var(&a2.y, &b2.y) == 0;
+ ret &= secp256k1_fe_cmp_var(&a2.z, &b2.z) == 0;
+ }
+ return ret;
+}
+
+void ge_equals_gej(const secp256k1_ge *a, const secp256k1_gej *b) {
+ secp256k1_fe z2s;
+ secp256k1_fe u1, u2, s1, s2;
+ CHECK(a->infinity == b->infinity);
+ if (a->infinity) {
+ return;
+ }
+ /* Check a.x * b.z^2 == b.x && a.y * b.z^3 == b.y, to avoid inverses: a Jacobian (X, Y, Z) represents the affine point (X/Z^2, Y/Z^3). */
+ secp256k1_fe_sqr(&z2s, &b->z);
+ secp256k1_fe_mul(&u1, &a->x, &z2s);
+ u2 = b->x; secp256k1_fe_normalize_weak(&u2);
+ secp256k1_fe_mul(&s1, &a->y, &z2s); secp256k1_fe_mul(&s1, &s1, &b->z);
+ s2 = b->y; secp256k1_fe_normalize_weak(&s2);
+ CHECK(secp256k1_fe_equal_var(&u1, &u2));
+ CHECK(secp256k1_fe_equal_var(&s1, &s2));
+}
+
+void test_ge(void) {
+ int i, i1;
+#ifdef USE_ENDOMORPHISM
+ int runs = 6;
+#else
+ int runs = 4;
+#endif
+ /* Points: (infinity, p1, p1, -p1, -p1, p2, p2, -p2, -p2, p3, p3, -p3, -p3, p4, p4, -p4, -p4).
+ * The second in each pair of identical points uses a random Z coordinate in the Jacobian form.
+ * All magnitudes are randomized.
+ * All 17*17 combinations of points are added to each other, using all applicable methods.
+ *
+ * When the endomorphism code is compiled in, p5 = lambda*p1 and p6 = lambda^2*p1 are added as well.
+ */
+ secp256k1_ge *ge = (secp256k1_ge *)malloc(sizeof(secp256k1_ge) * (1 + 4 * runs));
+ secp256k1_gej *gej = (secp256k1_gej *)malloc(sizeof(secp256k1_gej) * (1 + 4 * runs));
+ secp256k1_fe *zinv = (secp256k1_fe *)malloc(sizeof(secp256k1_fe) * (1 + 4 * runs));
+ secp256k1_fe zf;
+ secp256k1_fe zfi2, zfi3;
+
+ secp256k1_gej_set_infinity(&gej[0]);
+ secp256k1_ge_clear(&ge[0]);
+ secp256k1_ge_set_gej_var(&ge[0], &gej[0]);
+ for (i = 0; i < runs; i++) {
+ int j;
+ secp256k1_ge g;
+ random_group_element_test(&g);
+#ifdef USE_ENDOMORPHISM
+ if (i >= runs - 2) {
+ secp256k1_ge_mul_lambda(&g, &ge[1]);
+ }
+ if (i >= runs - 1) {
+ secp256k1_ge_mul_lambda(&g, &g);
+ }
+#endif
+ ge[1 + 4 * i] = g;
+ ge[2 + 4 * i] = g;
+ secp256k1_ge_neg(&ge[3 + 4 * i], &g);
+ secp256k1_ge_neg(&ge[4 + 4 * i], &g);
+ secp256k1_gej_set_ge(&gej[1 + 4 * i], &ge[1 + 4 * i]);
+ random_group_element_jacobian_test(&gej[2 + 4 * i], &ge[2 + 4 * i]);
+ secp256k1_gej_set_ge(&gej[3 + 4 * i], &ge[3 + 4 * i]);
+ random_group_element_jacobian_test(&gej[4 + 4 * i], &ge[4 + 4 * i]);
+ for (j = 0; j < 4; j++) {
+ random_field_element_magnitude(&ge[1 + j + 4 * i].x);
+ random_field_element_magnitude(&ge[1 + j + 4 * i].y);
+ random_field_element_magnitude(&gej[1 + j + 4 * i].x);
+ random_field_element_magnitude(&gej[1 + j + 4 * i].y);
+ random_field_element_magnitude(&gej[1 + j + 4 * i].z);
+ }
+ }
+
+ /* Compute z inverses. */
+ {
+ secp256k1_fe *zs = malloc(sizeof(secp256k1_fe) * (1 + 4 * runs));
+ for (i = 0; i < 4 * runs + 1; i++) {
+ if (i == 0) {
+ /* The point at infinity does not have a meaningful z inverse. Any should do. */
+ do {
+ random_field_element_test(&zs[i]);
+ } while(secp256k1_fe_is_zero(&zs[i]));
+ } else {
+ zs[i] = gej[i].z;
+ }
+ }
+ secp256k1_fe_inv_all_var(zinv, zs, 4 * runs + 1);
+ free(zs);
+ }
+
+ /* Generate random zf, and zfi2 = 1/zf^2, zfi3 = 1/zf^3 */
+ do {
+ random_field_element_test(&zf);
+ } while(secp256k1_fe_is_zero(&zf));
+ random_field_element_magnitude(&zf);
+ secp256k1_fe_inv_var(&zfi3, &zf);
+ secp256k1_fe_sqr(&zfi2, &zfi3);
+ secp256k1_fe_mul(&zfi3, &zfi3, &zfi2);
+
+ for (i1 = 0; i1 < 1 + 4 * runs; i1++) {
+ int i2;
+ for (i2 = 0; i2 < 1 + 4 * runs; i2++) {
+ /* Compute reference result using gej + gej (var). */
+ secp256k1_gej refj, resj;
+ secp256k1_ge ref;
+ secp256k1_fe zr;
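+ /* When a Z-ratio output is requested, secp256k1_gej_add_var returns zr such that the result's z equals gej[i1].z * zr; the checks below rely on that. */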
+ secp256k1_gej_add_var(&refj, &gej[i1], &gej[i2], secp256k1_gej_is_infinity(&gej[i1]) ? NULL : &zr);
+ /* Check Z ratio. */
+ if (!secp256k1_gej_is_infinity(&gej[i1]) && !secp256k1_gej_is_infinity(&refj)) {
+ secp256k1_fe zrz; secp256k1_fe_mul(&zrz, &zr, &gej[i1].z);
+ CHECK(secp256k1_fe_equal_var(&zrz, &refj.z));
+ }
+ secp256k1_ge_set_gej_var(&ref, &refj);
+
+ /* Test gej + ge with Z ratio result (var). */
+ secp256k1_gej_add_ge_var(&resj, &gej[i1], &ge[i2], secp256k1_gej_is_infinity(&gej[i1]) ? NULL : &zr);
+ ge_equals_gej(&ref, &resj);
+ if (!secp256k1_gej_is_infinity(&gej[i1]) && !secp256k1_gej_is_infinity(&resj)) {
+ secp256k1_fe zrz; secp256k1_fe_mul(&zrz, &zr, &gej[i1].z);
+ CHECK(secp256k1_fe_equal_var(&zrz, &resj.z));
+ }
+
+ /* Test gej + ge (var, with additional Z factor). */
+ {
+ secp256k1_ge ge2_zfi = ge[i2]; /* the second term with x and y rescaled for z = 1/zf */
+ secp256k1_fe_mul(&ge2_zfi.x, &ge2_zfi.x, &zfi2);
+ secp256k1_fe_mul(&ge2_zfi.y, &ge2_zfi.y, &zfi3);
+ random_field_element_magnitude(&ge2_zfi.x);
+ random_field_element_magnitude(&ge2_zfi.y);
+ secp256k1_gej_add_zinv_var(&resj, &gej[i1], &ge2_zfi, &zf);
+ ge_equals_gej(&ref, &resj);
+ }
+
+ /* Test gej + ge (const). */
+ if (i2 != 0) {
+ /* secp256k1_gej_add_ge does not support its second argument being infinity. */
+ secp256k1_gej_add_ge(&resj, &gej[i1], &ge[i2]);
+ ge_equals_gej(&ref, &resj);
+ }
+
+ /* Test doubling (var). */
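+ /* The condition below holds exactly when gej[i1] and gej[i2] represent the same point (same run and same sign half), so their sum is a doubling. */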
+ if ((i1 == 0 && i2 == 0) || ((i1 + 3)/4 == (i2 + 3)/4 && ((i1 + 3)%4)/2 == ((i2 + 3)%4)/2)) {
+ secp256k1_fe zr2;
+ /* Normal doubling with Z ratio result. */
+ secp256k1_gej_double_var(&resj, &gej[i1], &zr2);
+ ge_equals_gej(&ref, &resj);
+ /* Check Z ratio. */
+ secp256k1_fe_mul(&zr2, &zr2, &gej[i1].z);
+ CHECK(secp256k1_fe_equal_var(&zr2, &resj.z));
+ /* Normal doubling. */
+ secp256k1_gej_double_var(&resj, &gej[i2], NULL);
+ ge_equals_gej(&ref, &resj);
+ }
+
+ /* Test adding opposites. */
+ if ((i1 == 0 && i2 == 0) || ((i1 + 3)/4 == (i2 + 3)/4 && ((i1 + 3)%4)/2 != ((i2 + 3)%4)/2)) {
+ CHECK(secp256k1_ge_is_infinity(&ref));
+ }
+
+ /* Test adding infinity. */
+ if (i1 == 0) {
+ CHECK(secp256k1_ge_is_infinity(&ge[i1]));
+ CHECK(secp256k1_gej_is_infinity(&gej[i1]));
+ ge_equals_gej(&ref, &gej[i2]);
+ }
+ if (i2 == 0) {
+ CHECK(secp256k1_ge_is_infinity(&ge[i2]));
+ CHECK(secp256k1_gej_is_infinity(&gej[i2]));
+ ge_equals_gej(&ref, &gej[i1]);
+ }
+ }
+ }
+
+ /* Test adding all points together in random order equals infinity. */
+ {
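+ /* The array contains each point together with its negation (plus infinity), so the total must be infinity in any order. */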
+ secp256k1_gej sum = SECP256K1_GEJ_CONST_INFINITY;
+ secp256k1_gej *gej_shuffled = (secp256k1_gej *)malloc((4 * runs + 1) * sizeof(secp256k1_gej));
+ for (i = 0; i < 4 * runs + 1; i++) {
+ gej_shuffled[i] = gej[i];
+ }
+ for (i = 0; i < 4 * runs + 1; i++) {
+ int swap = i + secp256k1_rand_int(4 * runs + 1 - i);
+ if (swap != i) {
+ secp256k1_gej t = gej_shuffled[i];
+ gej_shuffled[i] = gej_shuffled[swap];
+ gej_shuffled[swap] = t;
+ }
+ }
+ for (i = 0; i < 4 * runs + 1; i++) {
+ secp256k1_gej_add_var(&sum, &sum, &gej_shuffled[i], NULL);
+ }
+ CHECK(secp256k1_gej_is_infinity(&sum));
+ free(gej_shuffled);
+ }
+
+ /* Test batch gej -> ge conversion with and without known z ratios. */
+ {
+ secp256k1_fe *zr = (secp256k1_fe *)malloc((4 * runs + 1) * sizeof(secp256k1_fe));
+ secp256k1_ge *ge_set_table = (secp256k1_ge *)malloc((4 * runs + 1) * sizeof(secp256k1_ge));
+ secp256k1_ge *ge_set_all = (secp256k1_ge *)malloc((4 * runs + 1) * sizeof(secp256k1_ge));
+ for (i = 0; i < 4 * runs + 1; i++) {
+ /* Compute gej[i + 1].z / gej[i].z (with gej[n].z taken to be 1). */
+ if (i < 4 * runs) {
+ secp256k1_fe_mul(&zr[i + 1], &zinv[i], &gej[i + 1].z);
+ }
+ }
+ secp256k1_ge_set_table_gej_var(ge_set_table, gej, zr, 4 * runs + 1);
+ secp256k1_ge_set_all_gej_var(ge_set_all, gej, 4 * runs + 1, &ctx->error_callback);
+ for (i = 0; i < 4 * runs + 1; i++) {
+ secp256k1_fe s;
+ random_fe_non_zero(&s);
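+ /* Rescaling changes the Jacobian representation of gej[i] but not the point it represents. */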
+ secp256k1_gej_rescale(&gej[i], &s);
+ ge_equals_gej(&ge_set_table[i], &gej[i]);
+ ge_equals_gej(&ge_set_all[i], &gej[i]);
+ }
+ free(ge_set_table);
+ free(ge_set_all);
+ free(zr);
+ }
+
+ free(ge);
+ free(gej);
+ free(zinv);
+}
+
+void test_add_neg_y_diff_x(void) {
+ /* The point of this test is to check that we can add two points
+ * whose y-coordinates are negatives of each other but whose x
+ * coordinates differ. If the x-coordinates were the same, these
+ * points would be negatives of each other and their sum would be
+ * infinity. That would "cover up" any degeneracy in the addition
+ * algorithm that would cause the xy coordinates of the sum to be
+ * wrong (since infinity has no xy coordinates). HOWEVER, if the
+ * x-coordinates are different, infinity is the wrong answer, and
+ * such degeneracies are exposed. This is the
+ * root of https://github.com/bitcoin-core/secp256k1/issues/257
+ * which this test is a regression test for.
+ *
+ * These points were generated in sage as
+ * # secp256k1 params
+ * F = FiniteField (0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F)
+ * C = EllipticCurve ([F (0), F (7)])
+ * G = C.lift_x(0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798)
+ * N = FiniteField(G.order())
+ *
+ * # endomorphism values (lambda is 1^{1/3} in N, beta is 1^{1/3} in F)
+ * x = polygen(N)
+ * lam = (1 - x^3).roots()[1][0]
+ *
+ * # random "bad pair"
+ * P = C.random_element()
+ * Q = -int(lam) * P
+ * print " P: %x %x" % P.xy()
+ * print " Q: %x %x" % Q.xy()
+ * print "P + Q: %x %x" % (P + Q).xy()
+ */
+ secp256k1_gej aj = SECP256K1_GEJ_CONST(
+ 0x8d24cd95, 0x0a355af1, 0x3c543505, 0x44238d30,
+ 0x0643d79f, 0x05a59614, 0x2f8ec030, 0xd58977cb,
+ 0x001e337a, 0x38093dcd, 0x6c0f386d, 0x0b1293a8,
+ 0x4d72c879, 0xd7681924, 0x44e6d2f3, 0x9190117d
+ );
+ secp256k1_gej bj = SECP256K1_GEJ_CONST(
+ 0xc7b74206, 0x1f788cd9, 0xabd0937d, 0x164a0d86,
+ 0x95f6ff75, 0xf19a4ce9, 0xd013bd7b, 0xbf92d2a7,
+ 0xffe1cc85, 0xc7f6c232, 0x93f0c792, 0xf4ed6c57,
+ 0xb28d3786, 0x2897e6db, 0xbb192d0b, 0x6e6feab2
+ );
+ secp256k1_gej sumj = SECP256K1_GEJ_CONST(
+ 0x671a63c0, 0x3efdad4c, 0x389a7798, 0x24356027,
+ 0xb3d69010, 0x278625c3, 0x5c86d390, 0x184a8f7a,
+ 0x5f6409c2, 0x2ce01f2b, 0x511fd375, 0x25071d08,
+ 0xda651801, 0x70e95caf, 0x8f0d893c, 0xbed8fbbe
+ );
+ secp256k1_ge b;
+ secp256k1_gej resj;
+ secp256k1_ge res;
+ secp256k1_ge_set_gej(&b, &bj);
+
+ secp256k1_gej_add_var(&resj, &aj, &bj, NULL);
+ secp256k1_ge_set_gej(&res, &resj);
+ ge_equals_gej(&res, &sumj);
+
+ secp256k1_gej_add_ge(&resj, &aj, &b);
+ secp256k1_ge_set_gej(&res, &resj);
+ ge_equals_gej(&res, &sumj);
+
+ secp256k1_gej_add_ge_var(&resj, &aj, &b, NULL);
+ secp256k1_ge_set_gej(&res, &resj);
+ ge_equals_gej(&res, &sumj);
+}
+
+void run_ge(void) {
+ int i;
+ for (i = 0; i < count * 32; i++) {
+ test_ge();
+ }
+ test_add_neg_y_diff_x();
+}
+
+void test_ec_combine(void) {
+ secp256k1_scalar sum = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
+ secp256k1_pubkey data[6];
+ const secp256k1_pubkey* d[6];
+ secp256k1_pubkey sd;
+ secp256k1_pubkey sd2;
+ secp256k1_gej Qj;
+ secp256k1_ge Q;
+ int i;
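+ /* After i rounds, data[0..i-1] hold the pubkeys of s_1..s_i and sd holds the pubkey of their sum; combining the former must reproduce the latter. */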
+ for (i = 1; i <= 6; i++) {
+ secp256k1_scalar s;
+ random_scalar_order_test(&s);
+ secp256k1_scalar_add(&sum, &sum, &s);
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &s);
+ secp256k1_ge_set_gej(&Q, &Qj);
+ secp256k1_pubkey_save(&data[i - 1], &Q);
+ d[i - 1] = &data[i - 1];
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &Qj, &sum);
+ secp256k1_ge_set_gej(&Q, &Qj);
+ secp256k1_pubkey_save(&sd, &Q);
+ CHECK(secp256k1_ec_pubkey_combine(ctx, &sd2, d, i) == 1);
+ CHECK(memcmp(&sd, &sd2, sizeof(sd)) == 0);
+ }
+}
+
+void run_ec_combine(void) {
+ int i;
+ for (i = 0; i < count * 8; i++) {
+ test_ec_combine();
+ }
+}
+
+void test_group_decompress(const secp256k1_fe* x) {
+ /* The input itself, normalized. */
+ secp256k1_fe fex = *x;
+ secp256k1_fe fez;
+ /* Results of set_xquad, set_xo_var(..., 0), set_xo_var(..., 1). */
+ secp256k1_ge ge_quad, ge_even, ge_odd;
+ secp256k1_gej gej_quad;
+ /* Return values of the above calls. */
+ int res_quad, res_even, res_odd;
+
+ secp256k1_fe_normalize_var(&fex);
+
+ res_quad = secp256k1_ge_set_xquad(&ge_quad, &fex);
+ res_even = secp256k1_ge_set_xo_var(&ge_even, &fex, 0);
+ res_odd = secp256k1_ge_set_xo_var(&ge_odd, &fex, 1);
+
+ CHECK(res_quad == res_even);
+ CHECK(res_quad == res_odd);
+
+ if (res_quad) {
+ secp256k1_fe_normalize_var(&ge_quad.x);
+ secp256k1_fe_normalize_var(&ge_odd.x);
+ secp256k1_fe_normalize_var(&ge_even.x);
+ secp256k1_fe_normalize_var(&ge_quad.y);
+ secp256k1_fe_normalize_var(&ge_odd.y);
+ secp256k1_fe_normalize_var(&ge_even.y);
+
+ /* No infinity allowed. */
+ CHECK(!ge_quad.infinity);
+ CHECK(!ge_even.infinity);
+ CHECK(!ge_odd.infinity);
+
+ /* Check that the x coordinates check out. */
+ CHECK(secp256k1_fe_equal_var(&ge_quad.x, x));
+ CHECK(secp256k1_fe_equal_var(&ge_even.x, x));
+ CHECK(secp256k1_fe_equal_var(&ge_odd.x, x));
+
+ /* Check that the Y coordinate result in ge_quad is a square. */
+ CHECK(secp256k1_fe_is_quad_var(&ge_quad.y));
+
+ /* Check odd/even Y in ge_odd, ge_even. */
+ CHECK(secp256k1_fe_is_odd(&ge_odd.y));
+ CHECK(!secp256k1_fe_is_odd(&ge_even.y));
+
+ /* Check secp256k1_gej_has_quad_y_var. */
+ secp256k1_gej_set_ge(&gej_quad, &ge_quad);
+ CHECK(secp256k1_gej_has_quad_y_var(&gej_quad));
+ do {
+ random_fe_test(&fez);
+ } while (secp256k1_fe_is_zero(&fez));
+ secp256k1_gej_rescale(&gej_quad, &fez);
+ CHECK(secp256k1_gej_has_quad_y_var(&gej_quad));
+ secp256k1_gej_neg(&gej_quad, &gej_quad);
+ CHECK(!secp256k1_gej_has_quad_y_var(&gej_quad));
+ do {
+ random_fe_test(&fez);
+ } while (secp256k1_fe_is_zero(&fez));
+ secp256k1_gej_rescale(&gej_quad, &fez);
+ CHECK(!secp256k1_gej_has_quad_y_var(&gej_quad));
+ secp256k1_gej_neg(&gej_quad, &gej_quad);
+ CHECK(secp256k1_gej_has_quad_y_var(&gej_quad));
+ }
+}
+
+void run_group_decompress(void) {
+ int i;
+ for (i = 0; i < count * 4; i++) {
+ secp256k1_fe fe;
+ random_fe_test(&fe);
+ test_group_decompress(&fe);
+ }
+}
+
+/***** ECMULT TESTS *****/
+
+void run_ecmult_chain(void) {
+ /* random starting point A (on the curve) */
+ secp256k1_gej a = SECP256K1_GEJ_CONST(
+ 0x8b30bbe9, 0xae2a9906, 0x96b22f67, 0x0709dff3,
+ 0x727fd8bc, 0x04d3362c, 0x6c7bf458, 0xe2846004,
+ 0xa357ae91, 0x5c4a6528, 0x1309edf2, 0x0504740f,
+ 0x0eb33439, 0x90216b4f, 0x81063cb6, 0x5f2f7e0f
+ );
+ /* two random initial factors xn and gn */
+ secp256k1_scalar xn = SECP256K1_SCALAR_CONST(
+ 0x84cc5452, 0xf7fde1ed, 0xb4d38a8c, 0xe9b1b84c,
+ 0xcef31f14, 0x6e569be9, 0x705d357a, 0x42985407
+ );
+ secp256k1_scalar gn = SECP256K1_SCALAR_CONST(
+ 0xa1e58d22, 0x553dcd42, 0xb2398062, 0x5d4c57a9,
+ 0x6e9323d4, 0x2b3152e5, 0xca2c3990, 0xedc7c9de
+ );
+ /* two small multipliers to be applied to xn and gn in every iteration: */
+ static const secp256k1_scalar xf = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0x1337);
+ static const secp256k1_scalar gf = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0x7113);
+ /* accumulators with the resulting coefficients to A and G */
+ secp256k1_scalar ae = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1);
+ secp256k1_scalar ge = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
+ /* actual points */
+ secp256k1_gej x;
+ secp256k1_gej x2;
+ int i;
+
+ /* the point being computed */
+ x = a;
+ for (i = 0; i < 200*count; i++) {
+ /* in each iteration, compute X = xn*X + gn*G; */
+ secp256k1_ecmult(&ctx->ecmult_ctx, &x, &x, &xn, &gn);
+ /* also compute ae and ge: the actual accumulated factors for A and G */
+ /* if X was (ae*A+ge*G), xn*X + gn*G results in (xn*ae*A + (xn*ge+gn)*G) */
+ secp256k1_scalar_mul(&ae, &ae, &xn);
+ secp256k1_scalar_mul(&ge, &ge, &xn);
+ secp256k1_scalar_add(&ge, &ge, &gn);
+ /* modify xn and gn */
+ secp256k1_scalar_mul(&xn, &xn, &xf);
+ secp256k1_scalar_mul(&gn, &gn, &gf);
+
+ /* verify */
+ if (i == 19999) {
+ /* expected result after 19999 iterations */
+ secp256k1_gej rp = SECP256K1_GEJ_CONST(
+ 0xD6E96687, 0xF9B10D09, 0x2A6F3543, 0x9D86CEBE,
+ 0xA4535D0D, 0x409F5358, 0x6440BD74, 0xB933E830,
+ 0xB95CBCA2, 0xC77DA786, 0x539BE8FD, 0x53354D2D,
+ 0x3B4F566A, 0xE6580454, 0x07ED6015, 0xEE1B2A88
+ );
+
+ secp256k1_gej_neg(&rp, &rp);
+ secp256k1_gej_add_var(&rp, &rp, &x, NULL);
+ CHECK(secp256k1_gej_is_infinity(&rp));
+ }
+ }
+ /* redo the computation, but directly with the resulting ae and ge coefficients: */
+ secp256k1_ecmult(&ctx->ecmult_ctx, &x2, &a, &ae, &ge);
+ secp256k1_gej_neg(&x2, &x2);
+ secp256k1_gej_add_var(&x2, &x2, &x, NULL);
+ CHECK(secp256k1_gej_is_infinity(&x2));
+}
+
+void test_point_times_order(const secp256k1_gej *point) {
+ /* X * (point + G) + (order-X) * (point + G) = 0 */
+ secp256k1_scalar x;
+ secp256k1_scalar nx;
+ secp256k1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
+ secp256k1_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1);
+ secp256k1_gej res1, res2;
+ secp256k1_ge res3;
+ unsigned char pub[65];
+ size_t psize = 65;
+ random_scalar_order_test(&x);
+ secp256k1_scalar_negate(&nx, &x);
+ secp256k1_ecmult(&ctx->ecmult_ctx, &res1, point, &x, &x); /* calc res1 = x * point + x * G; */
+ secp256k1_ecmult(&ctx->ecmult_ctx, &res2, point, &nx, &nx); /* calc res2 = (order - x) * point + (order - x) * G; */
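+ /* res1 + res2 = (x + (order - x)) * (point + G) = the point at infinity. */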
+ secp256k1_gej_add_var(&res1, &res1, &res2, NULL);
+ CHECK(secp256k1_gej_is_infinity(&res1));
+ CHECK(secp256k1_gej_is_valid_var(&res1) == 0);
+ secp256k1_ge_set_gej(&res3, &res1);
+ CHECK(secp256k1_ge_is_infinity(&res3));
+ CHECK(secp256k1_ge_is_valid_var(&res3) == 0);
+ CHECK(secp256k1_eckey_pubkey_serialize(&res3, pub, &psize, 0) == 0);
+ psize = 65;
+ CHECK(secp256k1_eckey_pubkey_serialize(&res3, pub, &psize, 1) == 0);
+ /* check zero/one edge cases */
+ secp256k1_ecmult(&ctx->ecmult_ctx, &res1, point, &zero, &zero);
+ secp256k1_ge_set_gej(&res3, &res1);
+ CHECK(secp256k1_ge_is_infinity(&res3));
+ secp256k1_ecmult(&ctx->ecmult_ctx, &res1, point, &one, &zero);
+ secp256k1_ge_set_gej(&res3, &res1);
+ ge_equals_gej(&res3, point);
+ secp256k1_ecmult(&ctx->ecmult_ctx, &res1, point, &zero, &one);
+ secp256k1_ge_set_gej(&res3, &res1);
+ ge_equals_ge(&res3, &secp256k1_ge_const_g);
+}
+
+void run_point_times_order(void) {
+ int i;
+ secp256k1_fe x = SECP256K1_FE_CONST(0, 0, 0, 0, 0, 0, 0, 2);
+ static const secp256k1_fe xr = SECP256K1_FE_CONST(
+ 0x7603CB59, 0xB0EF6C63, 0xFE608479, 0x2A0C378C,
+ 0xDB3233A8, 0x0F8A9A09, 0xA877DEAD, 0x31B38C45
+ );
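+ /* x starts at 2 and is squared once per iteration; xr is the expected value after 500 squarings. */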
+ for (i = 0; i < 500; i++) {
+ secp256k1_ge p;
+ if (secp256k1_ge_set_xo_var(&p, &x, 1)) {
+ secp256k1_gej j;
+ CHECK(secp256k1_ge_is_valid_var(&p));
+ secp256k1_gej_set_ge(&j, &p);
+ CHECK(secp256k1_gej_is_valid_var(&j));
+ test_point_times_order(&j);
+ }
+ secp256k1_fe_sqr(&x, &x);
+ }
+ secp256k1_fe_normalize_var(&x);
+ CHECK(secp256k1_fe_equal_var(&x, &xr));
+}
+
+void ecmult_const_random_mult(void) {
+ /* random starting point A (on the curve) */
+ secp256k1_ge a = SECP256K1_GE_CONST(
+ 0x6d986544, 0x57ff52b8, 0xcf1b8126, 0x5b802a5b,
+ 0xa97f9263, 0xb1e88044, 0x93351325, 0x91bc450a,
+ 0x535c59f7, 0x325e5d2b, 0xc391fbe8, 0x3c12787c,
+ 0x337e4a98, 0xe82a9011, 0x0123ba37, 0xdd769c7d
+ );
+ /* random initial factor xn */
+ secp256k1_scalar xn = SECP256K1_SCALAR_CONST(
+ 0x649d4f77, 0xc4242df7, 0x7f2079c9, 0x14530327,
+ 0xa31b876a, 0xd2d8ce2a, 0x2236d5c6, 0xd7b2029b
+ );
+ /* expected xn * A (from sage) */
+ secp256k1_ge expected_b = SECP256K1_GE_CONST(
+ 0x23773684, 0x4d209dc7, 0x098a786f, 0x20d06fcd,
+ 0x070a38bf, 0xc11ac651, 0x03004319, 0x1e2a8786,
+ 0xed8c3b8e, 0xc06dd57b, 0xd06ea66e, 0x45492b0f,
+ 0xb84e4e1b, 0xfb77e21f, 0x96baae2a, 0x63dec956
+ );
+ secp256k1_gej b;
+ secp256k1_ecmult_const(&b, &a, &xn);
+
+ CHECK(secp256k1_ge_is_valid_var(&a));
+ ge_equals_gej(&expected_b, &b);
+}
+
+void ecmult_const_commutativity(void) {
+ secp256k1_scalar a;
+ secp256k1_scalar b;
+ secp256k1_gej res1;
+ secp256k1_gej res2;
+ secp256k1_ge mid1;
+ secp256k1_ge mid2;
+ random_scalar_order_test(&a);
+ random_scalar_order_test(&b);
+
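+ /* Check that (a*G)*b == (b*G)*a. */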
+ secp256k1_ecmult_const(&res1, &secp256k1_ge_const_g, &a);
+ secp256k1_ecmult_const(&res2, &secp256k1_ge_const_g, &b);
+ secp256k1_ge_set_gej(&mid1, &res1);
+ secp256k1_ge_set_gej(&mid2, &res2);
+ secp256k1_ecmult_const(&res1, &mid1, &b);
+ secp256k1_ecmult_const(&res2, &mid2, &a);
+ secp256k1_ge_set_gej(&mid1, &res1);
+ secp256k1_ge_set_gej(&mid2, &res2);
+ ge_equals_ge(&mid1, &mid2);
+}
+
+void ecmult_const_mult_zero_one(void) {
+ secp256k1_scalar zero = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 0);
+ secp256k1_scalar one = SECP256K1_SCALAR_CONST(0, 0, 0, 0, 0, 0, 0, 1);
+ secp256k1_scalar negone;
+ secp256k1_gej res1;
+ secp256k1_ge res2;
+ secp256k1_ge point;
+ secp256k1_scalar_negate(&negone, &one);
+
+ random_group_element_test(&point);
+ secp256k1_ecmult_const(&res1, &point, &zero);
+ secp256k1_ge_set_gej(&res2, &res1);
+ CHECK(secp256k1_ge_is_infinity(&res2));
+ secp256k1_ecmult_const(&res1, &point, &one);
+ secp256k1_ge_set_gej(&res2, &res1);
+ ge_equals_ge(&res2, &point);
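+ /* Multiplying by -1 and then negating the result must give back the original point. */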
+ secp256k1_ecmult_const(&res1, &point, &negone);
+ secp256k1_gej_neg(&res1, &res1);
+ secp256k1_ge_set_gej(&res2, &res1);
+ ge_equals_ge(&res2, &point);
+}
+
+void ecmult_const_chain_multiply(void) {
+ /* Check known result (randomly generated test problem from sage) */
+ const secp256k1_scalar scalar = SECP256K1_SCALAR_CONST(
+ 0x4968d524, 0x2abf9b7a, 0x466abbcf, 0x34b11b6d,
+ 0xcd83d307, 0x827bed62, 0x05fad0ce, 0x18fae63b
+ );
+ const secp256k1_gej expected_point = SECP256K1_GEJ_CONST(
+ 0x5494c15d, 0x32099706, 0xc2395f94, 0x348745fd,
+ 0x757ce30e, 0x4e8c90fb, 0xa2bad184, 0xf883c69f,
+ 0x5d195d20, 0xe191bf7f, 0x1be3e55f, 0x56a80196,
+ 0x6071ad01, 0xf1462f66, 0xc997fa94, 0xdb858435
+ );
+ secp256k1_gej point;
+ secp256k1_ge res;
+ int i;
+
+ secp256k1_gej_set_ge(&point, &secp256k1_ge_const_g);
+ for (i = 0; i < 100; ++i) {
+ secp256k1_ge tmp;
+ secp256k1_ge_set_gej(&tmp, &point);
+ secp256k1_ecmult_const(&point, &tmp, &scalar);
+ }
+ secp256k1_ge_set_gej(&res, &point);
+ ge_equals_gej(&res, &expected_point);
+}
+
+void run_ecmult_const_tests(void) {
+ ecmult_const_mult_zero_one();
+ ecmult_const_random_mult();
+ ecmult_const_commutativity();
+ ecmult_const_chain_multiply();
+}
+
+void test_wnaf(const secp256k1_scalar *number, int w) {
+ secp256k1_scalar x, two, t;
+ int wnaf[256];
+ int zeroes = -1;
+ int i;
+ int bits;
+ secp256k1_scalar_set_int(&x, 0);
+ secp256k1_scalar_set_int(&two, 2);
+ bits = secp256k1_ecmult_wnaf(wnaf, 256, number, w);
+ CHECK(bits <= 256);
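+ /* Reconstruct the number from its wNAF digits, most significant first: x <- 2*x + wnaf[i]. */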
+ for (i = bits-1; i >= 0; i--) {
+ int v = wnaf[i];
+ secp256k1_scalar_mul(&x, &x, &two);
+ if (v) {
+ CHECK(zeroes == -1 || zeroes >= w-1); /* check that distance between non-zero elements is at least w-1 */
+ zeroes=0;
+ CHECK((v & 1) == 1); /* check non-zero elements are odd */
+ CHECK(v <= (1 << (w-1)) - 1); /* check range below */
+ CHECK(v >= -(1 << (w-1)) - 1); /* check range above */
+ } else {
+ CHECK(zeroes != -1); /* check that no unnecessary zero padding exists */
+ zeroes++;
+ }
+ if (v >= 0) {
+ secp256k1_scalar_set_int(&t, v);
+ } else {
+ secp256k1_scalar_set_int(&t, -v);
+ secp256k1_scalar_negate(&t, &t);
+ }
+ secp256k1_scalar_add(&x, &x, &t);
+ }
+ CHECK(secp256k1_scalar_eq(&x, number)); /* check that wnaf represents number */
+}
+
+void test_constant_wnaf_negate(const secp256k1_scalar *number) {
+ secp256k1_scalar neg1 = *number;
+ secp256k1_scalar neg2 = *number;
+ int sign1 = 1;
+ int sign2 = 1;
+
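+ /* Force the scalar to be odd in two ways: explicit negation of even values (neg1) and constant-time conditional negation (neg2); the results and the reported signs must agree. */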
+ if (!secp256k1_scalar_get_bits(&neg1, 0, 1)) {
+ secp256k1_scalar_negate(&neg1, &neg1);
+ sign1 = -1;
+ }
+ sign2 = secp256k1_scalar_cond_negate(&neg2, secp256k1_scalar_is_even(&neg2));
+ CHECK(sign1 == sign2);
+ CHECK(secp256k1_scalar_eq(&neg1, &neg2));
+}
+
+void test_constant_wnaf(const secp256k1_scalar *number, int w) {
+ secp256k1_scalar x, shift;
+ int wnaf[256] = {0};
+ int i;
+ int skew;
+ secp256k1_scalar num = *number;
+
+ secp256k1_scalar_set_int(&x, 0);
+ secp256k1_scalar_set_int(&shift, 1 << w);
+ /* With USE_ENDOMORPHISM on we only consider 128-bit numbers */
+#ifdef USE_ENDOMORPHISM
+ for (i = 0; i < 16; ++i) {
+ secp256k1_scalar_shr_int(&num, 8);
+ }
+#endif
+ skew = secp256k1_wnaf_const(wnaf, num, w);
+
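+ /* Reconstruct the represented value: digits are scanned from the most significant, each weighted by 2^w. */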
+ for (i = WNAF_SIZE(w); i >= 0; --i) {
+ secp256k1_scalar t;
+ int v = wnaf[i];
+ CHECK(v != 0); /* check nonzero */
+ CHECK(v & 1); /* check parity */
+ CHECK(v > -(1 << w)); /* check range above */
+ CHECK(v < (1 << w)); /* check range below */
+
+ secp256k1_scalar_mul(&x, &x, &shift);
+ if (v >= 0) {
+ secp256k1_scalar_set_int(&t, v);
+ } else {
+ secp256k1_scalar_set_int(&t, -v);
+ secp256k1_scalar_negate(&t, &t);
+ }
+ secp256k1_scalar_add(&x, &x, &t);
+ }
+ /* Add the skew: the constant-time wNAF encodes num + skew (skew is 1 or 2) so that an odd value is represented. */
+ secp256k1_scalar_cadd_bit(&num, skew == 2, 1);
+ CHECK(secp256k1_scalar_eq(&x, &num));
+}
+
+void run_wnaf(void) {
+ int i;
+ secp256k1_scalar n = {{0}};
+
+ /* Sanity check: 1 and 2 are the smallest odd and even numbers and should
+ * have easier-to-diagnose failure modes */
+ n.d[0] = 1;
+ test_constant_wnaf(&n, 4);
+ n.d[0] = 2;
+ test_constant_wnaf(&n, 4);
+ /* Random tests */
+ for (i = 0; i < count; i++) {
+ random_scalar_order(&n);
+ test_wnaf(&n, 4+(i%10));
+ test_constant_wnaf_negate(&n);
+ test_constant_wnaf(&n, 4 + (i % 10));
+ }
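+ /* cond_negate's return value reports whether a negation took place (-1) or not (1); zero must remain zero either way. */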
+ secp256k1_scalar_set_int(&n, 0);
+ CHECK(secp256k1_scalar_cond_negate(&n, 1) == -1);
+ CHECK(secp256k1_scalar_is_zero(&n));
+ CHECK(secp256k1_scalar_cond_negate(&n, 0) == 1);
+ CHECK(secp256k1_scalar_is_zero(&n));
+}
+
+void test_ecmult_constants(void) {
+ /* Test ecmult_gen() for [0..36) and [order-36..0). */
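+ /* For each scalar, compute scalar*G with ecmult_gen, then walk back to infinity by repeatedly adding the opposite of G, checking the expected point one step before infinity is reached. */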
+ secp256k1_scalar x;
+ secp256k1_gej r;
+ secp256k1_ge ng;
+ int i;
+ int j;
+ secp256k1_ge_neg(&ng, &secp256k1_ge_const_g);
+ for (i = 0; i < 36; i++ ) {
+ secp256k1_scalar_set_int(&x, i);
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &r, &x);
+ for (j = 0; j < i; j++) {
+ if (j == i - 1) {
+ ge_equals_gej(&secp256k1_ge_const_g, &r);
+ }
+ secp256k1_gej_add_ge(&r, &r, &ng);
+ }
+ CHECK(secp256k1_gej_is_infinity(&r));
+ }
+ for (i = 1; i <= 36; i++ ) {
+ secp256k1_scalar_set_int(&x, i);
+ secp256k1_scalar_negate(&x, &x);
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &r, &x);
+ for (j = 0; j < i; j++) {
+ if (j == i - 1) {
+ ge_equals_gej(&ng, &r);
+ }
+ secp256k1_gej_add_ge(&r, &r, &secp256k1_ge_const_g);
+ }
+ CHECK(secp256k1_gej_is_infinity(&r));
+ }
+}
+
+void run_ecmult_constants(void) {
+ test_ecmult_constants();
+}
+
+void test_ecmult_gen_blind(void) {
+ /* Test ecmult_gen() blinding and confirm that the blinding changes, the affine points match, and the z's don't match. */
+ secp256k1_scalar key;
+ secp256k1_scalar b;
+ unsigned char seed32[32];
+ secp256k1_gej pgej;
+ secp256k1_gej pgej2;
+ secp256k1_gej i;
+ secp256k1_ge pge;
+ random_scalar_order_test(&key);
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pgej, &key);
+ secp256k1_rand256(seed32);
+ b = ctx->ecmult_gen_ctx.blind;
+ i = ctx->ecmult_gen_ctx.initial;
+ secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, seed32);
+ CHECK(!secp256k1_scalar_eq(&b, &ctx->ecmult_gen_ctx.blind));
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pgej2, &key);
+ CHECK(!gej_xyz_equals_gej(&pgej, &pgej2));
+ CHECK(!gej_xyz_equals_gej(&i, &ctx->ecmult_gen_ctx.initial));
+ secp256k1_ge_set_gej(&pge, &pgej);
+ ge_equals_gej(&pge, &pgej2);
+}
+
+void test_ecmult_gen_blind_reset(void) {
+ /* Test ecmult_gen() blinding reset and confirm that the blinding is consistent. */
+ secp256k1_scalar b;
+ secp256k1_gej initial;
+ secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, NULL);
+ b = ctx->ecmult_gen_ctx.blind;
+ initial = ctx->ecmult_gen_ctx.initial;
+ secp256k1_ecmult_gen_blind(&ctx->ecmult_gen_ctx, NULL);
+ CHECK(secp256k1_scalar_eq(&b, &ctx->ecmult_gen_ctx.blind));
+ CHECK(gej_xyz_equals_gej(&initial, &ctx->ecmult_gen_ctx.initial));
+}
+
+void run_ecmult_gen_blind(void) {
+ int i;
+ test_ecmult_gen_blind_reset();
+ for (i = 0; i < 10; i++) {
+ test_ecmult_gen_blind();
+ }
+}
+
+#ifdef USE_ENDOMORPHISM
+ /***** ENDOMORPHISM TESTS *****/
+void test_scalar_split(void) {
+ secp256k1_scalar full;
+ secp256k1_scalar s1, slam;
+ const unsigned char zero[32] = {0};
+ unsigned char tmp[32];
+
+ random_scalar_order_test(&full);
+ secp256k1_scalar_split_lambda(&s1, &slam, &full);
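+ /* split_lambda decomposes full such that full == s1 + slam*lambda (mod n). */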
+
+ /* check that both are <= 128 bits in size */
+ if (secp256k1_scalar_is_high(&s1)) {
+ secp256k1_scalar_negate(&s1, &s1);
+ }
+ if (secp256k1_scalar_is_high(&slam)) {
+ secp256k1_scalar_negate(&slam, &slam);
+ }
+
+ secp256k1_scalar_get_b32(tmp, &s1);
+ CHECK(memcmp(zero, tmp, 16) == 0);
+ secp256k1_scalar_get_b32(tmp, &slam);
+ CHECK(memcmp(zero, tmp, 16) == 0);
+}
+
+void run_endomorphism_tests(void) {
+ test_scalar_split();
+}
+#endif
+
+void ec_pubkey_parse_pointtest(const unsigned char *input, int xvalid, int yvalid) {
+ unsigned char pubkeyc[65];
+ secp256k1_pubkey pubkey;
+ secp256k1_ge ge;
+ size_t pubkeyclen;
+ int32_t ecount;
+ ecount = 0;
+ secp256k1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount);
+ for (pubkeyclen = 3; pubkeyclen <= 65; pubkeyclen++) {
+ /* Smaller sizes are tested exhaustively elsewhere. */
+ int32_t i;
+ memcpy(&pubkeyc[1], input, 64);
+ VG_UNDEF(&pubkeyc[pubkeyclen], 65 - pubkeyclen);
+ for (i = 0; i < 256; i++) {
+ /* Try all type bytes. */
+ int xpass;
+ int ypass;
+ int ysign;
+ pubkeyc[0] = i;
+ /* What sign does this point's y have, expressed as a compressed-format type byte (2 = even, 3 = odd)? */
+ ysign = (input[63] & 1) + 2;
+ /* For the current type (i) do we expect parsing to work? This handles all of compressed/uncompressed/hybrid. */
+ xpass = xvalid && (pubkeyclen == 33) && ((i & 254) == 2);
+ /* Do we expect a parse and re-serialize as uncompressed to give a matching y? */
+ ypass = xvalid && yvalid && ((i & 4) == ((pubkeyclen == 65) << 2)) &&
+ ((i == 4) || ((i & 251) == ysign)) && ((pubkeyclen == 33) || (pubkeyclen == 65));
+ if (xpass || ypass) {
+ /* These cases must parse. */
+ unsigned char pubkeyo[65];
+ size_t outl;
+ memset(&pubkey, 0, sizeof(pubkey));
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ ecount = 0;
+ CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 1);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ outl = 65;
+ VG_UNDEF(pubkeyo, 65);
+ CHECK(secp256k1_ec_pubkey_serialize(ctx, pubkeyo, &outl, &pubkey, SECP256K1_EC_COMPRESSED) == 1);
+ VG_CHECK(pubkeyo, outl);
+ CHECK(outl == 33);
+ CHECK(memcmp(&pubkeyo[1], &pubkeyc[1], 32) == 0);
+ CHECK((pubkeyclen != 33) || (pubkeyo[0] == pubkeyc[0]));
+ if (ypass) {
+ /* This test isn't always done because we decode with alternative signs, so the y won't match. */
+ CHECK(pubkeyo[0] == ysign);
+ CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 1);
+ memset(&pubkey, 0, sizeof(pubkey));
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ secp256k1_pubkey_save(&pubkey, &ge);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ outl = 65;
+ VG_UNDEF(pubkeyo, 65);
+ CHECK(secp256k1_ec_pubkey_serialize(ctx, pubkeyo, &outl, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 1);
+ VG_CHECK(pubkeyo, outl);
+ CHECK(outl == 65);
+ CHECK(pubkeyo[0] == 4);
+ CHECK(memcmp(&pubkeyo[1], input, 64) == 0);
+ }
+ CHECK(ecount == 0);
+ } else {
+ /* These cases must fail to parse. */
+ memset(&pubkey, 0xfe, sizeof(pubkey));
+ ecount = 0;
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 0);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ CHECK(ecount == 0);
+ CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0);
+ CHECK(ecount == 1);
+ }
+ }
+ }
+ secp256k1_context_set_illegal_callback(ctx, NULL, NULL);
+}
+
+void run_ec_pubkey_parse_test(void) {
+#define SECP256K1_EC_PARSE_TEST_NVALID (12)
+ const unsigned char valid[SECP256K1_EC_PARSE_TEST_NVALID][64] = {
+ {
+ /* Point with leading and trailing zeros in x and y serialization. */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x42, 0x52,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x64, 0xef, 0xa1, 0x7b, 0x77, 0x61, 0xe1, 0xe4, 0x27, 0x06, 0x98, 0x9f, 0xb4, 0x83,
+ 0xb8, 0xd2, 0xd4, 0x9b, 0xf7, 0x8f, 0xae, 0x98, 0x03, 0xf0, 0x99, 0xb8, 0x34, 0xed, 0xeb, 0x00
+ },
+ {
+ /* Point with x equal to a 3rd root of unity.*/
+ 0x7a, 0xe9, 0x6a, 0x2b, 0x65, 0x7c, 0x07, 0x10, 0x6e, 0x64, 0x47, 0x9e, 0xac, 0x34, 0x34, 0xe9,
+ 0x9c, 0xf0, 0x49, 0x75, 0x12, 0xf5, 0x89, 0x95, 0xc1, 0x39, 0x6c, 0x28, 0x71, 0x95, 0x01, 0xee,
+ 0x42, 0x18, 0xf2, 0x0a, 0xe6, 0xc6, 0x46, 0xb3, 0x63, 0xdb, 0x68, 0x60, 0x58, 0x22, 0xfb, 0x14,
+ 0x26, 0x4c, 0xa8, 0xd2, 0x58, 0x7f, 0xdd, 0x6f, 0xbc, 0x75, 0x0d, 0x58, 0x7e, 0x76, 0xa7, 0xee,
+ },
+ {
+ /* Point with largest x. (1/2) */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2c,
+ 0x0e, 0x99, 0x4b, 0x14, 0xea, 0x72, 0xf8, 0xc3, 0xeb, 0x95, 0xc7, 0x1e, 0xf6, 0x92, 0x57, 0x5e,
+ 0x77, 0x50, 0x58, 0x33, 0x2d, 0x7e, 0x52, 0xd0, 0x99, 0x5c, 0xf8, 0x03, 0x88, 0x71, 0xb6, 0x7d,
+ },
+ {
+ /* Point with largest x. (2/2) */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2c,
+ 0xf1, 0x66, 0xb4, 0xeb, 0x15, 0x8d, 0x07, 0x3c, 0x14, 0x6a, 0x38, 0xe1, 0x09, 0x6d, 0xa8, 0xa1,
+ 0x88, 0xaf, 0xa7, 0xcc, 0xd2, 0x81, 0xad, 0x2f, 0x66, 0xa3, 0x07, 0xfb, 0x77, 0x8e, 0x45, 0xb2,
+ },
+ {
+ /* Point with smallest x. (1/2) */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x42, 0x18, 0xf2, 0x0a, 0xe6, 0xc6, 0x46, 0xb3, 0x63, 0xdb, 0x68, 0x60, 0x58, 0x22, 0xfb, 0x14,
+ 0x26, 0x4c, 0xa8, 0xd2, 0x58, 0x7f, 0xdd, 0x6f, 0xbc, 0x75, 0x0d, 0x58, 0x7e, 0x76, 0xa7, 0xee,
+ },
+ {
+ /* Point with smallest x. (2/2) */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0xbd, 0xe7, 0x0d, 0xf5, 0x19, 0x39, 0xb9, 0x4c, 0x9c, 0x24, 0x97, 0x9f, 0xa7, 0xdd, 0x04, 0xeb,
+ 0xd9, 0xb3, 0x57, 0x2d, 0xa7, 0x80, 0x22, 0x90, 0x43, 0x8a, 0xf2, 0xa6, 0x81, 0x89, 0x54, 0x41,
+ },
+ {
+ /* Point with largest y. (1/3) */
+ 0x1f, 0xe1, 0xe5, 0xef, 0x3f, 0xce, 0xb5, 0xc1, 0x35, 0xab, 0x77, 0x41, 0x33, 0x3c, 0xe5, 0xa6,
+ 0xe8, 0x0d, 0x68, 0x16, 0x76, 0x53, 0xf6, 0xb2, 0xb2, 0x4b, 0xcb, 0xcf, 0xaa, 0xaf, 0xf5, 0x07,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2e,
+ },
+ {
+ /* Point with largest y. (2/3) */
+ 0xcb, 0xb0, 0xde, 0xab, 0x12, 0x57, 0x54, 0xf1, 0xfd, 0xb2, 0x03, 0x8b, 0x04, 0x34, 0xed, 0x9c,
+ 0xb3, 0xfb, 0x53, 0xab, 0x73, 0x53, 0x91, 0x12, 0x99, 0x94, 0xa5, 0x35, 0xd9, 0x25, 0xf6, 0x73,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2e,
+ },
+ {
+ /* Point with largest y. (3/3) */
+ 0x14, 0x6d, 0x3b, 0x65, 0xad, 0xd9, 0xf5, 0x4c, 0xcc, 0xa2, 0x85, 0x33, 0xc8, 0x8e, 0x2c, 0xbc,
+ 0x63, 0xf7, 0x44, 0x3e, 0x16, 0x58, 0x78, 0x3a, 0xb4, 0x1f, 0x8e, 0xf9, 0x7c, 0x2a, 0x10, 0xb5,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2e,
+ },
+ {
+ /* Point with smallest y. (1/3) */
+ 0x1f, 0xe1, 0xe5, 0xef, 0x3f, 0xce, 0xb5, 0xc1, 0x35, 0xab, 0x77, 0x41, 0x33, 0x3c, 0xe5, 0xa6,
+ 0xe8, 0x0d, 0x68, 0x16, 0x76, 0x53, 0xf6, 0xb2, 0xb2, 0x4b, 0xcb, 0xcf, 0xaa, 0xaf, 0xf5, 0x07,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ },
+ {
+ /* Point with smallest y. (2/3) */
+ 0xcb, 0xb0, 0xde, 0xab, 0x12, 0x57, 0x54, 0xf1, 0xfd, 0xb2, 0x03, 0x8b, 0x04, 0x34, 0xed, 0x9c,
+ 0xb3, 0xfb, 0x53, 0xab, 0x73, 0x53, 0x91, 0x12, 0x99, 0x94, 0xa5, 0x35, 0xd9, 0x25, 0xf6, 0x73,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ },
+ {
+ /* Point with smallest y. (3/3) */
+ 0x14, 0x6d, 0x3b, 0x65, 0xad, 0xd9, 0xf5, 0x4c, 0xcc, 0xa2, 0x85, 0x33, 0xc8, 0x8e, 0x2c, 0xbc,
+ 0x63, 0xf7, 0x44, 0x3e, 0x16, 0x58, 0x78, 0x3a, 0xb4, 0x1f, 0x8e, 0xf9, 0x7c, 0x2a, 0x10, 0xb5,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01
+ }
+ };
+#define SECP256K1_EC_PARSE_TEST_NXVALID (4)
+ const unsigned char onlyxvalid[SECP256K1_EC_PARSE_TEST_NXVALID][64] = {
+ {
+ /* Valid if y overflow ignored (y = 1 mod p). (1/3) */
+ 0x1f, 0xe1, 0xe5, 0xef, 0x3f, 0xce, 0xb5, 0xc1, 0x35, 0xab, 0x77, 0x41, 0x33, 0x3c, 0xe5, 0xa6,
+ 0xe8, 0x0d, 0x68, 0x16, 0x76, 0x53, 0xf6, 0xb2, 0xb2, 0x4b, 0xcb, 0xcf, 0xaa, 0xaf, 0xf5, 0x07,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x30,
+ },
+ {
+ /* Valid if y overflow ignored (y = 1 mod p). (2/3) */
+ 0xcb, 0xb0, 0xde, 0xab, 0x12, 0x57, 0x54, 0xf1, 0xfd, 0xb2, 0x03, 0x8b, 0x04, 0x34, 0xed, 0x9c,
+ 0xb3, 0xfb, 0x53, 0xab, 0x73, 0x53, 0x91, 0x12, 0x99, 0x94, 0xa5, 0x35, 0xd9, 0x25, 0xf6, 0x73,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x30,
+ },
+ {
+ /* Valid if y overflow ignored (y = 1 mod p). (3/3)*/
+ 0x14, 0x6d, 0x3b, 0x65, 0xad, 0xd9, 0xf5, 0x4c, 0xcc, 0xa2, 0x85, 0x33, 0xc8, 0x8e, 0x2c, 0xbc,
+ 0x63, 0xf7, 0x44, 0x3e, 0x16, 0x58, 0x78, 0x3a, 0xb4, 0x1f, 0x8e, 0xf9, 0x7c, 0x2a, 0x10, 0xb5,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x30,
+ },
+ {
+ /* x on curve, y is from y^2 = x^3 + 8. */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03
+ }
+ };
+#define SECP256K1_EC_PARSE_TEST_NINVALID (7)
+ const unsigned char invalid[SECP256K1_EC_PARSE_TEST_NINVALID][64] = {
+ {
+ /* x is third root of -8, y is -1 * (x^3+7); also on the curve for y^2 = x^3 + 9. */
+ 0x0a, 0x2d, 0x2b, 0xa9, 0x35, 0x07, 0xf1, 0xdf, 0x23, 0x37, 0x70, 0xc2, 0xa7, 0x97, 0x96, 0x2c,
+ 0xc6, 0x1f, 0x6d, 0x15, 0xda, 0x14, 0xec, 0xd4, 0x7d, 0x8d, 0x27, 0xae, 0x1c, 0xd5, 0xf8, 0x53,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ },
+ {
+ /* Valid if x overflow ignored (x = 1 mod p). */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x30,
+ 0x42, 0x18, 0xf2, 0x0a, 0xe6, 0xc6, 0x46, 0xb3, 0x63, 0xdb, 0x68, 0x60, 0x58, 0x22, 0xfb, 0x14,
+ 0x26, 0x4c, 0xa8, 0xd2, 0x58, 0x7f, 0xdd, 0x6f, 0xbc, 0x75, 0x0d, 0x58, 0x7e, 0x76, 0xa7, 0xee,
+ },
+ {
+ /* Valid if x overflow ignored (x = 1 mod p). */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x30,
+ 0xbd, 0xe7, 0x0d, 0xf5, 0x19, 0x39, 0xb9, 0x4c, 0x9c, 0x24, 0x97, 0x9f, 0xa7, 0xdd, 0x04, 0xeb,
+ 0xd9, 0xb3, 0x57, 0x2d, 0xa7, 0x80, 0x22, 0x90, 0x43, 0x8a, 0xf2, 0xa6, 0x81, 0x89, 0x54, 0x41,
+ },
+ {
+ /* x is -1, y is the result of the sqrt ladder; also on the curve for y^2 = x^3 - 5. */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2e,
+ 0xf4, 0x84, 0x14, 0x5c, 0xb0, 0x14, 0x9b, 0x82, 0x5d, 0xff, 0x41, 0x2f, 0xa0, 0x52, 0xa8, 0x3f,
+ 0xcb, 0x72, 0xdb, 0x61, 0xd5, 0x6f, 0x37, 0x70, 0xce, 0x06, 0x6b, 0x73, 0x49, 0xa2, 0xaa, 0x28,
+ },
+ {
+ /* x is -1, y is the result of the sqrt ladder; also on the curve for y^2 = x^3 - 5. */
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe, 0xff, 0xff, 0xfc, 0x2e,
+ 0x0b, 0x7b, 0xeb, 0xa3, 0x4f, 0xeb, 0x64, 0x7d, 0xa2, 0x00, 0xbe, 0xd0, 0x5f, 0xad, 0x57, 0xc0,
+ 0x34, 0x8d, 0x24, 0x9e, 0x2a, 0x90, 0xc8, 0x8f, 0x31, 0xf9, 0x94, 0x8b, 0xb6, 0x5d, 0x52, 0x07,
+ },
+ {
+ /* x is zero, y is the result of the sqrt ladder; also on the curve for y^2 = x^3 - 7. */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x8f, 0x53, 0x7e, 0xef, 0xdf, 0xc1, 0x60, 0x6a, 0x07, 0x27, 0xcd, 0x69, 0xb4, 0xa7, 0x33, 0x3d,
+ 0x38, 0xed, 0x44, 0xe3, 0x93, 0x2a, 0x71, 0x79, 0xee, 0xcb, 0x4b, 0x6f, 0xba, 0x93, 0x60, 0xdc,
+ },
+ {
+ /* x is zero, y is the result of the sqrt ladder; also on the curve for y^2 = x^3 - 7. */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x70, 0xac, 0x81, 0x10, 0x20, 0x3e, 0x9f, 0x95, 0xf8, 0xd8, 0x32, 0x96, 0x4b, 0x58, 0xcc, 0xc2,
+ 0xc7, 0x12, 0xbb, 0x1c, 0x6c, 0xd5, 0x8e, 0x86, 0x11, 0x34, 0xb4, 0x8f, 0x45, 0x6c, 0x9b, 0x53
+ }
+ };
+ const unsigned char pubkeyc[66] = {
+ /* Serialization of G, plus one extra trailing byte that must never be read. */
+ 0x04, 0x79, 0xBE, 0x66, 0x7E, 0xF9, 0xDC, 0xBB, 0xAC, 0x55, 0xA0, 0x62, 0x95, 0xCE, 0x87, 0x0B,
+ 0x07, 0x02, 0x9B, 0xFC, 0xDB, 0x2D, 0xCE, 0x28, 0xD9, 0x59, 0xF2, 0x81, 0x5B, 0x16, 0xF8, 0x17,
+ 0x98, 0x48, 0x3A, 0xDA, 0x77, 0x26, 0xA3, 0xC4, 0x65, 0x5D, 0xA4, 0xFB, 0xFC, 0x0E, 0x11, 0x08,
+ 0xA8, 0xFD, 0x17, 0xB4, 0x48, 0xA6, 0x85, 0x54, 0x19, 0x9C, 0x47, 0xD0, 0x8F, 0xFB, 0x10, 0xD4,
+ 0xB8, 0x00
+ };
+ unsigned char sout[65];
+ unsigned char shortkey[2];
+ secp256k1_ge ge;
+ secp256k1_pubkey pubkey;
+ size_t len;
+ int32_t i;
+ int32_t ecount;
+ int32_t ecount2;
+ ecount = 0;
+ /* Nothing should be reading this far into pubkeyc. */
+ VG_UNDEF(&pubkeyc[65], 1);
+ secp256k1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount);
+ /* Zero length claimed, fail, zeroize, no illegal arg error. */
+ memset(&pubkey, 0xfe, sizeof(pubkey));
+ ecount = 0;
+ VG_UNDEF(shortkey, 2);
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, shortkey, 0) == 0);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ CHECK(ecount == 0);
+ CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0);
+ CHECK(ecount == 1);
+ /* Length one claimed, fail, zeroize, no illegal arg error. */
+ for (i = 0; i < 256 ; i++) {
+ memset(&pubkey, 0xfe, sizeof(pubkey));
+ ecount = 0;
+ shortkey[0] = i;
+ VG_UNDEF(&shortkey[1], 1);
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, shortkey, 1) == 0);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ CHECK(ecount == 0);
+ CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0);
+ CHECK(ecount == 1);
+ }
+ /* Length two claimed, fail, zeroize, no illegal arg error. */
+ for (i = 0; i < 65536 ; i++) {
+ memset(&pubkey, 0xfe, sizeof(pubkey));
+ ecount = 0;
+ shortkey[0] = i & 255;
+ shortkey[1] = i >> 8;
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, shortkey, 2) == 0);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ CHECK(ecount == 0);
+ CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0);
+ CHECK(ecount == 1);
+ }
+ memset(&pubkey, 0xfe, sizeof(pubkey));
+ ecount = 0;
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ /* 33 bytes claimed on otherwise valid input starting with 0x04, fail, zeroize output, no illegal arg error. */
+ CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 33) == 0);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ CHECK(ecount == 0);
+ CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0);
+ CHECK(ecount == 1);
+ /* NULL pubkey, illegal arg error. The pubkey output isn't re-initialized before this call, since only NULL is passed to the parser. */
+ CHECK(secp256k1_ec_pubkey_parse(ctx, NULL, pubkeyc, 65) == 0);
+ CHECK(ecount == 2);
+ /* NULL input string. Illegal arg and zeroize output. */
+ memset(&pubkey, 0xfe, sizeof(pubkey));
+ ecount = 0;
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, NULL, 65) == 0);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ CHECK(ecount == 1);
+ CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0);
+ CHECK(ecount == 2);
+ /* 64 bytes claimed on input starting with 0x04, fail, zeroize output, no illegal arg error. */
+ memset(&pubkey, 0xfe, sizeof(pubkey));
+ ecount = 0;
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 64) == 0);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ CHECK(ecount == 0);
+ CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0);
+ CHECK(ecount == 1);
+ /* 66 bytes claimed, fail, zeroize output, no illegal arg error. */
+ memset(&pubkey, 0xfe, sizeof(pubkey));
+ ecount = 0;
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 66) == 0);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ CHECK(ecount == 0);
+ CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 0);
+ CHECK(ecount == 1);
+ /* Valid parse. */
+ memset(&pubkey, 0, sizeof(pubkey));
+ ecount = 0;
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, 65) == 1);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ CHECK(ecount == 0);
+ VG_UNDEF(&ge, sizeof(ge));
+ CHECK(secp256k1_pubkey_load(ctx, &ge, &pubkey) == 1);
+ VG_CHECK(&ge.x, sizeof(ge.x));
+ VG_CHECK(&ge.y, sizeof(ge.y));
+ VG_CHECK(&ge.infinity, sizeof(ge.infinity));
+ ge_equals_ge(&secp256k1_ge_const_g, &ge);
+ CHECK(ecount == 0);
+ /* secp256k1_ec_pubkey_serialize illegal args. */
+ ecount = 0;
+ len = 65;
+ CHECK(secp256k1_ec_pubkey_serialize(ctx, NULL, &len, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 0);
+ CHECK(ecount == 1);
+ CHECK(len == 0);
+ CHECK(secp256k1_ec_pubkey_serialize(ctx, sout, NULL, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 0);
+ CHECK(ecount == 2);
+ len = 65;
+ VG_UNDEF(sout, 65);
+ CHECK(secp256k1_ec_pubkey_serialize(ctx, sout, &len, NULL, SECP256K1_EC_UNCOMPRESSED) == 0);
+ VG_CHECK(sout, 65);
+ CHECK(ecount == 3);
+ CHECK(len == 0);
+ len = 65;
+ CHECK(secp256k1_ec_pubkey_serialize(ctx, sout, &len, &pubkey, ~0) == 0);
+ CHECK(ecount == 4);
+ CHECK(len == 0);
+ len = 65;
+ VG_UNDEF(sout, 65);
+ CHECK(secp256k1_ec_pubkey_serialize(ctx, sout, &len, &pubkey, SECP256K1_EC_UNCOMPRESSED) == 1);
+ VG_CHECK(sout, 65);
+ CHECK(ecount == 4);
+ CHECK(len == 65);
+ /* Multiple illegal args. Should still set arg error only once. */
+ ecount = 0;
+ ecount2 = 11;
+ CHECK(secp256k1_ec_pubkey_parse(ctx, NULL, NULL, 65) == 0);
+ CHECK(ecount == 1);
+ /* Does the illegal arg callback actually change the behavior? */
+ secp256k1_context_set_illegal_callback(ctx, uncounting_illegal_callback_fn, &ecount2);
+ CHECK(secp256k1_ec_pubkey_parse(ctx, NULL, NULL, 65) == 0);
+ CHECK(ecount == 1);
+ CHECK(ecount2 == 10);
+ secp256k1_context_set_illegal_callback(ctx, NULL, NULL);
+ /* Try a bunch of prefabbed points with all possible encodings. */
+ for (i = 0; i < SECP256K1_EC_PARSE_TEST_NVALID; i++) {
+ ec_pubkey_parse_pointtest(valid[i], 1, 1);
+ }
+ for (i = 0; i < SECP256K1_EC_PARSE_TEST_NXVALID; i++) {
+ ec_pubkey_parse_pointtest(onlyxvalid[i], 1, 0);
+ }
+ for (i = 0; i < SECP256K1_EC_PARSE_TEST_NINVALID; i++) {
+ ec_pubkey_parse_pointtest(invalid[i], 0, 0);
+ }
+}
+
+void run_eckey_edge_case_test(void) {
+ const unsigned char orderc[32] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe,
+ 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b,
+ 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41
+ };
+ const unsigned char zeros[sizeof(secp256k1_pubkey)] = {0x00};
+ unsigned char ctmp[33];
+ unsigned char ctmp2[33];
+ secp256k1_pubkey pubkey;
+ secp256k1_pubkey pubkey2;
+ secp256k1_pubkey pubkey_one;
+ secp256k1_pubkey pubkey_negone;
+ const secp256k1_pubkey *pubkeys[3];
+ size_t len;
+ int32_t ecount;
+ /* Group order is too large, reject. */
+ CHECK(secp256k1_ec_seckey_verify(ctx, orderc) == 0);
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, orderc) == 0);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ /* Maximum value is too large, reject. */
+ memset(ctmp, 255, 32);
+ CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 0);
+ memset(&pubkey, 1, sizeof(pubkey));
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ /* Zero is too small, reject. */
+ memset(ctmp, 0, 32);
+ CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 0);
+ memset(&pubkey, 1, sizeof(pubkey));
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ /* One must be accepted. */
+ ctmp[31] = 0x01;
+ CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 1);
+ memset(&pubkey, 0, sizeof(pubkey));
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 1);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
+ pubkey_one = pubkey;
+ /* Group order + 1 is too large, reject. */
+ memcpy(ctmp, orderc, 32);
+ ctmp[31] = 0x42;
+ CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 0);
+ memset(&pubkey, 1, sizeof(pubkey));
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 0);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ /* -1 must be accepted. */
+ ctmp[31] = 0x40;
+ CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 1);
+ memset(&pubkey, 0, sizeof(pubkey));
+ VG_UNDEF(&pubkey, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, ctmp) == 1);
+ VG_CHECK(&pubkey, sizeof(pubkey));
+ CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
+ pubkey_negone = pubkey;
+ /* Tweak of zero leaves the value unchanged. */
+ memset(ctmp2, 0, 32);
+ CHECK(secp256k1_ec_privkey_tweak_add(ctx, ctmp, ctmp2) == 1);
+ CHECK(memcmp(orderc, ctmp, 31) == 0 && ctmp[31] == 0x40);
+ memcpy(&pubkey2, &pubkey, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1);
+ CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
+ /* Multiply tweak of zero zeroizes the output. */
+ CHECK(secp256k1_ec_privkey_tweak_mul(ctx, ctmp, ctmp2) == 0);
+ CHECK(memcmp(zeros, ctmp, 32) == 0);
+ CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, ctmp2) == 0);
+ CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0);
+ memcpy(&pubkey, &pubkey2, sizeof(pubkey));
+ /* Overflowing key tweak zeroizes. */
+ memcpy(ctmp, orderc, 32);
+ ctmp[31] = 0x40;
+ CHECK(secp256k1_ec_privkey_tweak_add(ctx, ctmp, orderc) == 0);
+ CHECK(memcmp(zeros, ctmp, 32) == 0);
+ memcpy(ctmp, orderc, 32);
+ ctmp[31] = 0x40;
+ CHECK(secp256k1_ec_privkey_tweak_mul(ctx, ctmp, orderc) == 0);
+ CHECK(memcmp(zeros, ctmp, 32) == 0);
+ memcpy(ctmp, orderc, 32);
+ ctmp[31] = 0x40;
+ CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, orderc) == 0);
+ CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0);
+ memcpy(&pubkey, &pubkey2, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, orderc) == 0);
+ CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0);
+ memcpy(&pubkey, &pubkey2, sizeof(pubkey));
+ /* A tweak that results in a key of zero (or the point at infinity) fails and zeroizes the output. */
+ ctmp2[31] = 1;
+ CHECK(secp256k1_ec_privkey_tweak_add(ctx, ctmp2, ctmp) == 0);
+ CHECK(memcmp(zeros, ctmp2, 32) == 0);
+ ctmp2[31] = 1;
+ CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 0);
+ CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0);
+ memcpy(&pubkey, &pubkey2, sizeof(pubkey));
+ /* Tweak computation wraps and results in a key of 1. */
+ ctmp2[31] = 2;
+ CHECK(secp256k1_ec_privkey_tweak_add(ctx, ctmp2, ctmp) == 1);
+ CHECK(memcmp(ctmp2, zeros, 31) == 0 && ctmp2[31] == 1);
+ ctmp2[31] = 2;
+ CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1);
+ ctmp2[31] = 1;
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey2, ctmp2) == 1);
+ CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
+ /* Tweak mul * 2 = 1+1. */
+ CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 1);
+ ctmp2[31] = 2;
+ CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey2, ctmp2) == 1);
+ CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
+ /* Test argument errors. */
+ ecount = 0;
+ secp256k1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount);
+ CHECK(ecount == 0);
+ /* Zeroize pubkey on parse error. */
+ memset(&pubkey, 0, 32);
+ CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, ctmp2) == 0);
+ CHECK(ecount == 1);
+ CHECK(memcmp(&pubkey, zeros, sizeof(pubkey)) == 0);
+ memcpy(&pubkey, &pubkey2, sizeof(pubkey));
+ memset(&pubkey2, 0, 32);
+ CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey2, ctmp2) == 0);
+ CHECK(ecount == 2);
+ CHECK(memcmp(&pubkey2, zeros, sizeof(pubkey2)) == 0);
+ /* Plain argument errors. */
+ ecount = 0;
+ CHECK(secp256k1_ec_seckey_verify(ctx, ctmp) == 1);
+ CHECK(ecount == 0);
+ CHECK(secp256k1_ec_seckey_verify(ctx, NULL) == 0);
+ CHECK(ecount == 1);
+ ecount = 0;
+ memset(ctmp2, 0, 32);
+ ctmp2[31] = 4;
+ CHECK(secp256k1_ec_pubkey_tweak_add(ctx, NULL, ctmp2) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, NULL) == 0);
+ CHECK(ecount == 2);
+ ecount = 0;
+ memset(ctmp2, 0, 32);
+ ctmp2[31] = 4;
+ CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, NULL, ctmp2) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, NULL) == 0);
+ CHECK(ecount == 2);
+ ecount = 0;
+ memset(ctmp2, 0, 32);
+ CHECK(secp256k1_ec_privkey_tweak_add(ctx, NULL, ctmp2) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_ec_privkey_tweak_add(ctx, ctmp, NULL) == 0);
+ CHECK(ecount == 2);
+ ecount = 0;
+ memset(ctmp2, 0, 32);
+ ctmp2[31] = 1;
+ CHECK(secp256k1_ec_privkey_tweak_mul(ctx, NULL, ctmp2) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_ec_privkey_tweak_mul(ctx, ctmp, NULL) == 0);
+ CHECK(ecount == 2);
+ ecount = 0;
+ CHECK(secp256k1_ec_pubkey_create(ctx, NULL, ctmp) == 0);
+ CHECK(ecount == 1);
+ memset(&pubkey, 1, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, NULL) == 0);
+ CHECK(ecount == 2);
+ CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ /* secp256k1_ec_pubkey_combine tests. */
+ ecount = 0;
+ pubkeys[0] = &pubkey_one;
+ VG_UNDEF(&pubkeys[0], sizeof(secp256k1_pubkey *));
+ VG_UNDEF(&pubkeys[1], sizeof(secp256k1_pubkey *));
+ VG_UNDEF(&pubkeys[2], sizeof(secp256k1_pubkey *));
+ memset(&pubkey, 255, sizeof(secp256k1_pubkey));
+ VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey));
+ CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 0) == 0);
+ VG_CHECK(&pubkey, sizeof(secp256k1_pubkey));
+ CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_ec_pubkey_combine(ctx, NULL, pubkeys, 1) == 0);
+ CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ CHECK(ecount == 2);
+ memset(&pubkey, 255, sizeof(secp256k1_pubkey));
+ VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey));
+ CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, NULL, 1) == 0);
+ VG_CHECK(&pubkey, sizeof(secp256k1_pubkey));
+ CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ CHECK(ecount == 3);
+ pubkeys[0] = &pubkey_negone;
+ memset(&pubkey, 255, sizeof(secp256k1_pubkey));
+ VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey));
+ CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 1) == 1);
+ VG_CHECK(&pubkey, sizeof(secp256k1_pubkey));
+ CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
+ CHECK(ecount == 3);
+ len = 33;
+ CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1);
+ CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp2, &len, &pubkey_negone, SECP256K1_EC_COMPRESSED) == 1);
+ CHECK(memcmp(ctmp, ctmp2, 33) == 0);
+ /* Result is infinity. */
+ pubkeys[0] = &pubkey_one;
+ pubkeys[1] = &pubkey_negone;
+ memset(&pubkey, 255, sizeof(secp256k1_pubkey));
+ VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey));
+ CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 2) == 0);
+ VG_CHECK(&pubkey, sizeof(secp256k1_pubkey));
+ CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) == 0);
+ CHECK(ecount == 3);
+ /* Passes through infinity but comes out one. */
+ pubkeys[2] = &pubkey_one;
+ memset(&pubkey, 255, sizeof(secp256k1_pubkey));
+ VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey));
+ CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 3) == 1);
+ VG_CHECK(&pubkey, sizeof(secp256k1_pubkey));
+ CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
+ CHECK(ecount == 3);
+ len = 33;
+ CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp, &len, &pubkey, SECP256K1_EC_COMPRESSED) == 1);
+ CHECK(secp256k1_ec_pubkey_serialize(ctx, ctmp2, &len, &pubkey_one, SECP256K1_EC_COMPRESSED) == 1);
+ CHECK(memcmp(ctmp, ctmp2, 33) == 0);
+ /* Adds to two. */
+ pubkeys[1] = &pubkey_one;
+ memset(&pubkey, 255, sizeof(secp256k1_pubkey));
+ VG_UNDEF(&pubkey, sizeof(secp256k1_pubkey));
+ CHECK(secp256k1_ec_pubkey_combine(ctx, &pubkey, pubkeys, 2) == 1);
+ VG_CHECK(&pubkey, sizeof(secp256k1_pubkey));
+ CHECK(memcmp(&pubkey, zeros, sizeof(secp256k1_pubkey)) > 0);
+ CHECK(ecount == 3);
+ secp256k1_context_set_illegal_callback(ctx, NULL, NULL);
+}
+
+void random_sign(secp256k1_scalar *sigr, secp256k1_scalar *sigs, const secp256k1_scalar *key, const secp256k1_scalar *msg, int *recid) {
+ secp256k1_scalar nonce;
+ do {
+ random_scalar_order_test(&nonce);
+ } while(!secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, sigr, sigs, key, msg, &nonce, recid));
+}
+
+void test_ecdsa_sign_verify(void) {
+ secp256k1_gej pubj;
+ secp256k1_ge pub;
+ secp256k1_scalar one;
+ secp256k1_scalar msg, key;
+ secp256k1_scalar sigr, sigs;
+ int recid;
+ int getrec;
+ random_scalar_order_test(&msg);
+ random_scalar_order_test(&key);
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &pubj, &key);
+ secp256k1_ge_set_gej(&pub, &pubj);
+ getrec = secp256k1_rand_bits(1);
+ random_sign(&sigr, &sigs, &key, &msg, getrec?&recid:NULL);
+ if (getrec) {
+ CHECK(recid >= 0 && recid < 4);
+ }
+ CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &pub, &msg));
+ secp256k1_scalar_set_int(&one, 1);
+ secp256k1_scalar_add(&msg, &msg, &one);
+ CHECK(!secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &pub, &msg));
+}
+
+void run_ecdsa_sign_verify(void) {
+ int i;
+ for (i = 0; i < 10*count; i++) {
+ test_ecdsa_sign_verify();
+ }
+}
+
+/** Dummy nonce generation function that just uses a precomputed nonce, and fails if it is not accepted. Use only for testing. */
+static int precomputed_nonce_function(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) {
+ (void)msg32;
+ (void)key32;
+ (void)algo16;
+ memcpy(nonce32, data, 32);
+ return (counter == 0);
+}
+
+static int nonce_function_test_fail(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) {
+ /* Dummy nonce generator that has a fatal error on the first counter value. */
+ if (counter == 0) {
+ return 0;
+ }
+ return nonce_function_rfc6979(nonce32, msg32, key32, algo16, data, counter - 1);
+}
+
+static int nonce_function_test_retry(unsigned char *nonce32, const unsigned char *msg32, const unsigned char *key32, const unsigned char *algo16, void *data, unsigned int counter) {
+ /* Dummy nonce generator that produces unacceptable nonces for the first several counter values. */
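+ /* Counter 0 yields a zero nonce; counters 1 and 2 yield 2^256-1 and 2^256-2; counters 3 and 4 yield the group order and order+1. All of these must be rejected by the signer. */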
+ if (counter < 3) {
+ memset(nonce32, counter==0 ? 0 : 255, 32);
+ if (counter == 2) {
+ nonce32[31]--;
+ }
+ return 1;
+ }
+ if (counter < 5) {
+ static const unsigned char order[] = {
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,
+ 0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B,
+ 0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x41
+ };
+ memcpy(nonce32, order, 32);
+ if (counter == 4) {
+ nonce32[31]++;
+ }
+ return 1;
+ }
+ /* The retry rate of RFC6979 is negligible, especially as we only call this in deterministic tests. */
+ /* If someone does find a case where it retries for secp256k1, we'd like to know. */
+ if (counter > 5) {
+ return 0;
+ }
+ return nonce_function_rfc6979(nonce32, msg32, key32, algo16, data, counter - 5);
+}
+
+int is_empty_signature(const secp256k1_ecdsa_signature *sig) {
+ static const unsigned char res[sizeof(secp256k1_ecdsa_signature)] = {0};
+ return memcmp(sig, res, sizeof(secp256k1_ecdsa_signature)) == 0;
+}
+
+void test_ecdsa_end_to_end(void) {
+ unsigned char extra[32] = {0x00};
+ unsigned char privkey[32];
+ unsigned char message[32];
+ unsigned char privkey2[32];
+ secp256k1_ecdsa_signature signature[6];
+ secp256k1_scalar r, s;
+ unsigned char sig[74];
+ size_t siglen = 74;
+ unsigned char pubkeyc[65];
+ size_t pubkeyclen = 65;
+ secp256k1_pubkey pubkey;
+ unsigned char seckey[300];
+ size_t seckeylen = 300;
+
+ /* Generate a random key and message. */
+ {
+ secp256k1_scalar msg, key;
+ random_scalar_order_test(&msg);
+ random_scalar_order_test(&key);
+ secp256k1_scalar_get_b32(privkey, &key);
+ secp256k1_scalar_get_b32(message, &msg);
+ }
+
+ /* Construct and verify corresponding public key. */
+ CHECK(secp256k1_ec_seckey_verify(ctx, privkey) == 1);
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, privkey) == 1);
+
+ /* Verify exporting and importing public key. */
+ CHECK(secp256k1_ec_pubkey_serialize(ctx, pubkeyc, &pubkeyclen, &pubkey, secp256k1_rand_bits(1) == 1 ? SECP256K1_EC_COMPRESSED : SECP256K1_EC_UNCOMPRESSED));
+ memset(&pubkey, 0, sizeof(pubkey));
+ CHECK(secp256k1_ec_pubkey_parse(ctx, &pubkey, pubkeyc, pubkeyclen) == 1);
+
+ /* Verify private key import and export. */
+ CHECK(ec_privkey_export_der(ctx, seckey, &seckeylen, privkey, secp256k1_rand_bits(1) == 1));
+ CHECK(ec_privkey_import_der(ctx, privkey2, seckey, seckeylen) == 1);
+ CHECK(memcmp(privkey, privkey2, 32) == 0);
+
+ /* Optionally tweak the keys using addition. */
+ if (secp256k1_rand_int(3) == 0) {
+ int ret1;
+ int ret2;
+ unsigned char rnd[32];
+ secp256k1_pubkey pubkey2;
+ secp256k1_rand256_test(rnd);
+ ret1 = secp256k1_ec_privkey_tweak_add(ctx, privkey, rnd);
+ ret2 = secp256k1_ec_pubkey_tweak_add(ctx, &pubkey, rnd);
+ CHECK(ret1 == ret2);
+ if (ret1 == 0) {
+ return;
+ }
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey2, privkey) == 1);
+ CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
+ }
+
+ /* Optionally tweak the keys using multiplication. */
+ if (secp256k1_rand_int(3) == 0) {
+ int ret1;
+ int ret2;
+ unsigned char rnd[32];
+ secp256k1_pubkey pubkey2;
+ secp256k1_rand256_test(rnd);
+ ret1 = secp256k1_ec_privkey_tweak_mul(ctx, privkey, rnd);
+ ret2 = secp256k1_ec_pubkey_tweak_mul(ctx, &pubkey, rnd);
+ CHECK(ret1 == ret2);
+ if (ret1 == 0) {
+ return;
+ }
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey2, privkey) == 1);
+ CHECK(memcmp(&pubkey, &pubkey2, sizeof(pubkey)) == 0);
+ }
+
+ /* Sign. */
+ CHECK(secp256k1_ecdsa_sign(ctx, &signature[0], message, privkey, NULL, NULL) == 1);
+ CHECK(secp256k1_ecdsa_sign(ctx, &signature[4], message, privkey, NULL, NULL) == 1);
+ CHECK(secp256k1_ecdsa_sign(ctx, &signature[1], message, privkey, NULL, extra) == 1);
+ extra[31] = 1;
+ CHECK(secp256k1_ecdsa_sign(ctx, &signature[2], message, privkey, NULL, extra) == 1);
+ extra[31] = 0;
+ extra[0] = 1;
+ CHECK(secp256k1_ecdsa_sign(ctx, &signature[3], message, privkey, NULL, extra) == 1);
+ CHECK(memcmp(&signature[0], &signature[4], sizeof(signature[0])) == 0);
+ CHECK(memcmp(&signature[0], &signature[1], sizeof(signature[0])) != 0);
+ CHECK(memcmp(&signature[0], &signature[2], sizeof(signature[0])) != 0);
+ CHECK(memcmp(&signature[0], &signature[3], sizeof(signature[0])) != 0);
+ CHECK(memcmp(&signature[1], &signature[2], sizeof(signature[0])) != 0);
+ CHECK(memcmp(&signature[1], &signature[3], sizeof(signature[0])) != 0);
+ CHECK(memcmp(&signature[2], &signature[3], sizeof(signature[0])) != 0);
+ /* Verify. */
+ CHECK(secp256k1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &signature[1], message, &pubkey) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &signature[2], message, &pubkey) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &signature[3], message, &pubkey) == 1);
+ /* Test lower-S form, malleate, verify and fail, test again, malleate again */
+ CHECK(!secp256k1_ecdsa_signature_normalize(ctx, NULL, &signature[0]));
+ secp256k1_ecdsa_signature_load(ctx, &r, &s, &signature[0]);
+ secp256k1_scalar_negate(&s, &s);
+ secp256k1_ecdsa_signature_save(&signature[5], &r, &s);
+ CHECK(secp256k1_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 0);
+ CHECK(secp256k1_ecdsa_signature_normalize(ctx, NULL, &signature[5]));
+ CHECK(secp256k1_ecdsa_signature_normalize(ctx, &signature[5], &signature[5]));
+ CHECK(!secp256k1_ecdsa_signature_normalize(ctx, NULL, &signature[5]));
+ CHECK(!secp256k1_ecdsa_signature_normalize(ctx, &signature[5], &signature[5]));
+ CHECK(secp256k1_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 1);
+ secp256k1_scalar_negate(&s, &s);
+ secp256k1_ecdsa_signature_save(&signature[5], &r, &s);
+ CHECK(!secp256k1_ecdsa_signature_normalize(ctx, NULL, &signature[5]));
+ CHECK(secp256k1_ecdsa_verify(ctx, &signature[5], message, &pubkey) == 1);
+ CHECK(memcmp(&signature[5], &signature[0], 64) == 0);
+
+ /* Serialize/parse DER and verify again */
+ CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1);
+ memset(&signature[0], 0, sizeof(signature[0]));
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &signature[0], sig, siglen) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 1);
+ /* Serialize/destroy/parse DER and verify again. */
+ siglen = 74;
+ CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, sig, &siglen, &signature[0]) == 1);
+ sig[secp256k1_rand_int(siglen)] += 1 + secp256k1_rand_int(255);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &signature[0], sig, siglen) == 0 ||
+ secp256k1_ecdsa_verify(ctx, &signature[0], message, &pubkey) == 0);
+}
+
+void test_random_pubkeys(void) {
+ secp256k1_ge elem;
+ secp256k1_ge elem2;
+ unsigned char in[65];
+ /* Generate some randomly sized pubkeys. */
+ size_t len = secp256k1_rand_bits(2) == 0 ? 65 : 33;
+ if (secp256k1_rand_bits(2) == 0) {
+ len = secp256k1_rand_bits(6);
+ }
+ if (len == 65) {
+ in[0] = secp256k1_rand_bits(1) ? 4 : (secp256k1_rand_bits(1) ? 6 : 7);
+ } else {
+ in[0] = secp256k1_rand_bits(1) ? 2 : 3;
+ }
+ if (secp256k1_rand_bits(3) == 0) {
+ in[0] = secp256k1_rand_bits(8);
+ }
+ if (len > 1) {
+ secp256k1_rand256(&in[1]);
+ }
+ if (len > 33) {
+ secp256k1_rand256(&in[33]);
+ }
+ if (secp256k1_eckey_pubkey_parse(&elem, in, len)) {
+ unsigned char out[65];
+ unsigned char firstb;
+ int res;
+ size_t size = len;
+ firstb = in[0];
+ /* If the pubkey can be parsed, it should round-trip... */
+ CHECK(secp256k1_eckey_pubkey_serialize(&elem, out, &size, len == 33));
+ CHECK(size == len);
+ CHECK(memcmp(&in[1], &out[1], len-1) == 0);
+ /* ... except for the type of hybrid inputs. */
+ if ((in[0] != 6) && (in[0] != 7)) {
+ CHECK(in[0] == out[0]);
+ }
+ size = 65;
+ CHECK(secp256k1_eckey_pubkey_serialize(&elem, in, &size, 0));
+ CHECK(size == 65);
+ CHECK(secp256k1_eckey_pubkey_parse(&elem2, in, size));
+ ge_equals_ge(&elem,&elem2);
+ /* Check that the X9.62 hybrid type is checked. */
+ in[0] = secp256k1_rand_bits(1) ? 6 : 7;
+ res = secp256k1_eckey_pubkey_parse(&elem2, in, size);
+ if (firstb == 2 || firstb == 3) {
+ if (in[0] == firstb + 4) {
+ CHECK(res);
+ } else {
+ CHECK(!res);
+ }
+ }
+ if (res) {
+ ge_equals_ge(&elem,&elem2);
+ CHECK(secp256k1_eckey_pubkey_serialize(&elem, out, &size, 0));
+ CHECK(memcmp(&in[1], &out[1], 64) == 0);
+ }
+ }
+}
+
+void run_random_pubkeys(void) {
+ int i;
+ for (i = 0; i < 10*count; i++) {
+ test_random_pubkeys();
+ }
+}
+
+void run_ecdsa_end_to_end(void) {
+ int i;
+ for (i = 0; i < 64*count; i++) {
+ test_ecdsa_end_to_end();
+ }
+}
+
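+/* Parse sig with the strict DER parser, the lax parser and (when enabled) OpenSSL,
+ * and cross-check their behaviour; returns a bitmask of observed inconsistencies,
+ * so 0 means all parsers agreed. */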
+int test_ecdsa_der_parse(const unsigned char *sig, size_t siglen, int certainly_der, int certainly_not_der) {
+ static const unsigned char zeroes[32] = {0};
+#ifdef ENABLE_OPENSSL_TESTS
+ static const unsigned char max_scalar[32] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe,
+ 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b,
+ 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x40
+ };
+#endif
+
+ int ret = 0;
+
+ secp256k1_ecdsa_signature sig_der;
+ unsigned char roundtrip_der[2048];
+ unsigned char compact_der[64];
+ size_t len_der = 2048;
+ int parsed_der = 0, valid_der = 0, roundtrips_der = 0;
+
+ secp256k1_ecdsa_signature sig_der_lax;
+ unsigned char roundtrip_der_lax[2048];
+ unsigned char compact_der_lax[64];
+ size_t len_der_lax = 2048;
+ int parsed_der_lax = 0, valid_der_lax = 0, roundtrips_der_lax = 0;
+
+#ifdef ENABLE_OPENSSL_TESTS
+ ECDSA_SIG *sig_openssl;
+ const unsigned char *sigptr;
+ unsigned char roundtrip_openssl[2048];
+ int len_openssl = 2048;
+ int parsed_openssl, valid_openssl = 0, roundtrips_openssl = 0;
+#endif
+
+ parsed_der = secp256k1_ecdsa_signature_parse_der(ctx, &sig_der, sig, siglen);
+ if (parsed_der) {
+ ret |= (!secp256k1_ecdsa_signature_serialize_compact(ctx, compact_der, &sig_der)) << 0;
+ valid_der = (memcmp(compact_der, zeroes, 32) != 0) && (memcmp(compact_der + 32, zeroes, 32) != 0);
+ }
+ if (valid_der) {
+ ret |= (!secp256k1_ecdsa_signature_serialize_der(ctx, roundtrip_der, &len_der, &sig_der)) << 1;
+ roundtrips_der = (len_der == siglen) && memcmp(roundtrip_der, sig, siglen) == 0;
+ }
+
+ parsed_der_lax = ecdsa_signature_parse_der_lax(ctx, &sig_der_lax, sig, siglen);
+ if (parsed_der_lax) {
+ ret |= (!secp256k1_ecdsa_signature_serialize_compact(ctx, compact_der_lax, &sig_der_lax)) << 10;
+ valid_der_lax = (memcmp(compact_der_lax, zeroes, 32) != 0) && (memcmp(compact_der_lax + 32, zeroes, 32) != 0);
+ }
+ if (valid_der_lax) {
+ ret |= (!secp256k1_ecdsa_signature_serialize_der(ctx, roundtrip_der_lax, &len_der_lax, &sig_der_lax)) << 11;
+ roundtrips_der_lax = (len_der_lax == siglen) && memcmp(roundtrip_der_lax, sig, siglen) == 0;
+ }
+
+ if (certainly_der) {
+ ret |= (!parsed_der) << 2;
+ }
+ if (certainly_not_der) {
+ ret |= (parsed_der) << 17;
+ }
+ if (valid_der) {
+ ret |= (!roundtrips_der) << 3;
+ }
+
+ if (valid_der) {
+ ret |= (!roundtrips_der_lax) << 12;
+ ret |= (len_der != len_der_lax) << 13;
+ ret |= (memcmp(roundtrip_der_lax, roundtrip_der, len_der) != 0) << 14;
+ }
+ ret |= (roundtrips_der != roundtrips_der_lax) << 15;
+ if (parsed_der) {
+ ret |= (!parsed_der_lax) << 16;
+ }
+
+#ifdef ENABLE_OPENSSL_TESTS
+ sig_openssl = ECDSA_SIG_new();
+ sigptr = sig;
+ parsed_openssl = (d2i_ECDSA_SIG(&sig_openssl, &sigptr, siglen) != NULL);
+ if (parsed_openssl) {
+ valid_openssl = !BN_is_negative(sig_openssl->r) && !BN_is_negative(sig_openssl->s) && BN_num_bits(sig_openssl->r) > 0 && BN_num_bits(sig_openssl->r) <= 256 && BN_num_bits(sig_openssl->s) > 0 && BN_num_bits(sig_openssl->s) <= 256;
+ if (valid_openssl) {
+ unsigned char tmp[32] = {0};
+ BN_bn2bin(sig_openssl->r, tmp + 32 - BN_num_bytes(sig_openssl->r));
+ valid_openssl = memcmp(tmp, max_scalar, 32) < 0;
+ }
+ if (valid_openssl) {
+ unsigned char tmp[32] = {0};
+ BN_bn2bin(sig_openssl->s, tmp + 32 - BN_num_bytes(sig_openssl->s));
+ valid_openssl = memcmp(tmp, max_scalar, 32) < 0;
+ }
+ }
+ len_openssl = i2d_ECDSA_SIG(sig_openssl, NULL);
+ if (len_openssl <= 2048) {
+ unsigned char *ptr = roundtrip_openssl;
+ CHECK(i2d_ECDSA_SIG(sig_openssl, &ptr) == len_openssl);
+ roundtrips_openssl = valid_openssl && ((size_t)len_openssl == siglen) && (memcmp(roundtrip_openssl, sig, siglen) == 0);
+ } else {
+ len_openssl = 0;
+ }
+ ECDSA_SIG_free(sig_openssl);
+
+ ret |= (parsed_der && !parsed_openssl) << 4;
+ ret |= (valid_der && !valid_openssl) << 5;
+ ret |= (roundtrips_openssl && !parsed_der) << 6;
+ ret |= (roundtrips_der != roundtrips_openssl) << 7;
+ if (roundtrips_openssl) {
+ ret |= (len_der != (size_t)len_openssl) << 8;
+ ret |= (memcmp(roundtrip_der, roundtrip_openssl, len_der) != 0) << 9;
+ }
+#endif
+ return ret;
+}
+
+static void assign_big_endian(unsigned char *ptr, size_t ptrlen, uint32_t val) {
+ size_t i;
+ for (i = 0; i < ptrlen; i++) {
+ int shift = ptrlen - 1 - i;
+ if (shift >= 4) {
+ ptr[i] = 0;
+ } else {
+ ptr[i] = (val >> shift) & 0xFF;
+ }
+ }
+}
+
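+/* Randomly corrupt a signature encoding: delete, insert or modify a byte, or flip
+ * a single bit (a summary of the cases handled below). */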
+static void damage_array(unsigned char *sig, size_t *len) {
+ int pos;
+ int action = secp256k1_rand_bits(3);
+ if (action < 1 && *len > 3) {
+ /* Delete a byte. */
+ pos = secp256k1_rand_int(*len);
+ memmove(sig + pos, sig + pos + 1, *len - pos - 1);
+ (*len)--;
+ return;
+ } else if (action < 2 && *len < 2048) {
+ /* Insert a byte. */
+ pos = secp256k1_rand_int(1 + *len);
+ memmove(sig + pos + 1, sig + pos, *len - pos);
+ sig[pos] = secp256k1_rand_bits(8);
+ (*len)++;
+ return;
+ } else if (action < 4) {
+ /* Modify a byte. */
+ sig[secp256k1_rand_int(*len)] += 1 + secp256k1_rand_int(255);
+ return;
+ } else { /* action < 8 */
+ /* Modify a bit. */
+ sig[secp256k1_rand_int(*len)] ^= 1 << secp256k1_rand_bits(3);
+ return;
+ }
+}
+
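+/* Generate a random BER-encoded ECDSA signature. *certainly_der is set when the
+ * output must be valid DER, *certainly_not_der when it certainly is not; anything
+ * in between is left for the parsers under test to decide. */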
+static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly_der, int* certainly_not_der) {
+ int der;
+ int nlow[2], nlen[2], nlenlen[2], nhbit[2], nhbyte[2], nzlen[2];
+ size_t tlen, elen, glen;
+ int indet;
+ int n;
+
+ *len = 0;
+ der = secp256k1_rand_bits(2) == 0;
+ *certainly_der = der;
+ *certainly_not_der = 0;
+ indet = der ? 0 : secp256k1_rand_int(10) == 0;
+
+ for (n = 0; n < 2; n++) {
+ /* We generate two classes of numbers: nlow==1 "low" ones (up to 32 bytes), nlow==0 "high" ones (32 bytes with 129 top bits set, or larger than 32 bytes) */
+ nlow[n] = der ? 1 : (secp256k1_rand_bits(3) != 0);
+ /* The length of the number in bytes (the first byte of which will always be nonzero) */
+ nlen[n] = nlow[n] ? secp256k1_rand_int(33) : 32 + secp256k1_rand_int(200) * secp256k1_rand_int(8) / 8;
+ CHECK(nlen[n] <= 232);
+ /* The top bit of the number. */
+ nhbit[n] = (nlow[n] == 0 && nlen[n] == 32) ? 1 : (nlen[n] == 0 ? 0 : secp256k1_rand_bits(1));
+ /* The top byte of the number (after the potential hardcoded 16 0xFF bytes for "high" 32-byte numbers) */
+ nhbyte[n] = nlen[n] == 0 ? 0 : (nhbit[n] ? 128 + secp256k1_rand_bits(7) : 1 + secp256k1_rand_int(127));
+ /* The number of zero bytes in front of the number (which is 0 or 1 in case of DER, otherwise we extend up to 300 bytes) */
+ nzlen[n] = der ? ((nlen[n] == 0 || nhbit[n]) ? 1 : 0) : (nlow[n] ? secp256k1_rand_int(3) : secp256k1_rand_int(300 - nlen[n]) * secp256k1_rand_int(8) / 8);
+ if (nzlen[n] > ((nlen[n] == 0 || nhbit[n]) ? 1 : 0)) {
+ *certainly_not_der = 1;
+ }
+ CHECK(nlen[n] + nzlen[n] <= 300);
+ /* The length of the length descriptor for the number. 0 means short encoding, anything else is long encoding. */
+ nlenlen[n] = nlen[n] + nzlen[n] < 128 ? 0 : (nlen[n] + nzlen[n] < 256 ? 1 : 2);
+ if (!der) {
+ /* nlenlen[n] max 127 bytes */
+ int add = secp256k1_rand_int(127 - nlenlen[n]) * secp256k1_rand_int(16) * secp256k1_rand_int(16) / 256;
+ nlenlen[n] += add;
+ if (add != 0) {
+ *certainly_not_der = 1;
+ }
+ }
+ CHECK(nlen[n] + nzlen[n] + nlenlen[n] <= 427);
+ }
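+ /* For reference (not generated here): DER encodes a length below 128 in a single
+ * byte (short form, e.g. 0x45 for 69 bytes) and longer lengths in long form as
+ * 0x80 + n followed by n big-endian length bytes (e.g. 0x82 0x01 0x2C for 300).
+ * The nlenlen[] values above choose between these notations for each INTEGER. */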
+
+ /* The total length of the data to go, so far */
+ tlen = 2 + nlenlen[0] + nlen[0] + nzlen[0] + 2 + nlenlen[1] + nlen[1] + nzlen[1];
+ CHECK(tlen <= 856);
+
+ /* The length of the garbage inside the tuple. */
+ elen = (der || indet) ? 0 : secp256k1_rand_int(980 - tlen) * secp256k1_rand_int(8) / 8;
+ if (elen != 0) {
+ *certainly_not_der = 1;
+ }
+ tlen += elen;
+ CHECK(tlen <= 980);
+
+ /* The length of the garbage after the end of the tuple. */
+ glen = der ? 0 : secp256k1_rand_int(990 - tlen) * secp256k1_rand_int(8) / 8;
+ if (glen != 0) {
+ *certainly_not_der = 1;
+ }
+ CHECK(tlen + glen <= 990);
+
+ /* Write the tuple header. */
+ sig[(*len)++] = 0x30;
+ if (indet) {
+ /* Indeterminate length */
+ sig[(*len)++] = 0x80;
+ *certainly_not_der = 1;
+ } else {
+ int tlenlen = tlen < 128 ? 0 : (tlen < 256 ? 1 : 2);
+ if (!der) {
+ int add = secp256k1_rand_int(127 - tlenlen) * secp256k1_rand_int(16) * secp256k1_rand_int(16) / 256;
+ tlenlen += add;
+ if (add != 0) {
+ *certainly_not_der = 1;
+ }
+ }
+ if (tlenlen == 0) {
+ /* Short length notation */
+ sig[(*len)++] = tlen;
+ } else {
+ /* Long length notation */
+ sig[(*len)++] = 128 + tlenlen;
+ assign_big_endian(sig + *len, tlenlen, tlen);
+ *len += tlenlen;
+ }
+ tlen += tlenlen;
+ }
+ tlen += 2;
+ CHECK(tlen + glen <= 1119);
+
+ for (n = 0; n < 2; n++) {
+ /* Write the integer header. */
+ sig[(*len)++] = 0x02;
+ if (nlenlen[n] == 0) {
+ /* Short length notation */
+ sig[(*len)++] = nlen[n] + nzlen[n];
+ } else {
+ /* Long length notation. */
+ sig[(*len)++] = 128 + nlenlen[n];
+ assign_big_endian(sig + *len, nlenlen[n], nlen[n] + nzlen[n]);
+ *len += nlenlen[n];
+ }
+ /* Write zero padding */
+ while (nzlen[n] > 0) {
+ sig[(*len)++] = 0x00;
+ nzlen[n]--;
+ }
+ if (nlen[n] == 32 && !nlow[n]) {
+ /* Special extra 16 0xFF bytes in "high" 32-byte numbers */
+ int i;
+ for (i = 0; i < 16; i++) {
+ sig[(*len)++] = 0xFF;
+ }
+ nlen[n] -= 16;
+ }
+ /* Write first byte of number */
+ if (nlen[n] > 0) {
+ sig[(*len)++] = nhbyte[n];
+ nlen[n]--;
+ }
+ /* Generate remaining random bytes of number */
+ secp256k1_rand_bytes_test(sig + *len, nlen[n]);
+ *len += nlen[n];
+ nlen[n] = 0;
+ }
+
+ /* Generate random garbage inside tuple. */
+ secp256k1_rand_bytes_test(sig + *len, elen);
+ *len += elen;
+
+ /* Generate end-of-contents bytes. */
+ if (indet) {
+ sig[(*len)++] = 0;
+ sig[(*len)++] = 0;
+ tlen += 2;
+ }
+ CHECK(tlen + glen <= 1121);
+
+ /* Generate random garbage outside tuple. */
+ secp256k1_rand_bytes_test(sig + *len, glen);
+ *len += glen;
+ tlen += glen;
+ CHECK(tlen <= 1121);
+ CHECK(tlen == *len);
+}
+
+void run_ecdsa_der_parse(void) {
+ int i,j;
+ for (i = 0; i < 200 * count; i++) {
+ unsigned char buffer[2048];
+ size_t buflen = 0;
+ int certainly_der = 0;
+ int certainly_not_der = 0;
+ random_ber_signature(buffer, &buflen, &certainly_der, &certainly_not_der);
+ CHECK(buflen <= 2048);
+ for (j = 0; j < 16; j++) {
+ int ret = 0;
+ if (j > 0) {
+ damage_array(buffer, &buflen);
+ /* We don't know anything anymore about the DERness of the result */
+ certainly_der = 0;
+ certainly_not_der = 0;
+ }
+ ret = test_ecdsa_der_parse(buffer, buflen, certainly_der, certainly_not_der);
+ if (ret != 0) {
+ size_t k;
+ fprintf(stderr, "Failure %x on ", ret);
+ for (k = 0; k < buflen; k++) {
+ fprintf(stderr, "%02x ", buffer[k]);
+ }
+ fprintf(stderr, "\n");
+ }
+ CHECK(ret == 0);
+ }
+ }
+}
+
+/* Tests several edge cases. */
+void test_ecdsa_edge_cases(void) {
+ int t;
+ secp256k1_ecdsa_signature sig;
+
+ /* Test the case where ECDSA recomputes a point that is infinity. */
+ {
+ secp256k1_gej keyj;
+ secp256k1_ge key;
+ secp256k1_scalar msg;
+ secp256k1_scalar sr, ss;
+ secp256k1_scalar_set_int(&ss, 1);
+ secp256k1_scalar_negate(&ss, &ss);
+ secp256k1_scalar_inverse(&ss, &ss);
+ secp256k1_scalar_set_int(&sr, 1);
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &keyj, &sr);
+ secp256k1_ge_set_gej(&key, &keyj);
+ msg = ss;
+ CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0);
+ }
+
+ /* Verify signature with r of zero fails. */
+ {
+ const unsigned char pubkey_mods_zero[33] = {
+ 0x02, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xfe, 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0,
+ 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41,
+ 0x41
+ };
+ secp256k1_ge key;
+ secp256k1_scalar msg;
+ secp256k1_scalar sr, ss;
+ secp256k1_scalar_set_int(&ss, 1);
+ secp256k1_scalar_set_int(&msg, 0);
+ secp256k1_scalar_set_int(&sr, 0);
+ CHECK(secp256k1_eckey_pubkey_parse(&key, pubkey_mods_zero, 33));
+ CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0);
+ }
+
+ /* Verify signature with s of zero fails. */
+ {
+ const unsigned char pubkey[33] = {
+ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01
+ };
+ secp256k1_ge key;
+ secp256k1_scalar msg;
+ secp256k1_scalar sr, ss;
+ secp256k1_scalar_set_int(&ss, 0);
+ secp256k1_scalar_set_int(&msg, 0);
+ secp256k1_scalar_set_int(&sr, 1);
+ CHECK(secp256k1_eckey_pubkey_parse(&key, pubkey, 33));
+ CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0);
+ }
+
+ /* Verify signature with message 0 passes. */
+ {
+ const unsigned char pubkey[33] = {
+ 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x02
+ };
+ const unsigned char pubkey2[33] = {
+ 0x02, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xfe, 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0,
+ 0x3b, 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41,
+ 0x43
+ };
+ secp256k1_ge key;
+ secp256k1_ge key2;
+ secp256k1_scalar msg;
+ secp256k1_scalar sr, ss;
+ secp256k1_scalar_set_int(&ss, 2);
+ secp256k1_scalar_set_int(&msg, 0);
+ secp256k1_scalar_set_int(&sr, 2);
+ CHECK(secp256k1_eckey_pubkey_parse(&key, pubkey, 33));
+ CHECK(secp256k1_eckey_pubkey_parse(&key2, pubkey2, 33));
+ CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1);
+ CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1);
+ secp256k1_scalar_negate(&ss, &ss);
+ CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1);
+ CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1);
+ secp256k1_scalar_set_int(&ss, 1);
+ CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0);
+ CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 0);
+ }
+
+ /* Verify signature with message 1 passes. */
+ {
+ const unsigned char pubkey[33] = {
+ 0x02, 0x14, 0x4e, 0x5a, 0x58, 0xef, 0x5b, 0x22,
+ 0x6f, 0xd2, 0xe2, 0x07, 0x6a, 0x77, 0xcf, 0x05,
+ 0xb4, 0x1d, 0xe7, 0x4a, 0x30, 0x98, 0x27, 0x8c,
+ 0x93, 0xe6, 0xe6, 0x3c, 0x0b, 0xc4, 0x73, 0x76,
+ 0x25
+ };
+ const unsigned char pubkey2[33] = {
+ 0x02, 0x8a, 0xd5, 0x37, 0xed, 0x73, 0xd9, 0x40,
+ 0x1d, 0xa0, 0x33, 0xd2, 0xdc, 0xf0, 0xaf, 0xae,
+ 0x34, 0xcf, 0x5f, 0x96, 0x4c, 0x73, 0x28, 0x0f,
+ 0x92, 0xc0, 0xf6, 0x9d, 0xd9, 0xb2, 0x09, 0x10,
+ 0x62
+ };
+ const unsigned char csr[32] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x45, 0x51, 0x23, 0x19, 0x50, 0xb7, 0x5f, 0xc4,
+ 0x40, 0x2d, 0xa1, 0x72, 0x2f, 0xc9, 0xba, 0xeb
+ };
+ secp256k1_ge key;
+ secp256k1_ge key2;
+ secp256k1_scalar msg;
+ secp256k1_scalar sr, ss;
+ secp256k1_scalar_set_int(&ss, 1);
+ secp256k1_scalar_set_int(&msg, 1);
+ secp256k1_scalar_set_b32(&sr, csr, NULL);
+ CHECK(secp256k1_eckey_pubkey_parse(&key, pubkey, 33));
+ CHECK(secp256k1_eckey_pubkey_parse(&key2, pubkey2, 33));
+ CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1);
+ CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1);
+ secp256k1_scalar_negate(&ss, &ss);
+ CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1);
+ CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 1);
+ secp256k1_scalar_set_int(&ss, 2);
+ secp256k1_scalar_inverse_var(&ss, &ss);
+ CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0);
+ CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key2, &msg) == 0);
+ }
+
+ /* Verify signature with message -1 passes. */
+ {
+ const unsigned char pubkey[33] = {
+ 0x03, 0xaf, 0x97, 0xff, 0x7d, 0x3a, 0xf6, 0xa0,
+ 0x02, 0x94, 0xbd, 0x9f, 0x4b, 0x2e, 0xd7, 0x52,
+ 0x28, 0xdb, 0x49, 0x2a, 0x65, 0xcb, 0x1e, 0x27,
+ 0x57, 0x9c, 0xba, 0x74, 0x20, 0xd5, 0x1d, 0x20,
+ 0xf1
+ };
+ const unsigned char csr[32] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ 0x45, 0x51, 0x23, 0x19, 0x50, 0xb7, 0x5f, 0xc4,
+ 0x40, 0x2d, 0xa1, 0x72, 0x2f, 0xc9, 0xba, 0xee
+ };
+ secp256k1_ge key;
+ secp256k1_scalar msg;
+ secp256k1_scalar sr, ss;
+ secp256k1_scalar_set_int(&ss, 1);
+ secp256k1_scalar_set_int(&msg, 1);
+ secp256k1_scalar_negate(&msg, &msg);
+ secp256k1_scalar_set_b32(&sr, csr, NULL);
+ CHECK(secp256k1_eckey_pubkey_parse(&key, pubkey, 33));
+ CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1);
+ secp256k1_scalar_negate(&ss, &ss);
+ CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 1);
+ secp256k1_scalar_set_int(&ss, 3);
+ secp256k1_scalar_inverse_var(&ss, &ss);
+ CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sr, &ss, &key, &msg) == 0);
+ }
+
+ /* Signature where s would be zero. */
+ {
+ secp256k1_pubkey pubkey;
+ size_t siglen;
+ int32_t ecount;
+ unsigned char signature[72];
+ static const unsigned char nonce[32] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ };
+ static const unsigned char nonce2[32] = {
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,
+ 0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFE,
+ 0xBA,0xAE,0xDC,0xE6,0xAF,0x48,0xA0,0x3B,
+ 0xBF,0xD2,0x5E,0x8C,0xD0,0x36,0x41,0x40
+ };
+ const unsigned char key[32] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01,
+ };
+ unsigned char msg[32] = {
+ 0x86, 0x41, 0x99, 0x81, 0x06, 0x23, 0x44, 0x53,
+ 0xaa, 0x5f, 0x9d, 0x6a, 0x31, 0x78, 0xf4, 0xf7,
+ 0xb8, 0x12, 0xe0, 0x0b, 0x81, 0x7a, 0x77, 0x62,
+ 0x65, 0xdf, 0xdd, 0x31, 0xb9, 0x3e, 0x29, 0xa9,
+ };
+ ecount = 0;
+ secp256k1_context_set_illegal_callback(ctx, counting_illegal_callback_fn, &ecount);
+ CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce) == 0);
+ CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce2) == 0);
+ msg[31] = 0xaa;
+ CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce) == 1);
+ CHECK(ecount == 0);
+ CHECK(secp256k1_ecdsa_sign(ctx, NULL, msg, key, precomputed_nonce_function, nonce2) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_ecdsa_sign(ctx, &sig, NULL, key, precomputed_nonce_function, nonce2) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, NULL, precomputed_nonce_function, nonce2) == 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, precomputed_nonce_function, nonce2) == 1);
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, key) == 1);
+ CHECK(secp256k1_ecdsa_verify(ctx, NULL, msg, &pubkey) == 0);
+ CHECK(ecount == 4);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, NULL, &pubkey) == 0);
+ CHECK(ecount == 5);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg, NULL) == 0);
+ CHECK(ecount == 6);
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg, &pubkey) == 1);
+ CHECK(ecount == 6);
+ CHECK(secp256k1_ec_pubkey_create(ctx, &pubkey, NULL) == 0);
+ CHECK(ecount == 7);
+ /* That pubkeyload fails via an ARGCHECK is a little odd but makes sense because pubkeys are an opaque data type. */
+ CHECK(secp256k1_ecdsa_verify(ctx, &sig, msg, &pubkey) == 0);
+ CHECK(ecount == 8);
+ siglen = 72;
+ CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, NULL, &siglen, &sig) == 0);
+ CHECK(ecount == 9);
+ CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, signature, NULL, &sig) == 0);
+ CHECK(ecount == 10);
+ CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, signature, &siglen, NULL) == 0);
+ CHECK(ecount == 11);
+ CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, signature, &siglen, &sig) == 1);
+ CHECK(ecount == 11);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, NULL, signature, siglen) == 0);
+ CHECK(ecount == 12);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, NULL, siglen) == 0);
+ CHECK(ecount == 13);
+ CHECK(secp256k1_ecdsa_signature_parse_der(ctx, &sig, signature, siglen) == 1);
+ CHECK(ecount == 13);
+ siglen = 10;
+ /* Too little room for a signature does not fail via ARGCHECK. */
+ CHECK(secp256k1_ecdsa_signature_serialize_der(ctx, signature, &siglen, &sig) == 0);
+ CHECK(ecount == 13);
+ ecount = 0;
+ CHECK(secp256k1_ecdsa_signature_normalize(ctx, NULL, NULL) == 0);
+ CHECK(ecount == 1);
+ CHECK(secp256k1_ecdsa_signature_serialize_compact(ctx, NULL, &sig) == 0);
+ CHECK(ecount == 2);
+ CHECK(secp256k1_ecdsa_signature_serialize_compact(ctx, signature, NULL) == 0);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_ecdsa_signature_serialize_compact(ctx, signature, &sig) == 1);
+ CHECK(ecount == 3);
+ CHECK(secp256k1_ecdsa_signature_parse_compact(ctx, NULL, signature) == 0);
+ CHECK(ecount == 4);
+ CHECK(secp256k1_ecdsa_signature_parse_compact(ctx, &sig, NULL) == 0);
+ CHECK(ecount == 5);
+ CHECK(secp256k1_ecdsa_signature_parse_compact(ctx, &sig, signature) == 1);
+ CHECK(ecount == 5);
+ memset(signature, 255, 64);
+ CHECK(secp256k1_ecdsa_signature_parse_compact(ctx, &sig, signature) == 0);
+ CHECK(ecount == 5);
+ secp256k1_context_set_illegal_callback(ctx, NULL, NULL);
+ }
+
+ /* Nonce function corner cases. */
+ for (t = 0; t < 2; t++) {
+ static const unsigned char zero[32] = {0x00};
+ int i;
+ unsigned char key[32];
+ unsigned char msg[32];
+ secp256k1_ecdsa_signature sig2;
+ secp256k1_scalar sr[512], ss;
+ const unsigned char *extra;
+ extra = t == 0 ? NULL : zero;
+ memset(msg, 0, 32);
+ msg[31] = 1;
+ /* High key results in signature failure. */
+ memset(key, 0xFF, 32);
+ CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, NULL, extra) == 0);
+ CHECK(is_empty_signature(&sig));
+ /* Zero key results in signature failure. */
+ memset(key, 0, 32);
+ CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, NULL, extra) == 0);
+ CHECK(is_empty_signature(&sig));
+ /* Nonce function failure results in signature failure. */
+ key[31] = 1;
+ CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, nonce_function_test_fail, extra) == 0);
+ CHECK(is_empty_signature(&sig));
+ /* The retry loop successfully makes its way to the first good value. */
+ CHECK(secp256k1_ecdsa_sign(ctx, &sig, msg, key, nonce_function_test_retry, extra) == 1);
+ CHECK(!is_empty_signature(&sig));
+ CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, nonce_function_rfc6979, extra) == 1);
+ CHECK(!is_empty_signature(&sig2));
+ CHECK(memcmp(&sig, &sig2, sizeof(sig)) == 0);
+ /* The default nonce function is deterministic. */
+ CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1);
+ CHECK(!is_empty_signature(&sig2));
+ CHECK(memcmp(&sig, &sig2, sizeof(sig)) == 0);
+ /* The default nonce function changes output with different messages. */
+ for(i = 0; i < 256; i++) {
+ int j;
+ msg[0] = i;
+ CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1);
+ CHECK(!is_empty_signature(&sig2));
+ secp256k1_ecdsa_signature_load(ctx, &sr[i], &ss, &sig2);
+ for (j = 0; j < i; j++) {
+ CHECK(!secp256k1_scalar_eq(&sr[i], &sr[j]));
+ }
+ }
+ msg[0] = 0;
+ msg[31] = 2;
+ /* The default nonce function changes output with different keys. */
+ for(i = 256; i < 512; i++) {
+ int j;
+ key[0] = i - 256;
+ CHECK(secp256k1_ecdsa_sign(ctx, &sig2, msg, key, NULL, extra) == 1);
+ CHECK(!is_empty_signature(&sig2));
+ secp256k1_ecdsa_signature_load(ctx, &sr[i], &ss, &sig2);
+ for (j = 0; j < i; j++) {
+ CHECK(!secp256k1_scalar_eq(&sr[i], &sr[j]));
+ }
+ }
+ key[0] = 0;
+ }
+
+ {
+ /* Check that optional nonce arguments do not have equivalent effect. */
+ const unsigned char zeros[32] = {0};
+ unsigned char nonce[32];
+ unsigned char nonce2[32];
+ unsigned char nonce3[32];
+ unsigned char nonce4[32];
+ VG_UNDEF(nonce,32);
+ VG_UNDEF(nonce2,32);
+ VG_UNDEF(nonce3,32);
+ VG_UNDEF(nonce4,32);
+ CHECK(nonce_function_rfc6979(nonce, zeros, zeros, NULL, NULL, 0) == 1);
+ VG_CHECK(nonce,32);
+ CHECK(nonce_function_rfc6979(nonce2, zeros, zeros, zeros, NULL, 0) == 1);
+ VG_CHECK(nonce2,32);
+ CHECK(nonce_function_rfc6979(nonce3, zeros, zeros, NULL, (void *)zeros, 0) == 1);
+ VG_CHECK(nonce3,32);
+ CHECK(nonce_function_rfc6979(nonce4, zeros, zeros, zeros, (void *)zeros, 0) == 1);
+ VG_CHECK(nonce4,32);
+ CHECK(memcmp(nonce, nonce2, 32) != 0);
+ CHECK(memcmp(nonce, nonce3, 32) != 0);
+ CHECK(memcmp(nonce, nonce4, 32) != 0);
+ CHECK(memcmp(nonce2, nonce3, 32) != 0);
+ CHECK(memcmp(nonce2, nonce4, 32) != 0);
+ CHECK(memcmp(nonce3, nonce4, 32) != 0);
+ }
+
+
+ /* Privkey export where pubkey is the point at infinity. */
+ {
+ unsigned char privkey[300];
+ unsigned char seckey[32] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xfe,
+ 0xba, 0xae, 0xdc, 0xe6, 0xaf, 0x48, 0xa0, 0x3b,
+ 0xbf, 0xd2, 0x5e, 0x8c, 0xd0, 0x36, 0x41, 0x41,
+ };
+ size_t outlen = 300;
+ CHECK(!ec_privkey_export_der(ctx, privkey, &outlen, seckey, 0));
+ outlen = 300;
+ CHECK(!ec_privkey_export_der(ctx, privkey, &outlen, seckey, 1));
+ }
+}
+
+void run_ecdsa_edge_cases(void) {
+ test_ecdsa_edge_cases();
+}
+
+#ifdef ENABLE_OPENSSL_TESTS
+EC_KEY *get_openssl_key(const unsigned char *key32) {
+ unsigned char privkey[300];
+ size_t privkeylen;
+ const unsigned char* pbegin = privkey;
+ int compr = secp256k1_rand_bits(1);
+ EC_KEY *ec_key = EC_KEY_new_by_curve_name(NID_secp256k1);
+ CHECK(ec_privkey_export_der(ctx, privkey, &privkeylen, key32, compr));
+ CHECK(d2i_ECPrivateKey(&ec_key, &pbegin, privkeylen));
+ CHECK(EC_KEY_check_key(ec_key));
+ return ec_key;
+}
+
+void test_ecdsa_openssl(void) {
+ secp256k1_gej qj;
+ secp256k1_ge q;
+ secp256k1_scalar sigr, sigs;
+ secp256k1_scalar one;
+ secp256k1_scalar msg2;
+ secp256k1_scalar key, msg;
+ EC_KEY *ec_key;
+ unsigned int sigsize = 80;
+ size_t secp_sigsize = 80;
+ unsigned char message[32];
+ unsigned char signature[80];
+ unsigned char key32[32];
+ secp256k1_rand256_test(message);
+ secp256k1_scalar_set_b32(&msg, message, NULL);
+ random_scalar_order_test(&key);
+ secp256k1_scalar_get_b32(key32, &key);
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &qj, &key);
+ secp256k1_ge_set_gej(&q, &qj);
+ ec_key = get_openssl_key(key32);
+ CHECK(ec_key != NULL);
+ CHECK(ECDSA_sign(0, message, sizeof(message), signature, &sigsize, ec_key));
+ CHECK(secp256k1_ecdsa_sig_parse(&sigr, &sigs, signature, sigsize));
+ CHECK(secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &q, &msg));
+ secp256k1_scalar_set_int(&one, 1);
+ secp256k1_scalar_add(&msg2, &msg, &one);
+ CHECK(!secp256k1_ecdsa_sig_verify(&ctx->ecmult_ctx, &sigr, &sigs, &q, &msg2));
+
+ random_sign(&sigr, &sigs, &key, &msg, NULL);
+ CHECK(secp256k1_ecdsa_sig_serialize(signature, &secp_sigsize, &sigr, &sigs));
+ CHECK(ECDSA_verify(0, message, sizeof(message), signature, secp_sigsize, ec_key) == 1);
+
+ EC_KEY_free(ec_key);
+}
+
+void run_ecdsa_openssl(void) {
+ int i;
+ for (i = 0; i < 10*count; i++) {
+ test_ecdsa_openssl();
+ }
+}
+#endif
+
+#ifdef ENABLE_MODULE_ECDH
+# include "modules/ecdh/tests_impl.h"
+#endif
+
+#ifdef ENABLE_MODULE_SCHNORR
+# include "modules/schnorr/tests_impl.h"
+#endif
+
+#ifdef ENABLE_MODULE_RECOVERY
+# include "modules/recovery/tests_impl.h"
+#endif
+
+int main(int argc, char **argv) {
+ unsigned char seed16[16] = {0};
+ unsigned char run32[32] = {0};
+ /* find iteration count */
+ if (argc > 1) {
+ count = strtol(argv[1], NULL, 0);
+ }
+
+ /* find random seed */
+ if (argc > 2) {
+ int pos = 0;
+ const char* ch = argv[2];
+ while (pos < 16 && ch[0] != 0 && ch[1] != 0) {
+ unsigned short sh;
+ if (sscanf(ch, "%2hx", &sh)) {
+ seed16[pos] = sh;
+ } else {
+ break;
+ }
+ ch += 2;
+ pos++;
+ }
+ } else {
+ FILE *frand = fopen("/dev/urandom", "r");
+ if ((frand == NULL) || !fread(&seed16, sizeof(seed16), 1, frand)) {
+ uint64_t t = time(NULL) * (uint64_t)1337;
+ seed16[0] ^= t;
+ seed16[1] ^= t >> 8;
+ seed16[2] ^= t >> 16;
+ seed16[3] ^= t >> 24;
+ seed16[4] ^= t >> 32;
+ seed16[5] ^= t >> 40;
+ seed16[6] ^= t >> 48;
+ seed16[7] ^= t >> 56;
+ }
+ if (frand != NULL) {
+ fclose(frand);
+ }
+ }
+ secp256k1_rand_seed(seed16);
+
+ printf("test count = %i\n", count);
+ printf("random seed = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", seed16[0], seed16[1], seed16[2], seed16[3], seed16[4], seed16[5], seed16[6], seed16[7], seed16[8], seed16[9], seed16[10], seed16[11], seed16[12], seed16[13], seed16[14], seed16[15]);
+
+ /* initialize */
+ run_context_tests();
+ ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+ if (secp256k1_rand_bits(1)) {
+ secp256k1_rand256(run32);
+ CHECK(secp256k1_context_randomize(ctx, secp256k1_rand_bits(1) ? run32 : NULL));
+ }
+
+ run_rand_bits();
+ run_rand_int();
+
+ run_sha256_tests();
+ run_hmac_sha256_tests();
+ run_rfc6979_hmac_sha256_tests();
+
+#ifndef USE_NUM_NONE
+ /* num tests */
+ run_num_smalltests();
+#endif
+
+ /* scalar tests */
+ run_scalar_tests();
+
+ /* field tests */
+ run_field_inv();
+ run_field_inv_var();
+ run_field_inv_all_var();
+ run_field_misc();
+ run_field_convert();
+ run_sqr();
+ run_sqrt();
+
+ /* group tests */
+ run_ge();
+ run_group_decompress();
+
+ /* ecmult tests */
+ run_wnaf();
+ run_point_times_order();
+ run_ecmult_chain();
+ run_ecmult_constants();
+ run_ecmult_gen_blind();
+ run_ecmult_const_tests();
+ run_ec_combine();
+
+ /* endomorphism tests */
+#ifdef USE_ENDOMORPHISM
+ run_endomorphism_tests();
+#endif
+
+ /* EC point parser test */
+ run_ec_pubkey_parse_test();
+
+ /* EC key edge cases */
+ run_eckey_edge_case_test();
+
+#ifdef ENABLE_MODULE_ECDH
+ /* ecdh tests */
+ run_ecdh_tests();
+#endif
+
+ /* ecdsa tests */
+ run_random_pubkeys();
+ run_ecdsa_der_parse();
+ run_ecdsa_sign_verify();
+ run_ecdsa_end_to_end();
+ run_ecdsa_edge_cases();
+#ifdef ENABLE_OPENSSL_TESTS
+ run_ecdsa_openssl();
+#endif
+
+#ifdef ENABLE_MODULE_SCHNORR
+ /* Schnorr tests */
+ run_schnorr_tests();
+#endif
+
+#ifdef ENABLE_MODULE_RECOVERY
+ /* ECDSA pubkey recovery tests */
+ run_recovery_tests();
+#endif
+
+ secp256k1_rand256(run32);
+ printf("random run = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", run32[0], run32[1], run32[2], run32[3], run32[4], run32[5], run32[6], run32[7], run32[8], run32[9], run32[10], run32[11], run32[12], run32[13], run32[14], run32[15]);
+
+ /* shutdown */
+ secp256k1_context_destroy(ctx);
+
+ printf("no problems found\n");
+ return 0;
+}
diff --git a/crypto/secp256k1/libsecp256k1/src/tests_exhaustive.c b/crypto/secp256k1/libsecp256k1/src/tests_exhaustive.c
new file mode 100644
index 000000000..b040bb073
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/tests_exhaustive.c
@@ -0,0 +1,470 @@
+/***********************************************************************
+ * Copyright (c) 2016 Andrew Poelstra *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <time.h>
+
+#undef USE_ECMULT_STATIC_PRECOMPUTATION
+
+#ifndef EXHAUSTIVE_TEST_ORDER
+/* see group_impl.h for allowable values */
+#define EXHAUSTIVE_TEST_ORDER 13
+#define EXHAUSTIVE_TEST_LAMBDA 9 /* cube root of 1 mod 13 */
+#endif
+
+#include "include/secp256k1.h"
+#include "group.h"
+#include "secp256k1.c"
+#include "testrand_impl.h"
+
+#ifdef ENABLE_MODULE_RECOVERY
+#include "src/modules/recovery/main_impl.h"
+#include "include/secp256k1_recovery.h"
+#endif
+
+/** stolen from tests.c */
+void ge_equals_ge(const secp256k1_ge *a, const secp256k1_ge *b) {
+ CHECK(a->infinity == b->infinity);
+ if (a->infinity) {
+ return;
+ }
+ CHECK(secp256k1_fe_equal_var(&a->x, &b->x));
+ CHECK(secp256k1_fe_equal_var(&a->y, &b->y));
+}
+
+void ge_equals_gej(const secp256k1_ge *a, const secp256k1_gej *b) {
+ secp256k1_fe z2s;
+ secp256k1_fe u1, u2, s1, s2;
+ CHECK(a->infinity == b->infinity);
+ if (a->infinity) {
+ return;
+ }
+ /* Check a.x * b.z^2 == b.x && a.y * b.z^3 == b.y, to avoid inverses. */
+ secp256k1_fe_sqr(&z2s, &b->z);
+ secp256k1_fe_mul(&u1, &a->x, &z2s);
+ u2 = b->x; secp256k1_fe_normalize_weak(&u2);
+ secp256k1_fe_mul(&s1, &a->y, &z2s); secp256k1_fe_mul(&s1, &s1, &b->z);
+ s2 = b->y; secp256k1_fe_normalize_weak(&s2);
+ CHECK(secp256k1_fe_equal_var(&u1, &u2));
+ CHECK(secp256k1_fe_equal_var(&s1, &s2));
+}
+
+void random_fe(secp256k1_fe *x) {
+ unsigned char bin[32];
+ do {
+ secp256k1_rand256(bin);
+ if (secp256k1_fe_set_b32(x, bin)) {
+ return;
+ }
+ } while(1);
+}
+/** END stolen from tests.c */
+
+int secp256k1_nonce_function_smallint(unsigned char *nonce32, const unsigned char *msg32,
+ const unsigned char *key32, const unsigned char *algo16,
+ void *data, unsigned int attempt) {
+ secp256k1_scalar s;
+ int *idata = data;
+ (void)msg32;
+ (void)key32;
+ (void)algo16;
+ /* Some nonces cannot be used because they'd cause s and/or r to be zero.
+ * The signing function has retry logic here that just re-calls the nonce
+ * function with an increased `attempt`. So if attempt > 0 this means we
+ * need to change the nonce to avoid an infinite loop. */
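+ /* Concretely: with *idata == 5 and attempt == 1, *idata becomes 6 and nonce 6 is
+ * returned; each further retry advances the stored nonce by one modulo the order. */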
+ if (attempt > 0) {
+ *idata = (*idata + 1) % EXHAUSTIVE_TEST_ORDER;
+ }
+ secp256k1_scalar_set_int(&s, *idata);
+ secp256k1_scalar_get_b32(nonce32, &s);
+ return 1;
+}
+
+#ifdef USE_ENDOMORPHISM
+void test_exhaustive_endomorphism(const secp256k1_ge *group, int order) {
+ int i;
+ for (i = 0; i < order; i++) {
+ secp256k1_ge res;
+ secp256k1_ge_mul_lambda(&res, &group[i]);
+ ge_equals_ge(&group[i * EXHAUSTIVE_TEST_LAMBDA % EXHAUSTIVE_TEST_ORDER], &res);
+ }
+}
+#endif
+
+void test_exhaustive_addition(const secp256k1_ge *group, const secp256k1_gej *groupj, int order) {
+ int i, j;
+
+ /* Sanity-check (and check infinity functions) */
+ CHECK(secp256k1_ge_is_infinity(&group[0]));
+ CHECK(secp256k1_gej_is_infinity(&groupj[0]));
+ for (i = 1; i < order; i++) {
+ CHECK(!secp256k1_ge_is_infinity(&group[i]));
+ CHECK(!secp256k1_gej_is_infinity(&groupj[i]));
+ }
+
+ /* Check all addition formulae */
+ for (j = 0; j < order; j++) {
+ secp256k1_fe fe_inv;
+ secp256k1_fe_inv(&fe_inv, &groupj[j].z);
+ for (i = 0; i < order; i++) {
+ secp256k1_ge zless_gej;
+ secp256k1_gej tmp;
+ /* add_var */
+ secp256k1_gej_add_var(&tmp, &groupj[i], &groupj[j], NULL);
+ ge_equals_gej(&group[(i + j) % order], &tmp);
+ /* add_ge */
+ if (j > 0) {
+ secp256k1_gej_add_ge(&tmp, &groupj[i], &group[j]);
+ ge_equals_gej(&group[(i + j) % order], &tmp);
+ }
+ /* add_ge_var */
+ secp256k1_gej_add_ge_var(&tmp, &groupj[i], &group[j], NULL);
+ ge_equals_gej(&group[(i + j) % order], &tmp);
+ /* add_zinv_var */
+ zless_gej.infinity = groupj[j].infinity;
+ zless_gej.x = groupj[j].x;
+ zless_gej.y = groupj[j].y;
+ secp256k1_gej_add_zinv_var(&tmp, &groupj[i], &zless_gej, &fe_inv);
+ ge_equals_gej(&group[(i + j) % order], &tmp);
+ }
+ }
+
+ /* Check doubling */
+ for (i = 0; i < order; i++) {
+ secp256k1_gej tmp;
+ if (i > 0) {
+ secp256k1_gej_double_nonzero(&tmp, &groupj[i], NULL);
+ ge_equals_gej(&group[(2 * i) % order], &tmp);
+ }
+ secp256k1_gej_double_var(&tmp, &groupj[i], NULL);
+ ge_equals_gej(&group[(2 * i) % order], &tmp);
+ }
+
+ /* Check negation */
+ for (i = 1; i < order; i++) {
+ secp256k1_ge tmp;
+ secp256k1_gej tmpj;
+ secp256k1_ge_neg(&tmp, &group[i]);
+ ge_equals_ge(&group[order - i], &tmp);
+ secp256k1_gej_neg(&tmpj, &groupj[i]);
+ ge_equals_gej(&group[order - i], &tmpj);
+ }
+}
+
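+/* Exhaustively check secp256k1_ecmult: with P = r_log*G, the result of na*P + ng*G
+ * must equal group[(na*r_log + ng) mod order] for every combination of inputs. */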
+void test_exhaustive_ecmult(const secp256k1_context *ctx, const secp256k1_ge *group, const secp256k1_gej *groupj, int order) {
+ int i, j, r_log;
+ for (r_log = 1; r_log < order; r_log++) {
+ for (j = 0; j < order; j++) {
+ for (i = 0; i < order; i++) {
+ secp256k1_gej tmp;
+ secp256k1_scalar na, ng;
+ secp256k1_scalar_set_int(&na, i);
+ secp256k1_scalar_set_int(&ng, j);
+
+ secp256k1_ecmult(&ctx->ecmult_ctx, &tmp, &groupj[r_log], &na, &ng);
+ ge_equals_gej(&group[(i * r_log + j) % order], &tmp);
+
+ if (i > 0) {
+ secp256k1_ecmult_const(&tmp, &group[i], &ng);
+ ge_equals_gej(&group[(i * j) % order], &tmp);
+ }
+ }
+ }
+ }
+}
+
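+/* Compute the ECDSA r value that nonce k would give in this small group:
+ * r = (k*G).x coerced into the scalar field. */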
+void r_from_k(secp256k1_scalar *r, const secp256k1_ge *group, int k) {
+ secp256k1_fe x;
+ unsigned char x_bin[32];
+ k %= EXHAUSTIVE_TEST_ORDER;
+ x = group[k].x;
+ secp256k1_fe_normalize(&x);
+ secp256k1_fe_get_b32(x_bin, &x);
+ secp256k1_scalar_set_b32(r, x_bin, NULL);
+}
+
+void test_exhaustive_verify(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
+ int s, r, msg, key;
+ for (s = 1; s < order; s++) {
+ for (r = 1; r < order; r++) {
+ for (msg = 1; msg < order; msg++) {
+ for (key = 1; key < order; key++) {
+ secp256k1_ge nonconst_ge;
+ secp256k1_ecdsa_signature sig;
+ secp256k1_pubkey pk;
+ secp256k1_scalar sk_s, msg_s, r_s, s_s;
+ secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s;
+ int k, should_verify;
+ unsigned char msg32[32];
+
+ secp256k1_scalar_set_int(&s_s, s);
+ secp256k1_scalar_set_int(&r_s, r);
+ secp256k1_scalar_set_int(&msg_s, msg);
+ secp256k1_scalar_set_int(&sk_s, key);
+
+ /* Verify by hand */
+ /* Run through every k value that gives us this r and check that *one* works.
+ * Note there could be none, there could be multiple, ECDSA is weird. */
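+ /* Sketch of the check: (r, s) on message m under key x verifies iff some nonce k
+ * satisfies r == (k*G).x mod n and s*k == m + r*x (mod n); in this tiny group we
+ * simply enumerate every k that yields this r and test the congruence. */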
+ should_verify = 0;
+ for (k = 0; k < order; k++) {
+ secp256k1_scalar check_x_s;
+ r_from_k(&check_x_s, group, k);
+ if (r_s == check_x_s) {
+ secp256k1_scalar_set_int(&s_times_k_s, k);
+ secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s);
+ secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s);
+ secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s);
+ should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s);
+ }
+ }
+ /* nb we have a "high s" rule */
+ should_verify &= !secp256k1_scalar_is_high(&s_s);
+
+ /* Verify by calling verify */
+ secp256k1_ecdsa_signature_save(&sig, &r_s, &s_s);
+ memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge));
+ secp256k1_pubkey_save(&pk, &nonconst_ge);
+ secp256k1_scalar_get_b32(msg32, &msg_s);
+ CHECK(should_verify ==
+ secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk));
+ }
+ }
+ }
+ }
+}
+
+void test_exhaustive_sign(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
+ int i, j, k;
+
+ /* Loop */
+ for (i = 1; i < order; i++) { /* message */
+ for (j = 1; j < order; j++) { /* key */
+ for (k = 1; k < order; k++) { /* nonce */
+ const int starting_k = k;
+ secp256k1_ecdsa_signature sig;
+ secp256k1_scalar sk, msg, r, s, expected_r;
+ unsigned char sk32[32], msg32[32];
+ secp256k1_scalar_set_int(&msg, i);
+ secp256k1_scalar_set_int(&sk, j);
+ secp256k1_scalar_get_b32(sk32, &sk);
+ secp256k1_scalar_get_b32(msg32, &msg);
+
+ secp256k1_ecdsa_sign(ctx, &sig, msg32, sk32, secp256k1_nonce_function_smallint, &k);
+
+ secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig);
+ /* Note that we compute expected_r *after* signing -- this is important
+ * because our nonce-computing function might change k during
+ * signing. */
+ r_from_k(&expected_r, group, k);
+ CHECK(r == expected_r);
+ CHECK((k * s) % order == (i + r * j) % order ||
+ (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order);
+
+ /* Overflow means we've tried every possible nonce */
+ if (k < starting_k) {
+ break;
+ }
+ }
+ }
+ }
+
+ /* We would like to verify zero-knowledge here by counting how often every
+ * possible (s, r) tuple appears, but because the group order is larger
+ * than the field order, when coercing the x-values to scalar values, some
+ * appear more often than others, so we are actually not zero-knowledge.
+ * (This effect also appears in the real code, but the difference is on the
+ * order of 1/2^128th the field order, so the deviation is not useful to a
+ * computationally bounded attacker.)
+ */
+}
+
+#ifdef ENABLE_MODULE_RECOVERY
+void test_exhaustive_recovery_sign(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
+ int i, j, k;
+
+ /* Loop */
+ for (i = 1; i < order; i++) { /* message */
+ for (j = 1; j < order; j++) { /* key */
+ for (k = 1; k < order; k++) { /* nonce */
+ const int starting_k = k;
+ secp256k1_fe r_dot_y_normalized;
+ secp256k1_ecdsa_recoverable_signature rsig;
+ secp256k1_ecdsa_signature sig;
+ secp256k1_scalar sk, msg, r, s, expected_r;
+ unsigned char sk32[32], msg32[32];
+ int expected_recid;
+ int recid;
+ secp256k1_scalar_set_int(&msg, i);
+ secp256k1_scalar_set_int(&sk, j);
+ secp256k1_scalar_get_b32(sk32, &sk);
+ secp256k1_scalar_get_b32(msg32, &msg);
+
+ secp256k1_ecdsa_sign_recoverable(ctx, &rsig, msg32, sk32, secp256k1_nonce_function_smallint, &k);
+
+ /* Check directly */
+ secp256k1_ecdsa_recoverable_signature_load(ctx, &r, &s, &recid, &rsig);
+ r_from_k(&expected_r, group, k);
+ CHECK(r == expected_r);
+ CHECK((k * s) % order == (i + r * j) % order ||
+ (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order);
+ /* In computing the recid, there is an overflow condition that is disabled in
+ * scalar_low_impl.h `secp256k1_scalar_set_b32` because almost every r.y value
+ * will exceed the group order, and our signing code always holds out for r
+ * values that don't overflow, so with a proper overflow check the tests would
+ * loop indefinitely. */
+ r_dot_y_normalized = group[k].y;
+ secp256k1_fe_normalize(&r_dot_y_normalized);
+ /* Also the recovery id is flipped depending if we hit the low-s branch */
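+ /* (The low bit of recid encodes the parity of R.y; enforcing low-s effectively
+ * negates R, which flips that parity, hence the two cases below.) */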
+ if ((k * s) % order == (i + r * j) % order) {
+ expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 1 : 0;
+ } else {
+ expected_recid = secp256k1_fe_is_odd(&r_dot_y_normalized) ? 0 : 1;
+ }
+ CHECK(recid == expected_recid);
+
+ /* Convert to a standard sig then check */
+ secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
+ secp256k1_ecdsa_signature_load(ctx, &r, &s, &sig);
+ /* Note that we compute expected_r *after* signing -- this is important
+ * because our nonce-computing function might change k during
+ * signing. */
+ r_from_k(&expected_r, group, k);
+ CHECK(r == expected_r);
+ CHECK((k * s) % order == (i + r * j) % order ||
+ (k * (EXHAUSTIVE_TEST_ORDER - s)) % order == (i + r * j) % order);
+
+ /* Overflow means we've tried every possible nonce */
+ if (k < starting_k) {
+ break;
+ }
+ }
+ }
+ }
+}
+
+void test_exhaustive_recovery_verify(const secp256k1_context *ctx, const secp256k1_ge *group, int order) {
+ /* This is essentially a copy of test_exhaustive_verify, with recovery added */
+ int s, r, msg, key;
+ for (s = 1; s < order; s++) {
+ for (r = 1; r < order; r++) {
+ for (msg = 1; msg < order; msg++) {
+ for (key = 1; key < order; key++) {
+ secp256k1_ge nonconst_ge;
+ secp256k1_ecdsa_recoverable_signature rsig;
+ secp256k1_ecdsa_signature sig;
+ secp256k1_pubkey pk;
+ secp256k1_scalar sk_s, msg_s, r_s, s_s;
+ secp256k1_scalar s_times_k_s, msg_plus_r_times_sk_s;
+ int recid = 0;
+ int k, should_verify;
+ unsigned char msg32[32];
+
+ secp256k1_scalar_set_int(&s_s, s);
+ secp256k1_scalar_set_int(&r_s, r);
+ secp256k1_scalar_set_int(&msg_s, msg);
+ secp256k1_scalar_set_int(&sk_s, key);
+ secp256k1_scalar_get_b32(msg32, &msg_s);
+
+ /* Verify by hand */
+ /* Run through every k value that gives us this r and check that *one* works.
+ * Note there could be none, there could be multiple, ECDSA is weird. */
+ should_verify = 0;
+ for (k = 0; k < order; k++) {
+ secp256k1_scalar check_x_s;
+ r_from_k(&check_x_s, group, k);
+ if (r_s == check_x_s) {
+ secp256k1_scalar_set_int(&s_times_k_s, k);
+ secp256k1_scalar_mul(&s_times_k_s, &s_times_k_s, &s_s);
+ secp256k1_scalar_mul(&msg_plus_r_times_sk_s, &r_s, &sk_s);
+ secp256k1_scalar_add(&msg_plus_r_times_sk_s, &msg_plus_r_times_sk_s, &msg_s);
+ should_verify |= secp256k1_scalar_eq(&s_times_k_s, &msg_plus_r_times_sk_s);
+ }
+ }
+ /* nb we have a "high s" rule */
+ should_verify &= !secp256k1_scalar_is_high(&s_s);
+
+ /* We would like to try recovering the pubkey and checking that it matches,
+ * but pubkey recovery is impossible in the exhaustive tests (the reason
+ * being that there are 12 nonzero r values, 12 nonzero points, and no
+ * overlap between the sets, so there are no valid signatures). */
+
+ /* Verify by converting to a standard signature and calling verify */
+ secp256k1_ecdsa_recoverable_signature_save(&rsig, &r_s, &s_s, recid);
+ secp256k1_ecdsa_recoverable_signature_convert(ctx, &sig, &rsig);
+ memcpy(&nonconst_ge, &group[sk_s], sizeof(nonconst_ge));
+ secp256k1_pubkey_save(&pk, &nonconst_ge);
+ CHECK(should_verify ==
+ secp256k1_ecdsa_verify(ctx, &sig, msg32, &pk));
+ }
+ }
+ }
+ }
+}
+#endif
+
+int main(void) {
+ int i;
+ secp256k1_gej groupj[EXHAUSTIVE_TEST_ORDER];
+ secp256k1_ge group[EXHAUSTIVE_TEST_ORDER];
+
+ /* Build context */
+ secp256k1_context *ctx = secp256k1_context_create(SECP256K1_CONTEXT_SIGN | SECP256K1_CONTEXT_VERIFY);
+
+ /* TODO set z = 1, then do num_tests runs with random z values */
+
+ /* Generate the entire group */
+ secp256k1_gej_set_infinity(&groupj[0]);
+ secp256k1_ge_set_gej(&group[0], &groupj[0]);
+ for (i = 1; i < EXHAUSTIVE_TEST_ORDER; i++) {
+ /* Set a different random z-value for each Jacobian point */
+ secp256k1_fe z;
+ random_fe(&z);
+
+ secp256k1_gej_add_ge(&groupj[i], &groupj[i - 1], &secp256k1_ge_const_g);
+ secp256k1_ge_set_gej(&group[i], &groupj[i]);
+ secp256k1_gej_rescale(&groupj[i], &z);
+
+ /* Verify against ecmult_gen */
+ {
+ secp256k1_scalar scalar_i;
+ secp256k1_gej generatedj;
+ secp256k1_ge generated;
+
+ secp256k1_scalar_set_int(&scalar_i, i);
+ secp256k1_ecmult_gen(&ctx->ecmult_gen_ctx, &generatedj, &scalar_i);
+ secp256k1_ge_set_gej(&generated, &generatedj);
+
+ CHECK(group[i].infinity == 0);
+ CHECK(generated.infinity == 0);
+ CHECK(secp256k1_fe_equal_var(&generated.x, &group[i].x));
+ CHECK(secp256k1_fe_equal_var(&generated.y, &group[i].y));
+ }
+ }
+
+ /* Run the tests */
+#ifdef USE_ENDOMORPHISM
+ test_exhaustive_endomorphism(group, EXHAUSTIVE_TEST_ORDER);
+#endif
+ test_exhaustive_addition(group, groupj, EXHAUSTIVE_TEST_ORDER);
+ test_exhaustive_ecmult(ctx, group, groupj, EXHAUSTIVE_TEST_ORDER);
+ test_exhaustive_sign(ctx, group, EXHAUSTIVE_TEST_ORDER);
+ test_exhaustive_verify(ctx, group, EXHAUSTIVE_TEST_ORDER);
+
+#ifdef ENABLE_MODULE_RECOVERY
+ test_exhaustive_recovery_sign(ctx, group, EXHAUSTIVE_TEST_ORDER);
+ test_exhaustive_recovery_verify(ctx, group, EXHAUSTIVE_TEST_ORDER);
+#endif
+
+ secp256k1_context_destroy(ctx);
+ return 0;
+}
+
diff --git a/crypto/secp256k1/libsecp256k1/src/util.h b/crypto/secp256k1/libsecp256k1/src/util.h
new file mode 100644
index 000000000..4092a86c9
--- /dev/null
+++ b/crypto/secp256k1/libsecp256k1/src/util.h
@@ -0,0 +1,113 @@
+/**********************************************************************
+ * Copyright (c) 2013, 2014 Pieter Wuille *
+ * Distributed under the MIT software license, see the accompanying *
+ * file COPYING or http://www.opensource.org/licenses/mit-license.php.*
+ **********************************************************************/
+
+#ifndef _SECP256K1_UTIL_H_
+#define _SECP256K1_UTIL_H_
+
+#if defined HAVE_CONFIG_H
+#include "libsecp256k1-config.h"
+#endif
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+
+typedef struct {
+ void (*fn)(const char *text, void* data);
+ const void* data;
+} secp256k1_callback;
+
+static SECP256K1_INLINE void secp256k1_callback_call(const secp256k1_callback * const cb, const char * const text) {
+ cb->fn(text, (void*)cb->data);
+}
+
+#ifdef DETERMINISTIC
+#define TEST_FAILURE(msg) do { \
+ fprintf(stderr, "%s\n", msg); \
+ abort(); \
+} while(0);
+#else
+#define TEST_FAILURE(msg) do { \
+ fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__, msg); \
+ abort(); \
+} while(0)
+#endif
+
+#ifdef HAVE_BUILTIN_EXPECT
+#define EXPECT(x,c) __builtin_expect((x),(c))
+#else
+#define EXPECT(x,c) (x)
+#endif
+
+#ifdef DETERMINISTIC
+#define CHECK(cond) do { \
+ if (EXPECT(!(cond), 0)) { \
+ TEST_FAILURE("test condition failed"); \
+ } \
+} while(0)
+#else
+#define CHECK(cond) do { \
+ if (EXPECT(!(cond), 0)) { \
+ TEST_FAILURE("test condition failed: " #cond); \
+ } \
+} while(0)
+#endif
+
+/* Like assert(), but when VERIFY is defined, and side-effect safe. */
+#if defined(COVERAGE)
+#define VERIFY_CHECK(check)
+#define VERIFY_SETUP(stmt)
+#elif defined(VERIFY)
+#define VERIFY_CHECK CHECK
+#define VERIFY_SETUP(stmt) do { stmt; } while(0)
+#else
+#define VERIFY_CHECK(cond) do { (void)(cond); } while(0)
+#define VERIFY_SETUP(stmt)
+#endif
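+/* Illustration: VERIFY_CHECK(x >= 0) aborts on failure in VERIFY builds, reduces to
+ * (void)(x >= 0) in normal builds (so side effects in the condition still run), and
+ * disappears entirely under COVERAGE. */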
+
+static SECP256K1_INLINE void *checked_malloc(const secp256k1_callback* cb, size_t size) {
+ void *ret = malloc(size);
+ if (ret == NULL) {
+ secp256k1_callback_call(cb, "Out of memory");
+ }
+ return ret;
+}
+
+/* Macro for restrict, when available and not in a VERIFY build. */
+#if defined(SECP256K1_BUILD) && defined(VERIFY)
+# define SECP256K1_RESTRICT
+#else
+# if (!defined(__STDC_VERSION__) || (__STDC_VERSION__ < 199901L) )
+# if SECP256K1_GNUC_PREREQ(3,0)
+# define SECP256K1_RESTRICT __restrict__
+# elif (defined(_MSC_VER) && _MSC_VER >= 1400)
+# define SECP256K1_RESTRICT __restrict
+# else
+# define SECP256K1_RESTRICT
+# endif
+# else
+# define SECP256K1_RESTRICT restrict
+# endif
+#endif
+
+#if defined(_WIN32)
+# define I64FORMAT "I64d"
+# define I64uFORMAT "I64u"
+#else
+# define I64FORMAT "lld"
+# define I64uFORMAT "llu"
+#endif
+
+#if defined(HAVE___INT128)
+# if defined(__GNUC__)
+# define SECP256K1_GNUC_EXT __extension__
+# else
+# define SECP256K1_GNUC_EXT
+# endif
+SECP256K1_GNUC_EXT typedef unsigned __int128 uint128_t;
+#endif
+
+#endif
diff --git a/crypto/secp256k1/panic_cb.go b/crypto/secp256k1/panic_cb.go
new file mode 100644
index 000000000..6d59a1d24
--- /dev/null
+++ b/crypto/secp256k1/panic_cb.go
@@ -0,0 +1,21 @@
+// Copyright 2015 Jeffrey Wilcke, Felix Lange, Gustav Simonsson. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be found in
+// the LICENSE file.
+
+package secp256k1
+
+import "C"
+import "unsafe"
+
+// Callbacks for converting libsecp256k1 internal faults into
+// recoverable Go panics.
+
+//export secp256k1GoPanicIllegal
+func secp256k1GoPanicIllegal(msg *C.char, data unsafe.Pointer) {
+ panic("illegal argument: " + C.GoString(msg))
+}
+
+//export secp256k1GoPanicError
+func secp256k1GoPanicError(msg *C.char, data unsafe.Pointer) {
+ panic("internal error: " + C.GoString(msg))
+}
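These callbacks turn libsecp256k1's illegal-argument and internal-error hooks into Go panics rather than process aborts. A caller that wants to contain such a fault can wrap the cgo entry points with recover; the sketch below is illustrative only (the import path and the safeRecover helper are assumptions, not part of the patch):

```go
package main

import (
	"fmt"

	"github.com/CovenantSQL/CovenantSQL/crypto/secp256k1" // assumed import path
)

// safeRecover is a hypothetical wrapper: if the C library reports an illegal
// argument or internal error, the exported callbacks above panic, and this
// defer turns that panic back into an ordinary error value.
func safeRecover(msg, sig []byte) (pub []byte, err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("libsecp256k1 fault: %v", r)
		}
	}()
	return secp256k1.RecoverPubkey(msg, sig)
}

func main() {
	msg := make([]byte, 32) // 32-byte message hash
	sig := make([]byte, 65) // deliberately meaningless signature
	if _, err := safeRecover(msg, sig); err != nil {
		fmt.Println("handled:", err) // either a returned error or a recovered panic
	}
}
```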
diff --git a/crypto/secp256k1/secp256.go b/crypto/secp256k1/secp256.go
new file mode 100644
index 000000000..5ed6c3a1e
--- /dev/null
+++ b/crypto/secp256k1/secp256.go
@@ -0,0 +1,174 @@
+// Copyright 2015 Jeffrey Wilcke, Felix Lange, Gustav Simonsson. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be found in
+// the LICENSE file.
+
+// Package secp256k1 wraps the bitcoin secp256k1 C library.
+package secp256k1
+
+/*
+#cgo CFLAGS: -I./libsecp256k1
+#cgo CFLAGS: -I./libsecp256k1/src/
+#define USE_NUM_NONE
+#define USE_FIELD_10X26
+#define USE_FIELD_INV_BUILTIN
+#define USE_SCALAR_8X32
+#define USE_SCALAR_INV_BUILTIN
+#define NDEBUG
+#include "./libsecp256k1/src/secp256k1.c"
+#include "./libsecp256k1/src/modules/recovery/main_impl.h"
+#include "ext.h"
+
+typedef void (*callbackFunc) (const char* msg, void* data);
+extern void secp256k1GoPanicIllegal(const char* msg, void* data);
+extern void secp256k1GoPanicError(const char* msg, void* data);
+*/
+import "C"
+
+import (
+ "errors"
+ "math/big"
+ "unsafe"
+)
+
+var context *C.secp256k1_context
+
+func init() {
+ // Creating a signing/verification context takes around 20 ms on a modern CPU.
+ context = C.secp256k1_context_create_sign_verify()
+ C.secp256k1_context_set_illegal_callback(context, C.callbackFunc(C.secp256k1GoPanicIllegal), nil)
+ C.secp256k1_context_set_error_callback(context, C.callbackFunc(C.secp256k1GoPanicError), nil)
+}
+
+var (
+ // ErrInvalidMsgLen is returned when the message is not exactly 32 bytes long.
+ ErrInvalidMsgLen = errors.New("invalid message length, need 32 bytes")
+ // ErrInvalidSignatureLen is returned when the signature has an unexpected length.
+ ErrInvalidSignatureLen = errors.New("invalid signature length")
+ // ErrInvalidRecoveryID is returned when the signature recovery id is out of range.
+ ErrInvalidRecoveryID = errors.New("invalid signature recovery id")
+ // ErrInvalidKey is returned when the private key is invalid.
+ ErrInvalidKey = errors.New("invalid private key")
+ // ErrInvalidPubkey is returned when the public key is invalid.
+ ErrInvalidPubkey = errors.New("invalid public key")
+ // ErrSignFailed is returned when signing fails.
+ ErrSignFailed = errors.New("signing failed")
+ // ErrRecoverFailed is returned when public key recovery fails.
+ ErrRecoverFailed = errors.New("recovery failed")
+)
+
+// Sign creates a recoverable ECDSA signature.
+// The produced signature is in the 65-byte [R || S || V] format where V is 0 or 1.
+//
+// The caller is responsible for ensuring that msg cannot be chosen
+// directly by an attacker. It is usually preferable to use a cryptographic
+// hash function on any input before handing it to this function.
+func Sign(msg []byte, seckey []byte) ([]byte, error) {
+ if len(msg) != 32 {
+ return nil, ErrInvalidMsgLen
+ }
+ if len(seckey) != 32 {
+ return nil, ErrInvalidKey
+ }
+ seckeydata := (*C.uchar)(unsafe.Pointer(&seckey[0]))
+ if C.secp256k1_ec_seckey_verify(context, seckeydata) != 1 {
+ return nil, ErrInvalidKey
+ }
+
+ var (
+ msgdata = (*C.uchar)(unsafe.Pointer(&msg[0]))
+ noncefunc = C.secp256k1_nonce_function_rfc6979
+ sigstruct C.secp256k1_ecdsa_recoverable_signature
+ )
+ if C.secp256k1_ecdsa_sign_recoverable(context, &sigstruct, msgdata, seckeydata, noncefunc, nil) == 0 {
+ return nil, ErrSignFailed
+ }
+
+ var (
+ sig = make([]byte, 65)
+ sigdata = (*C.uchar)(unsafe.Pointer(&sig[0]))
+ recid C.int
+ )
+ C.secp256k1_ecdsa_recoverable_signature_serialize_compact(context, sigdata, &recid, &sigstruct)
+ sig[64] = byte(recid) // add back recid to get 65 bytes sig
+ return sig, nil
+}
+
+// RecoverPubkey returns the public key of the signer.
+// msg must be the 32-byte hash of the message to be signed.
+// sig must be a 65-byte compact ECDSA signature containing the
+// recovery id as the last element.
+func RecoverPubkey(msg []byte, sig []byte) ([]byte, error) {
+ if len(msg) != 32 {
+ return nil, ErrInvalidMsgLen
+ }
+ if err := checkSignature(sig); err != nil {
+ return nil, err
+ }
+
+ var (
+ pubkey = make([]byte, 65)
+ sigdata = (*C.uchar)(unsafe.Pointer(&sig[0]))
+ msgdata = (*C.uchar)(unsafe.Pointer(&msg[0]))
+ )
+ if C.secp256k1_ext_ecdsa_recover(context, (*C.uchar)(unsafe.Pointer(&pubkey[0])), sigdata, msgdata) == 0 {
+ return nil, ErrRecoverFailed
+ }
+ return pubkey, nil
+}
+
+// VerifySignature checks that the given pubkey created signature over message.
+// The signature should be in [R || S] format.
+func VerifySignature(pubkey, msg, signature []byte) bool {
+ if len(msg) != 32 || len(signature) != 64 || len(pubkey) == 0 {
+ return false
+ }
+ sigdata := (*C.uchar)(unsafe.Pointer(&signature[0]))
+ msgdata := (*C.uchar)(unsafe.Pointer(&msg[0]))
+ keydata := (*C.uchar)(unsafe.Pointer(&pubkey[0]))
+ return C.secp256k1_ext_ecdsa_verify(context, sigdata, msgdata, keydata, C.size_t(len(pubkey))) != 0
+}
+
+// DecompressPubkey parses a public key in the 33-byte compressed format.
+// It returns non-nil coordinates if the public key is valid.
+func DecompressPubkey(pubkey []byte) (x, y *big.Int) {
+ if len(pubkey) != 33 {
+ return nil, nil
+ }
+ var (
+ pubkeydata = (*C.uchar)(unsafe.Pointer(&pubkey[0]))
+ pubkeylen = C.size_t(len(pubkey))
+ out = make([]byte, 65)
+ outdata = (*C.uchar)(unsafe.Pointer(&out[0]))
+ outlen = C.size_t(len(out))
+ )
+ if C.secp256k1_ext_reencode_pubkey(context, outdata, outlen, pubkeydata, pubkeylen) == 0 {
+ return nil, nil
+ }
+ return new(big.Int).SetBytes(out[1:33]), new(big.Int).SetBytes(out[33:])
+}
+
+// CompressPubkey encodes a public key to 33-byte compressed format.
+func CompressPubkey(x, y *big.Int) []byte {
+ var (
+ pubkey = S256().Marshal(x, y)
+ pubkeydata = (*C.uchar)(unsafe.Pointer(&pubkey[0]))
+ pubkeylen = C.size_t(len(pubkey))
+ out = make([]byte, 33)
+ outdata = (*C.uchar)(unsafe.Pointer(&out[0]))
+ outlen = C.size_t(len(out))
+ )
+ if C.secp256k1_ext_reencode_pubkey(context, outdata, outlen, pubkeydata, pubkeylen) == 0 {
+ panic("libsecp256k1 error")
+ }
+ return out
+}
+
+func checkSignature(sig []byte) error {
+ if len(sig) != 65 {
+ return ErrInvalidSignatureLen
+ }
+ if sig[64] >= 4 {
+ return ErrInvalidRecoveryID
+ }
+ return nil
+}
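A minimal usage sketch of the API added above; the import path is assumed and the key handling mirrors the test helper that follows, so treat it as illustration rather than canonical code:

```go
package main

import (
	"bytes"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"

	"github.com/CovenantSQL/CovenantSQL/crypto/secp256k1" // assumed import path
)

func main() {
	// Generate a key on the secp256k1 curve and flatten it the same way the
	// package's tests do.
	key, err := ecdsa.GenerateKey(secp256k1.S256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	pubkey := elliptic.Marshal(secp256k1.S256(), key.X, key.Y)
	seckey := make([]byte, 32)
	blob := key.D.Bytes()
	copy(seckey[32-len(blob):], blob)

	// Sign a 32-byte digest and recover the uncompressed public key from it.
	digest := sha256.Sum256([]byte("hello"))
	sig, err := secp256k1.Sign(digest[:], seckey)
	if err != nil {
		panic(err)
	}
	recovered, _ := secp256k1.RecoverPubkey(digest[:], sig)
	fmt.Println("recovered matches:", bytes.Equal(recovered, pubkey))

	// VerifySignature wants just the 64-byte [R || S] part, without V.
	fmt.Println("verified:", secp256k1.VerifySignature(pubkey, digest[:], sig[:64]))
}
```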
diff --git a/crypto/secp256k1/secp256_test.go b/crypto/secp256k1/secp256_test.go
new file mode 100644
index 000000000..ef2a3a379
--- /dev/null
+++ b/crypto/secp256k1/secp256_test.go
@@ -0,0 +1,238 @@
+// Copyright 2015 Jeffrey Wilcke, Felix Lange, Gustav Simonsson. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be found in
+// the LICENSE file.
+
+package secp256k1
+
+import (
+ "bytes"
+ "crypto/ecdsa"
+ "crypto/elliptic"
+ "crypto/rand"
+ "encoding/hex"
+ "io"
+ "testing"
+)
+
+const TestCount = 1000
+
+func generateKeyPair() (pubkey, privkey []byte) {
+ key, err := ecdsa.GenerateKey(S256(), rand.Reader)
+ if err != nil {
+ panic(err)
+ }
+ pubkey = elliptic.Marshal(S256(), key.X, key.Y)
+
+ privkey = make([]byte, 32)
+ blob := key.D.Bytes()
+ copy(privkey[32-len(blob):], blob)
+
+ return pubkey, privkey
+}
+
+func csprngEntropy(n int) []byte {
+ buf := make([]byte, n)
+ if _, err := io.ReadFull(rand.Reader, buf); err != nil {
+ panic("reading from crypto/rand failed: " + err.Error())
+ }
+ return buf
+}
+
+func randSig() []byte {
+ sig := csprngEntropy(65)
+ sig[32] &= 0x70
+ sig[64] %= 4
+ return sig
+}
+
+// compactSigCheck tests for signature malleability:
+// the highest bit of the ECDSA s value, in the 33rd byte, must be 0.
+func compactSigCheck(t *testing.T, sig []byte) {
+ var b = int(sig[32])
+ if b < 0 {
+ t.Errorf("highest bit is negative: %d", b)
+ }
+ if ((b >> 7) == 1) != ((b & 0x80) == 0x80) {
+ t.Errorf("highest bit: %d bit >> 7: %d", b, b>>7)
+ }
+ if (b & 0x80) == 0x80 {
+ t.Errorf("highest bit: %d bit & 0x80: %d", b, b&0x80)
+ }
+}
+
+func TestSignatureValidity(t *testing.T) {
+ pubkey, seckey := generateKeyPair()
+ msg := csprngEntropy(32)
+ sig, err := Sign(msg, seckey)
+ if err != nil {
+ t.Errorf("signature error: %s", err)
+ }
+ compactSigCheck(t, sig)
+ if len(pubkey) != 65 {
+ t.Errorf("pubkey length mismatch: want: 65 have: %d", len(pubkey))
+ }
+ if len(seckey) != 32 {
+ t.Errorf("seckey length mismatch: want: 32 have: %d", len(seckey))
+ }
+ if len(sig) != 65 {
+ t.Errorf("sig length mismatch: want: 65 have: %d", len(sig))
+ }
+ recid := int(sig[64])
+ if recid > 3 || recid < 0 {
+ t.Errorf("sig recid mismatch: want: within 0 to 3 have: %d", recid)
+ }
+}
+
+func TestInvalidRecoveryID(t *testing.T) {
+ _, seckey := generateKeyPair()
+ msg := csprngEntropy(32)
+ sig, _ := Sign(msg, seckey)
+ sig[64] = 99
+ _, err := RecoverPubkey(msg, sig)
+ if err != ErrInvalidRecoveryID {
+ t.Fatalf("got %q, want %q", err, ErrInvalidRecoveryID)
+ }
+}
+
+func TestSignAndRecover(t *testing.T) {
+ pubkey1, seckey := generateKeyPair()
+ msg := csprngEntropy(32)
+ sig, err := Sign(msg, seckey)
+ if err != nil {
+ t.Errorf("signature error: %s", err)
+ }
+ pubkey2, err := RecoverPubkey(msg, sig)
+ if err != nil {
+ t.Errorf("recover error: %s", err)
+ }
+ if !bytes.Equal(pubkey1, pubkey2) {
+ t.Errorf("pubkey mismatch: want: %x have: %x", pubkey1, pubkey2)
+ }
+}
+
+func TestSignDeterministic(t *testing.T) {
+ _, seckey := generateKeyPair()
+ msg := make([]byte, 32)
+ copy(msg, "hi there")
+
+ sig1, err := Sign(msg, seckey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ sig2, err := Sign(msg, seckey)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !bytes.Equal(sig1, sig2) {
+ t.Fatal("signatures not equal")
+ }
+}
+
+func TestRandomMessagesWithSameKey(t *testing.T) {
+ pubkey, seckey := generateKeyPair()
+ keys := func() ([]byte, []byte) {
+ return pubkey, seckey
+ }
+ signAndRecoverWithRandomMessages(t, keys)
+}
+
+func TestRandomMessagesWithRandomKeys(t *testing.T) {
+ keys := func() ([]byte, []byte) {
+ pubkey, seckey := generateKeyPair()
+ return pubkey, seckey
+ }
+ signAndRecoverWithRandomMessages(t, keys)
+}
+
+func signAndRecoverWithRandomMessages(t *testing.T, keys func() ([]byte, []byte)) {
+ for i := 0; i < TestCount; i++ {
+ pubkey1, seckey := keys()
+ msg := csprngEntropy(32)
+ sig, err := Sign(msg, seckey)
+ if err != nil {
+ t.Fatalf("signature error: %s", err)
+ }
+ if sig == nil {
+ t.Fatal("signature is nil")
+ }
+ compactSigCheck(t, sig)
+
+ // TODO: why do we flip around the recovery id?
+ sig[len(sig)-1] %= 4
+
+ pubkey2, err := RecoverPubkey(msg, sig)
+ if err != nil {
+ t.Fatalf("recover error: %s", err)
+ }
+ if pubkey2 == nil {
+ t.Error("pubkey is nil")
+ }
+ if !bytes.Equal(pubkey1, pubkey2) {
+ t.Fatalf("pubkey mismatch: want: %x have: %x", pubkey1, pubkey2)
+ }
+ }
+}
+
+func TestRecoveryOfRandomSignature(t *testing.T) {
+ pubkey1, _ := generateKeyPair()
+ msg := csprngEntropy(32)
+
+ for i := 0; i < TestCount; i++ {
+ // recovery can sometimes work, but if so should always give wrong pubkey
+ pubkey2, _ := RecoverPubkey(msg, randSig())
+ if bytes.Equal(pubkey1, pubkey2) {
+ t.Fatalf("iteration: %d: pubkey mismatch: do NOT want %x: ", i, pubkey2)
+ }
+ }
+}
+
+func TestRandomMessagesAgainstValidSig(t *testing.T) {
+ pubkey1, seckey := generateKeyPair()
+ msg := csprngEntropy(32)
+ sig, _ := Sign(msg, seckey)
+
+ for i := 0; i < TestCount; i++ {
+ msg = csprngEntropy(32)
+ pubkey2, _ := RecoverPubkey(msg, sig)
+ // recovery can sometimes work, but if so should always give wrong pubkey
+ if bytes.Equal(pubkey1, pubkey2) {
+ t.Fatalf("iteration: %d: pubkey mismatch: do NOT want %x: ", i, pubkey2)
+ }
+ }
+}
+
+// TestRecoverSanity is useful when the underlying libsecp256k1 API changes:
+// it quickly checks only the recover function without using the signature function.
+func TestRecoverSanity(t *testing.T) {
+ msg, _ := hex.DecodeString("ce0677bb30baa8cf067c88db9811f4333d131bf8bcf12fe7065d211dce971008")
+ sig, _ := hex.DecodeString("90f27b8b488db00b00606796d2987f6a5f59ae62ea05effe84fef5b8b0e549984a691139ad57a3f0b906637673aa2f63d1f55cb1a69199d4009eea23ceaddc9301")
+ pubkey1, _ := hex.DecodeString("04e32df42865e97135acfb65f3bae71bdc86f4d49150ad6a440b6f15878109880a0a2b2667f7e725ceea70c673093bf67663e0312623c8e091b13cf2c0f11ef652")
+ pubkey2, err := RecoverPubkey(msg, sig)
+ if err != nil {
+ t.Fatalf("recover error: %s", err)
+ }
+ if !bytes.Equal(pubkey1, pubkey2) {
+ t.Errorf("pubkey mismatch: want: %x have: %x", pubkey1, pubkey2)
+ }
+}
+
+func BenchmarkSign(b *testing.B) {
+ _, seckey := generateKeyPair()
+ msg := csprngEntropy(32)
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ Sign(msg, seckey)
+ }
+}
+
+func BenchmarkRecover(b *testing.B) {
+ msg := csprngEntropy(32)
+ _, seckey := generateKeyPair()
+ sig, _ := Sign(msg, seckey)
+ b.ResetTimer()
+
+ for i := 0; i < b.N; i++ {
+ RecoverPubkey(msg, sig)
+ }
+}
diff --git a/docker-compose.yml b/docker-compose.yml
index c201e34da..3f6ab07a3 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -5,6 +5,8 @@ services:
image: covenantsql.io/covenantsql:latest
container_name: covenantsql_bp_0
restart: always
+ ports:
+ - "11099:4661"
environment:
COVENANT_ROLE: blockproducer
COVENANT_CONF: ./node_0/config.yaml
@@ -22,6 +24,8 @@ services:
image: covenantsql.io/covenantsql:latest
container_name: covenantsql_bp_1
restart: always
+ ports:
+ - "11100:4661"
environment:
COVENANT_ROLE: blockproducer
COVENANT_CONF: ./node_1/config.yaml
@@ -39,6 +43,8 @@ services:
image: covenantsql.io/covenantsql:latest
container_name: covenantsql_bp_2
restart: always
+ ports:
+ - "11101:4661"
environment:
COVENANT_ROLE: blockproducer
COVENANT_CONF: ./node_2/config.yaml
@@ -56,6 +62,8 @@ services:
image: covenantsql.io/covenantsql:latest
container_name: covenantsql_miner_0
restart: always
+ ports:
+ - "11102:4661"
environment:
COVENANT_ROLE: miner
COVENANT_CONF: ./node_miner_0/config.yaml
@@ -73,6 +81,8 @@ services:
image: covenantsql.io/covenantsql:latest
container_name: covenantsql_miner_1
restart: always
+ ports:
+ - "11103:4661"
environment:
COVENANT_ROLE: miner
COVENANT_CONF: ./node_miner_1/config.yaml
@@ -90,6 +100,8 @@ services:
image: covenantsql.io/covenantsql:latest
container_name: covenantsql_miner_2
restart: always
+ ports:
+ - "11104:4661"
environment:
COVENANT_ROLE: miner
COVENANT_CONF: ./node_miner_2/config.yaml
@@ -104,11 +116,14 @@ services:
max-size: "1m"
max-file: "10"
covenantsql_adapter:
- image: covenantsql.io/covenantsql_adapter:latest
+ image: covenantsql.io/covenantsql:latest
container_name: covenantsql_adapter
restart: always
ports:
- "11105:4661"
+ environment:
+ COVENANT_ROLE: adapter
+ COVENANT_CONF: ./node_c/config.yaml
volumes:
- ./test/service/node_c/config.yaml:/app/config.yaml
- ./test/service/node_c/private.key:/app/private.key
@@ -136,6 +151,26 @@ services:
options:
max-size: "1m"
max-file: "10"
+ covenantsql_mysql_adapter:
+ image: covenantsql.io/covenantsql:latest
+ container_name: covenantsql_mysql_adapter
+ restart: always
+ ports:
+ - "11107:4664"
+ command: ["-listen", "0.0.0.0:4664"]
+ environment:
+ COVENANT_ROLE: mysql-adapter
+ COVENANT_CONF: ./node_mysql_adapter/config.yaml
+ volumes:
+ - ./test/service/node_mysql_adapter/:/app/node_mysql_adapter/
+ networks:
+ default:
+ ipv4_address: 172.254.1.10
+ logging:
+ driver: "json-file"
+ options:
+ max-size: "1m"
+ max-file: "10"
networks:
default:
diff --git a/kayak/boltdb_store.go b/kayak/boltdb_store.go
index 2c738f148..23ab9ce90 100644
--- a/kayak/boltdb_store.go
+++ b/kayak/boltdb_store.go
@@ -1,4 +1,5 @@
/*
+ * Copyright 2018 HashiCorp.
* Copyright 2018 The CovenantSQL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/kayak/boltdb_store_test.go b/kayak/boltdb_store_test.go
index 5c1bdadaa..b022bf62d 100644
--- a/kayak/boltdb_store_test.go
+++ b/kayak/boltdb_store_test.go
@@ -1,4 +1,5 @@
/*
+ * Copyright 2018 HashiCorp.
* Copyright 2018 The CovenantSQL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/kayak/doc.go b/kayak/doc.go
index 82ea99ba3..fc2851c61 100644
--- a/kayak/doc.go
+++ b/kayak/doc.go
@@ -16,5 +16,6 @@
/*
Package kayak is a simple configurable multi-purpose consensus sdk.
+The storage implementation contains code refactored from the original hashicorp/raft and hashicorp/raft-boltdb repositories.
*/
package kayak
diff --git a/kayak/inmem_store_test.go b/kayak/inmem_store_test.go
index 95152e64b..9eee8dadb 100644
--- a/kayak/inmem_store_test.go
+++ b/kayak/inmem_store_test.go
@@ -1,4 +1,5 @@
/*
+ * Copyright 2018 HashiCorp.
* Copyright 2018 The CovenantSQL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
diff --git a/kayak/twopc_runner.go b/kayak/twopc_runner.go
index 0cbb018b5..e36cd3648 100644
--- a/kayak/twopc_runner.go
+++ b/kayak/twopc_runner.go
@@ -510,7 +510,9 @@ func (r *TwoPCRunner) processPrepare(req Request) {
}
// init context
- r.currentContext, _ = context.WithTimeout(context.Background(), r.config.ProcessTimeout)
+ var cancelFunc context.CancelFunc
+ r.currentContext, cancelFunc = context.WithTimeout(context.Background(), r.config.ProcessTimeout)
+ _ = cancelFunc
// get log
var l *Log
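The hunk above keeps the cancel function returned by context.WithTimeout but immediately discards it, which silences go vet's lostcancel warning without ever releasing the timeout's resources. A more conventional pattern is sketched below, under the assumption (not part of this patch) that the runner could hold the cancel function in an extra field and cancel the previous context before installing a new one:

```go
package kayak

import (
	"context"
	"time"
)

// twoPCRunnerSketch only illustrates the context/cancel bookkeeping; the real
// TwoPCRunner has many more fields, and adding currentCancel to it is an
// assumption, not something this patch does.
type twoPCRunnerSketch struct {
	currentContext context.Context
	currentCancel  context.CancelFunc
}

func (r *twoPCRunnerSketch) resetProcessContext(timeout time.Duration) {
	if r.currentCancel != nil {
		r.currentCancel() // release the previous timeout's resources
	}
	r.currentContext, r.currentCancel = context.WithTimeout(context.Background(), timeout)
}
```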
diff --git a/kayak/types.go b/kayak/types.go
index c0355f64c..15d9c4749 100644
--- a/kayak/types.go
+++ b/kayak/types.go
@@ -221,7 +221,8 @@ func (c *Peers) Serialize() []byte {
// Sign generates signature.
func (c *Peers) Sign(signer *asymmetric.PrivateKey) error {
- sig, err := signer.Sign(c.Serialize())
+ h := hash.THashB(c.Serialize())
+ sig, err := signer.Sign(h)
if err != nil {
return fmt.Errorf("sign peer configuration failed: %s", err.Error())
@@ -234,7 +235,9 @@ func (c *Peers) Sign(signer *asymmetric.PrivateKey) error {
// Verify verify signature.
func (c *Peers) Verify() bool {
- return c.Signature.Verify(c.Serialize(), c.PubKey)
+ h := hash.THashB(c.Serialize())
+
+ return c.Signature.Verify(h, c.PubKey)
}
func (c *Peers) String() string {
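With this change the Peers signature covers hash.THashB of the serialization instead of the raw bytes, so both signing and verification operate on a fixed-length digest. A small round-trip sketch, using only the Peers methods and asymmetric key type shown in the hunk:

```go
package kayak

import (
	"github.com/CovenantSQL/CovenantSQL/crypto/asymmetric"
)

// signAndVerifyPeers sketches the round trip implied by the hunk above:
// Sign serializes the peer list, hashes it with hash.THashB and signs the
// digest; Verify recomputes the digest and checks it against the signature.
func signAndVerifyPeers(peers *Peers, priv *asymmetric.PrivateKey) (bool, error) {
	if err := peers.Sign(priv); err != nil {
		return false, err
	}
	return peers.Verify(), nil
}
```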
diff --git a/logo/arch.png b/logo/arch.png
old mode 100644
new mode 100755
index 842204e00..b0cb31b30
Binary files a/logo/arch.png and b/logo/arch.png differ
diff --git a/rpc/client_test.go b/rpc/client_test.go
index e826263a3..9db547150 100644
--- a/rpc/client_test.go
+++ b/rpc/client_test.go
@@ -47,7 +47,7 @@ func TestDial(t *testing.T) {
So(c, ShouldBeNil)
So(err, ShouldNotBeNil)
- kms.SetLocalNodeIDNonce([]byte(nodeID), &mine.Uint256{1, 1, 1, 1})
+ kms.SetLocalNodeIDNonce([]byte(nodeID), &mine.Uint256{A: 1, B: 1, C: 1, D: 1})
c, err = dial("tcp", l.Addr().String(), nil, nil, false)
So(c, ShouldNotBeNil)
So(err, ShouldBeNil)
diff --git a/rpc/leak_test.go b/rpc/leak_test.go
index b6fe70dce..73a26ccb4 100644
--- a/rpc/leak_test.go
+++ b/rpc/leak_test.go
@@ -46,16 +46,16 @@ func TestSessionPool_SessionBroken(t *testing.T) {
os.Remove(FJ(testWorkingDir, "./leak/leader/kayak.db"))
leader, err := utils.RunCommandNB(
- FJ(baseDir, "./bin/covenantsqld"),
+ FJ(baseDir, "./bin/cqld"),
[]string{"-config", FJ(testWorkingDir, "./leak/leader.yaml")},
"leak", testWorkingDir, logDir, false,
)
defer func() {
- leader.Process.Signal(syscall.SIGKILL)
+ leader.Cmd.Process.Signal(syscall.SIGKILL)
}()
- log.Debugf("leader pid %d", leader.Process.Pid)
+ log.Debugf("leader pid %d", leader.Cmd.Process.Pid)
time.Sleep(5 * time.Second)
route.InitKMS(conf.GConf.PubKeyStoreFile)
diff --git a/sqlchain/chain.go b/sqlchain/chain.go
index 924ff91b5..8817ed6e4 100644
--- a/sqlchain/chain.go
+++ b/sqlchain/chain.go
@@ -26,6 +26,7 @@ import (
"time"
pt "github.com/CovenantSQL/CovenantSQL/blockproducer/types"
+ "github.com/CovenantSQL/CovenantSQL/crypto"
"github.com/CovenantSQL/CovenantSQL/crypto/asymmetric"
"github.com/CovenantSQL/CovenantSQL/crypto/hash"
"github.com/CovenantSQL/CovenantSQL/crypto/kms"
@@ -38,6 +39,10 @@ import (
"github.com/CovenantSQL/CovenantSQL/utils/log"
wt "github.com/CovenantSQL/CovenantSQL/worker/types"
"github.com/coreos/bbolt"
+ "github.com/pkg/errors"
+ "github.com/syndtr/goleveldb/leveldb"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/util"
)
var (
@@ -47,9 +52,17 @@ var (
metaHeightIndexBucket = []byte("covenantsql-query-height-index-bucket")
metaRequestIndexBucket = []byte("covenantsql-query-request-index-bucket")
metaResponseIndexBucket = []byte("covenantsql-query-response-index-bucket")
- metaAckIndexBucket = []byte("covenantsql-query-ack-index-bucket")
+ metaAckIndexBucket = [4]byte{'Q', 'A', 'C', 'K'}
+ leveldbConf = opt.Options{}
)
+func init() {
+ leveldbConf.BlockSize = 4 * 1024 * 1024
+ leveldbConf.Compression = opt.SnappyCompression
+ leveldbConf.WriteBuffer = 64 * 1024 * 1024
+ leveldbConf.BlockCacheCapacity = 2 * leveldbConf.WriteBuffer
+}
+
// heightToKey converts a height in int32 to a key in bytes.
func heightToKey(h int32) (key []byte) {
key = make([]byte, 4)
@@ -64,11 +77,12 @@ func keyToHeight(k []byte) int32 {
// Chain represents a sql-chain.
type Chain struct {
- db *bolt.DB
- bi *blockIndex
- qi *queryIndex
- cl *rpc.Caller
- rt *runtime
+ db *bolt.DB
+ ldb *leveldb.DB
+ bi *blockIndex
+ qi *queryIndex
+ cl *rpc.Caller
+ rt *runtime
stopCh chan struct{}
blocks chan *ct.Block
@@ -98,14 +112,12 @@ func NewChain(c *Config) (chain *Chain, err error) {
}
err = c.Genesis.VerifyAsGenesis()
-
if err != nil {
return
}
// Open DB file
db, err := bolt.Open(c.DataFile, 0600, nil)
-
if err != nil {
return
}
@@ -128,9 +140,18 @@ func NewChain(c *Config) (chain *Chain, err error) {
return
}
+ // Open LevelDB
+ ldbFile := c.DataFile + ".ldb"
+ ldb, err := leveldb.OpenFile(ldbFile, &leveldbConf)
+ if err != nil {
+ err = errors.Wrapf(err, "open leveldb %s", ldbFile)
+ return
+ }
+
// Create chain state
chain = &Chain{
db: db,
+ ldb: ldb,
bi: newBlockIndex(c),
qi: newQueryIndex(),
cl: rpc.NewCaller(),
@@ -158,14 +179,22 @@ func NewChain(c *Config) (chain *Chain, err error) {
func LoadChain(c *Config) (chain *Chain, err error) {
// Open DB file
db, err := bolt.Open(c.DataFile, 0600, nil)
+ if err != nil {
+ return
+ }
+ // Open LevelDB
+ ldbFile := c.DataFile + ".ldb"
+ ldb, err := leveldb.OpenFile(ldbFile, &leveldbConf)
if err != nil {
+ err = errors.Wrapf(err, "open leveldb %s", ldbFile)
return
}
// Create chain state
chain = &Chain{
db: db,
+ ldb: ldb,
bi: newBlockIndex(c),
qi: newQueryIndex(),
cl: rpc.NewCaller(),
@@ -276,21 +305,43 @@ func LoadChain(c *Config) (chain *Chain, err error) {
}
}
- if acks := heights.Bucket(k).Bucket(metaAckIndexBucket); acks != nil {
- if err = acks.ForEach(func(k []byte, v []byte) (err error) {
- var ack = &wt.SignedAckHeader{}
- if err = utils.DecodeMsgPack(v, ack); err != nil {
- return
- }
- log.WithFields(log.Fields{
- "height": h,
- "header": ack.HeaderHash.String(),
- }).Debug("Loaded new ack header")
- return chain.qi.addAck(h, ack)
- }); err != nil {
+ ldbKey := make([]byte, 0, len(metaAckIndexBucket)+len(k)+hash.HashSize)
+ ldbKey = append(append(ldbKey, metaAckIndexBucket[:]...), k...)
+ iter := ldb.NewIterator(util.BytesPrefix(ldbKey), nil)
+ defer iter.Release()
+ for iter.Next() {
+ var ack = &wt.SignedAckHeader{}
+ if err = utils.DecodeMsgPack(iter.Value(), ack); err != nil {
return
}
+ log.WithFields(log.Fields{
+ "height": h,
+ "header": ack.HeaderHash.String(),
+ }).Debug("Loaded new ack header")
+ if err = chain.qi.addAck(h, ack); err != nil {
+ return
+ }
}
+ err = iter.Error()
+ if err != nil {
+ err = errors.Wrap(err, "load new ack header")
+ return
+ }
+
+ //acks := heights.Bucket(k).Bucket(metaAckIndexBucket)
+ //if acks != nil {
+ // if err = acks.ForEach(func(k []byte, v []byte) (err error) {
+ // var ack = &wt.SignedAckHeader{}
+ // if err = utils.DecodeMsgPack(v, ack); err != nil {
+ // return
+ // }
+ // log.WithFields(log.Fields{
+ // "height": h,
+ // "header": ack.HeaderHash.String(),
+ // }).Debug("Loaded new ack header")
+ // return chain.qi.addAck(h, ack)
+ // }); err != nil {
+ // return
+ // }
+ //}
return
}); err != nil {
@@ -379,10 +430,6 @@ func ensureHeight(tx *bolt.Tx, k []byte) (hb *bolt.Bucket, err error) {
if _, err = hb.CreateBucketIfNotExists(metaResponseIndexBucket); err != nil {
return
}
-
- if _, err = hb.CreateBucketIfNotExists(metaAckIndexBucket); err != nil {
- return
- }
}
return
@@ -426,16 +473,17 @@ func (c *Chain) pushAckedQuery(ack *wt.SignedAckHeader) (err error) {
}
return c.db.Update(func(tx *bolt.Tx) (err error) {
- b, err := ensureHeight(tx, k)
-
+ _, err = ensureHeight(tx, k)
if err != nil {
return
}
- // TODO(leventeliu): this doesn't seem right to use an error to detect key existence.
- if err = b.Bucket(metaAckIndexBucket).Put(
- ack.HeaderHash[:], enc.Bytes(),
- ); err != nil {
+ ldbKey := make([]byte, 0, len(metaAckIndexBucket)+len(k)+hash.HashSize)
+ ldbKey = append(append(append(ldbKey, metaAckIndexBucket[:]...), k...), ack.HeaderHash[:]...)
+ err = c.ldb.Put(ldbKey, enc.Bytes(), nil)
+ //err = b.Bucket(metaAckIndexBucket).Put(ack.HeaderHash[:], enc.Bytes())
+ if err != nil {
+ err = errors.Wrapf(err, "put %s %d %s", string(metaAckIndexBucket[:]), h, ack.HeaderHash)
return
}
@@ -836,6 +884,12 @@ func (c *Chain) Stop() (err error) {
"peer": c.rt.getPeerInfoString(),
"time": c.rt.getChainTimeString(),
}).Debug("Chain database closed")
+ // Close LevelDB file
+ err = c.ldb.Close()
+ log.WithFields(log.Fields{
+ "peer": c.rt.getPeerInfoString(),
+ "time": c.rt.getChainTimeString(),
+ }).Debug("Chain database closed")
return
}
@@ -860,30 +914,33 @@ func (c *Chain) FetchBlock(height int32) (b *ct.Block, err error) {
func (c *Chain) FetchAckedQuery(height int32, header *hash.Hash) (
ack *wt.SignedAckHeader, err error,
) {
- if ack, err = c.qi.getAck(height, header); err != nil {
- err = c.db.View(func(tx *bolt.Tx) (err error) {
- for i := height - c.rt.queryTTL; i <= height; i++ {
- if b := tx.Bucket(metaBucket[:]).Bucket(metaHeightIndexBucket).Bucket(
- heightToKey(height)); b != nil {
- if v := b.Bucket(metaAckIndexBucket).Get(header[:]); v != nil {
- dec := &wt.SignedAckHeader{}
-
- if err = utils.DecodeMsgPack(v, dec); err != nil {
- ack = dec
- break
- }
+ if ack, err = c.qi.getAck(height, header); err == nil && ack != nil {
+ return
+ }
+ err = c.db.View(func(tx *bolt.Tx) (err error) {
+ var hb = tx.Bucket(metaBucket[:]).Bucket(metaHeightIndexBucket)
+ for h := height - c.rt.queryTTL - 1; h <= height; h++ {
+ k := heightToKey(h)
+ if ab := hb.Bucket(heightToKey(h)); ab != nil {
+ ldbKey := make([]byte, 0, len(metaAckIndexBucket)+len(k)+hash.HashSize)
+ ldbKey = append(append(append(ldbKey, metaAckIndexBucket[:]...), k...), header[:]...)
+ v, _ := c.ldb.Get(ldbKey, nil)
+ //v := ab.Bucket(metaAckIndexBucket).Get(header[:])
+ if v != nil {
+ var dec = &wt.SignedAckHeader{}
+ if err = utils.DecodeMsgPack(v, dec); err != nil {
+ return
}
+ ack = dec
+ break
}
}
-
- if ack == nil {
- err = ErrAckQueryNotFound
- }
-
- return
- })
- }
-
+ }
+ if ack == nil {
+ err = ErrAckQueryNotFound
+ }
+ return
+ })
return
}
@@ -925,10 +982,11 @@ func (c *Chain) syncAckedQuery(height int32, header *hash.Hash, id proto.NodeID)
func (c *Chain) queryOrSyncAckedQuery(height int32, header *hash.Hash, id proto.NodeID) (
ack *wt.SignedAckHeader, err error,
) {
- if ack, err = c.FetchAckedQuery(height, header); err != nil || ack != nil || id == c.rt.getServer().ID {
+ if ack, err = c.FetchAckedQuery(
+ height, header,
+ ); (err == nil && ack != nil) || id == c.rt.getServer().ID {
return
}
-
return c.syncAckedQuery(height, header, id)
}
@@ -1082,7 +1140,7 @@ func (c *Chain) getBilling(low, high int32) (req *pt.BillingRequest, err error)
highBlock = n.block
- if addr, err = utils.PubKeyHash(n.block.Signee()); err != nil {
+ if addr, err = crypto.PubKeyHash(n.block.Signee()); err != nil {
return
}
@@ -1102,7 +1160,7 @@ func (c *Chain) getBilling(low, high int32) (req *pt.BillingRequest, err error)
return
}
- if addr, err = utils.PubKeyHash(ack.SignedResponseHeader().Signee); err != nil {
+ if addr, err = crypto.PubKeyHash(ack.SignedResponseHeader().Signee); err != nil {
return
}
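Acknowledgement headers now live in LevelDB under a composite key: the 4-byte 'QACK' bucket tag, the 4-byte big-endian height key, then the 32-byte header hash. The same layout is rebuilt by hand in LoadChain, pushAckedQuery and FetchAckedQuery; a hypothetical helper (not in the patch) that centralizes it would look like this:

```go
package sqlchain

import (
	"github.com/CovenantSQL/CovenantSQL/crypto/hash"
)

// ackLevelDBKey is a hypothetical helper building the composite key used
// above: 4-byte 'QACK' tag + 4-byte big-endian height + 32-byte
// acknowledgement header hash. heightToKey and metaAckIndexBucket are the
// identifiers already defined in chain.go.
func ackLevelDBKey(height int32, header *hash.Hash) []byte {
	k := heightToKey(height)
	key := make([]byte, 0, len(metaAckIndexBucket)+len(k)+hash.HashSize)
	key = append(key, metaAckIndexBucket[:]...)
	key = append(key, k...)
	key = append(key, header[:]...)
	return key
}
```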
diff --git a/sqlchain/chain_test.go b/sqlchain/chain_test.go
index 50c74b5d0..fd69cdb59 100644
--- a/sqlchain/chain_test.go
+++ b/sqlchain/chain_test.go
@@ -290,6 +290,34 @@ func TestMultiChain(t *testing.T) {
}(v.chain)
}
+ // Should be able to fetch all acks in all peers
+ for _, v := range chains {
+ defer func(c *Chain) {
+ var ch = c.rt.getHead().Height
+ for i := int32(0); i <= ch; i++ {
+ var node *blockNode
+ if node = c.rt.getHead().node.ancestor(i); node == nil {
+ t.Logf("Block at height %d not found in peer %s, continue",
+ i, c.rt.getPeerInfoString())
+ continue
+ }
+ t.Logf("Checking block %v at height %d in peer %s",
+ node.block.BlockHash(), i, c.rt.getPeerInfoString())
+ for _, v := range node.block.Queries {
+ if ack, err := c.queryOrSyncAckedQuery(
+ i, v, node.block.Producer(),
+ ); err != nil && ack == nil {
+ t.Errorf("Failed to fetch ack %v at height %d in peer %s: %v",
+ v, i, c.rt.getPeerInfoString(), err)
+ } else {
+ t.Logf("Successed to fetch ack %v at height %d in peer %s",
+ v, i, c.rt.getPeerInfoString())
+ }
+ }
+ }
+ }(v.chain)
+ }
+
// Create some random clients to push new queries
for i, v := range chains {
sC := make(chan struct{})
diff --git a/sqlchain/observer.go b/sqlchain/observer.go
index 0ef4ae179..ca76876df 100644
--- a/sqlchain/observer.go
+++ b/sqlchain/observer.go
@@ -176,9 +176,9 @@ func (r *observerReplicator) replicate() {
// fetch acks in block
for _, h := range block.Queries {
var ack *wt.SignedAckHeader
- if ack, err = r.c.queryOrSyncAckedQuery(r.height, h, block.Producer()); err != nil {
+ if ack, err = r.c.queryOrSyncAckedQuery(r.height, h, block.Producer()); err != nil || ack == nil {
log.Warningf("fetch ack %v in block height %v failed: %v", h, r.height, err)
- return
+ continue
}
// send advise to this block
diff --git a/sqlchain/storage/storage.go b/sqlchain/storage/storage.go
index 88cd25749..d0190ba94 100644
--- a/sqlchain/storage/storage.go
+++ b/sqlchain/storage/storage.go
@@ -276,6 +276,11 @@ func (s *Storage) Query(ctx context.Context, queries []Query) (columns []string,
return
}
+ // if there are no columns, treat the result as empty
+ if len(columns) == 0 {
+ return
+ }
+
// get types meta
if types, err = s.transformColumnTypes(rows.ColumnTypes()); err != nil {
return
diff --git a/sqlchain/xxx_test.go b/sqlchain/xxx_test.go
index 16cfae814..1aea3d25d 100644
--- a/sqlchain/xxx_test.go
+++ b/sqlchain/xxx_test.go
@@ -371,6 +371,7 @@ func createRandomBlock(parent hash.Hash, isGenesis bool) (b *ct.Block, err error
return
}
+ b.Queries = nil
b.SignedHeader.GenesisHash = hash.Hash{}
b.SignedHeader.Header.Producer = proto.NodeID(nis[0].Hash.String())
}
diff --git a/test/GNTE/conf/gnte.yaml b/test/GNTE/conf/gnte.yaml
index c869fd41e..7a9d6b90a 100644
--- a/test/GNTE/conf/gnte.yaml
+++ b/test/GNTE/conf/gnte.yaml
@@ -5,13 +5,13 @@ group:
nodes:
- # node_0
ip: 10.250.1.2/32
- cmd: "cd /scripts && ./bin/covenantsqld -config ./node_0/config.yaml"
+ cmd: "cd /scripts && ./bin/cqld -config ./node_0/config.yaml"
- # node_1
ip: 10.250.1.3/32
- cmd: "cd /scripts && ./bin/covenantsqld -config ./node_1/config.yaml"
+ cmd: "cd /scripts && ./bin/cqld -config ./node_1/config.yaml"
- # node_2
ip: 10.250.1.4/32
- cmd: "cd /scripts && ./bin/covenantsqld -config ./node_2/config.yaml"
+ cmd: "cd /scripts && ./bin/cqld -config ./node_2/config.yaml"
delay: "100ms 1ms 1%"
rate: "100mbit"
-
@@ -19,13 +19,13 @@ group:
nodes:
- # miner_0
ip: 10.250.1.5/32
- cmd: "cd /scripts && ./bin/covenantminerd -config ./node_miner_0/config.yaml"
+ cmd: "cd /scripts && ./bin/cql-minerd -config ./node_miner_0/config.yaml"
- # miner_1
ip: 10.250.1.6/32
- cmd: "cd /scripts && ./bin/covenantminerd -config ./node_miner_1/config.yaml"
+ cmd: "cd /scripts && ./bin/cql-minerd -config ./node_miner_1/config.yaml"
- # miner_2
ip: 10.250.1.7/32
- cmd: "cd /scripts && ./bin/covenantminerd -config ./node_miner_2/config.yaml"
+ cmd: "cd /scripts && ./bin/cql-minerd -config ./node_miner_2/config.yaml"
delay: "100ms 1ms 1%"
rate: "100mbit"
-
diff --git a/test/service/node_c/config.yaml b/test/service/node_c/config.yaml
index 186c5ae99..aeefebf7f 100644
--- a/test/service/node_c/config.yaml
+++ b/test/service/node_c/config.yaml
@@ -99,12 +99,12 @@ KnownNodes:
Role: Miner
Adapter:
ListenAddr: 0.0.0.0:4661
- CertificatePath: ./node_c/server.test.covenantsql.io.pem
- PrivateKeyPath: ./node_c/server.test.covenantsql.io-key.pem
+ CertificatePath: ./server.test.covenantsql.io.pem
+ PrivateKeyPath: ./server.test.covenantsql.io-key.pem
VerifyCertificate: true
- ClientCAPath: ./node_c/rootCA.pem
+ ClientCAPath: ./rootCA.pem
AdminCerts:
- - ./node_c/admin.test.covenantsql.io.pem
+ - ./admin.test.covenantsql.io.pem
WriteCerts:
- - ./node_c/write.test.covenantsql.io.pem
+ - ./write.test.covenantsql.io.pem
StorageDriver: covenantsql
diff --git a/test/service/node_mysql_adapter/config.yaml b/test/service/node_mysql_adapter/config.yaml
new file mode 100644
index 000000000..3045d1535
--- /dev/null
+++ b/test/service/node_mysql_adapter/config.yaml
@@ -0,0 +1,99 @@
+IsTestMode: true
+WorkingRoot: "./"
+PubKeyStoreFile: "public.keystore"
+PrivateKeyFile: "private.key"
+DHTFileName: "dht.db"
+ListenAddr: "172.254.1.4:4661"
+ThisNodeID: "00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d"
+ValidDNSKeys:
+ koPbw9wmYZ7ggcjnQ6ayHyhHaDNMYELKTqT+qRGrZpWSccr/lBcrm10Z1PuQHB3Azhii+sb0PYFkH1ruxLhe5g==: cloudflare.com
+ mdsswUyr3DPW132mOi8V9xESWE8jTo0dxCjjnopKl+GqJxpVXckHAeF+KkxLbxILfDLUT0rAK9iUzy1L53eKGQ==: cloudflare.com
+MinNodeIDDifficulty: 2
+DNSSeed:
+ EnforcedDNSSEC: false
+ DNSServers:
+ - 1.1.1.1
+ - 202.46.34.74
+ - 202.46.34.75
+ - 202.46.34.76
+
+BlockProducer:
+ PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24"
+ NodeID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9
+ Nonce:
+ a: 313283
+ b: 0
+ c: 0
+ d: 0
+ ChainFileName: "chain.db"
+ BPGenesisInfo:
+ Version: 1
+ BlockHash: f745ca6427237aac858dd3c7f2df8e6f3c18d0f1c164e07a1c6b8eebeba6b154
+ Producer: 0000000000000000000000000000000000000000000000000000000000000001
+ MerkleRoot: 0000000000000000000000000000000000000000000000000000000000000001
+ ParentHash: 0000000000000000000000000000000000000000000000000000000000000001
+ Timestamp: 2018-08-13T21:59:59.12Z
+KnownNodes:
+- ID: 00000bef611d346c0cbe1beaa76e7f0ed705a194fdf9ac3a248ec70e9c198bf9
+ Nonce:
+ a: 313283
+ b: 0
+ c: 0
+ d: 0
+ Addr: 172.254.1.2:4661
+ PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24"
+ Role: Leader
+- ID: 00000381d46fd6cf7742d7fb94e2422033af989c0e348b5781b3219599a3af35
+ Nonce:
+ a: 478373
+ b: 0
+ c: 0
+ d: 2305843009893772025
+ Addr: 172.254.1.3:4661
+ PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24"
+ Role: Follower
+- ID: 000000172580063ded88e010556b0aca2851265be8845b1ef397e8fce6ab5582
+ Nonce:
+ a: 259939
+ b: 0
+ c: 0
+ d: 2305843012544226372
+ Addr: 172.254.1.4:4661
+ PublicKey: "02c76216704d797c64c58bc11519fb68582e8e63de7e5b3b2dbbbe8733efe5fd24"
+ Role: Follower
+- ID: 00000f3b43288fe99831eb533ab77ec455d13e11fc38ec35a42d4edd17aa320d
+ Nonce:
+ a: 22403
+ b: 0
+ c: 0
+ d: 0
+ Addr: ""
+ PublicKey: 02ec784ca599f21ef93fe7abdc68d78817ab6c9b31f2324d15ea174d9da498b4c4
+ Role: Client
+- ID: 000005aa62048f85da4ae9698ed59c14ec0d48a88a07c15a32265634e7e64ade
+ Nonce:
+ a: 567323
+ b: 0
+ c: 0
+ d: 3104982049
+ Addr: 172.254.1.5:4661
+ PublicKey: 0367aa51809a7c1dc0f82c02452fec9557b3e1d10ce7c919d8e73d90048df86d20
+ Role: Miner
+- ID: 000005f4f22c06f76c43c4f48d5a7ec1309cc94030cbf9ebae814172884ac8b5
+ Nonce:
+ a: 240524
+ b: 0
+ c: 0
+ d: 2305843010430351476
+ Addr: 172.254.1.6:4661
+ PublicKey: 02914bca0806f040dd842207c44474ab41ecd29deee7f2d355789c5c78d448ca16
+ Role: Miner
+- ID: 000003f49592f83d0473bddb70d543f1096b4ffed5e5f942a3117e256b7052b8
+ Nonce:
+ a: 606016
+ b: 0
+ c: 0
+ d: 13835058056920509601
+ Addr: 172.254.1.7:4661
+ PublicKey: 03ae859eac5b72ee428c7a85f10b2ce748d9de5e480aefbb70f6597dfa8b2175e5
+ Role: Miner
diff --git a/test/service/node_mysql_adapter/private.key b/test/service/node_mysql_adapter/private.key
new file mode 100644
index 000000000..f563980c1
Binary files /dev/null and b/test/service/node_mysql_adapter/private.key differ
diff --git a/utils/big.go b/utils/big.go
new file mode 100644
index 000000000..0c7e75930
--- /dev/null
+++ b/utils/big.go
@@ -0,0 +1,212 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+// Big integer math utilities adapted from the go-ethereum math package.
+package utils
+
+import (
+ "fmt"
+ "math/big"
+)
+
+// Various big integer limit values.
+var (
+ tt255 = BigPow(2, 255)
+ tt256 = BigPow(2, 256)
+ tt256m1 = new(big.Int).Sub(tt256, big.NewInt(1))
+ tt63 = BigPow(2, 63)
+ MaxBig256 = new(big.Int).Set(tt256m1)
+ MaxBig63 = new(big.Int).Sub(tt63, big.NewInt(1))
+)
+
+const (
+ // number of bits in a big.Word
+ wordBits = 32 << (uint64(^big.Word(0)) >> 63)
+ // number of bytes in a big.Word
+ wordBytes = wordBits / 8
+)
+
+// HexOrDecimal256 marshals big.Int as hex or decimal.
+type HexOrDecimal256 big.Int
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (i *HexOrDecimal256) UnmarshalText(input []byte) error {
+ bigint, ok := ParseBig256(string(input))
+ if !ok {
+ return fmt.Errorf("invalid hex or decimal integer %q", input)
+ }
+ *i = HexOrDecimal256(*bigint)
+ return nil
+}
+
+// MarshalText implements encoding.TextMarshaler.
+func (i *HexOrDecimal256) MarshalText() ([]byte, error) {
+ if i == nil {
+ return []byte("0x0"), nil
+ }
+ return []byte(fmt.Sprintf("%#x", (*big.Int)(i))), nil
+}
+
+// ParseBig256 parses s as a 256 bit integer in decimal or hexadecimal syntax.
+// Leading zeros are accepted. The empty string parses as zero.
+func ParseBig256(s string) (*big.Int, bool) {
+ if s == "" {
+ return new(big.Int), true
+ }
+ var bigint *big.Int
+ var ok bool
+ if len(s) >= 2 && (s[:2] == "0x" || s[:2] == "0X") {
+ bigint, ok = new(big.Int).SetString(s[2:], 16)
+ } else {
+ bigint, ok = new(big.Int).SetString(s, 10)
+ }
+ if ok && bigint.BitLen() > 256 {
+ bigint, ok = nil, false
+ }
+ return bigint, ok
+}
+
+// MustParseBig256 parses s as a 256 bit big integer and panics if the string is invalid.
+func MustParseBig256(s string) *big.Int {
+ v, ok := ParseBig256(s)
+ if !ok {
+ panic("invalid 256 bit integer: " + s)
+ }
+ return v
+}
+
+// BigPow returns a ** b as a big integer.
+func BigPow(a, b int64) *big.Int {
+ r := big.NewInt(a)
+ return r.Exp(r, big.NewInt(b), nil)
+}
+
+// BigMax returns the larger of x or y.
+func BigMax(x, y *big.Int) *big.Int {
+ if x.Cmp(y) < 0 {
+ return y
+ }
+ return x
+}
+
+// BigMin returns the smaller of x or y.
+func BigMin(x, y *big.Int) *big.Int {
+ if x.Cmp(y) > 0 {
+ return y
+ }
+ return x
+}
+
+// FirstBitSet returns the index of the first 1 bit in v, counting from LSB.
+func FirstBitSet(v *big.Int) int {
+ for i := 0; i < v.BitLen(); i++ {
+ if v.Bit(i) > 0 {
+ return i
+ }
+ }
+ return v.BitLen()
+}
+
+// PaddedBigBytes encodes a big integer as a big-endian byte slice. The length
+// of the slice is at least n bytes.
+func PaddedBigBytes(bigint *big.Int, n int) []byte {
+ if bigint.BitLen()/8 >= n {
+ return bigint.Bytes()
+ }
+ ret := make([]byte, n)
+ ReadBits(bigint, ret)
+ return ret
+}
+
+// bigEndianByteAt returns the byte at position n,
+// in Big-Endian encoding
+// So n==0 returns the least significant byte
+func bigEndianByteAt(bigint *big.Int, n int) byte {
+ words := bigint.Bits()
+ // Check word-bucket the byte will reside in
+ i := n / wordBytes
+ if i >= len(words) {
+ return byte(0)
+ }
+ word := words[i]
+ // Offset of the byte
+ shift := 8 * uint(n%wordBytes)
+
+ return byte(word >> shift)
+}
+
+// Byte returns the byte at position n,
+// with the supplied padlength in Little-Endian encoding.
+// n==0 returns the MSB
+// Example: bigint '5', padlength 32, n=31 => 5
+func Byte(bigint *big.Int, padlength, n int) byte {
+ if n >= padlength {
+ return byte(0)
+ }
+ return bigEndianByteAt(bigint, padlength-1-n)
+}
+
+// ReadBits encodes the absolute value of bigint as big-endian bytes. Callers must ensure
+// that buf has enough space. If buf is too short the result will be incomplete.
+func ReadBits(bigint *big.Int, buf []byte) {
+ i := len(buf)
+ for _, d := range bigint.Bits() {
+ for j := 0; j < wordBytes && i > 0; j++ {
+ i--
+ buf[i] = byte(d)
+ d >>= 8
+ }
+ }
+}
+
+// U256 encodes as a 256 bit two's complement number. This operation is destructive.
+func U256(x *big.Int) *big.Int {
+ return x.And(x, tt256m1)
+}
+
+// S256 interprets x as a two's complement number.
+// x must not exceed 256 bits (the result is undefined if it does) and is not modified.
+//
+// S256(0) = 0
+// S256(1) = 1
+// S256(2**255) = -2**255
+// S256(2**256-1) = -1
+func S256(x *big.Int) *big.Int {
+ if x.Cmp(tt255) < 0 {
+ return x
+ }
+ return new(big.Int).Sub(x, tt256)
+}
+
+// Exp implements exponentiation by squaring.
+// Exp returns a newly-allocated big integer and does not change
+// base or exponent. The result is truncated to 256 bits.
+//
+// Courtesy @karalabe and @chfast
+func Exp(base, exponent *big.Int) *big.Int {
+ result := big.NewInt(1)
+
+ for _, word := range exponent.Bits() {
+ for i := 0; i < wordBits; i++ {
+ if word&1 == 1 {
+ U256(result.Mul(result, base))
+ }
+ U256(base.Mul(base, base))
+ word >>= 1
+ }
+ }
+ return result
+}
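A brief usage sketch of the 256-bit helpers above, with the import path assumed; it mirrors the behaviour pinned down by the tests that follow:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/CovenantSQL/CovenantSQL/utils" // assumed import path
)

func main() {
	// ParseBig256/MustParseBig256 accept decimal or 0x-prefixed hex up to 256 bits.
	n := utils.MustParseBig256("0x12345678")

	// PaddedBigBytes left-pads the big-endian encoding to at least 32 bytes.
	fmt.Printf("%x\n", utils.PaddedBigBytes(n, 32))

	// U256 reduces modulo 2^256 in place; S256 reads the result as two's complement.
	m := utils.U256(new(big.Int).Neg(big.NewInt(1))) // -1 becomes 2^256 - 1
	fmt.Println(utils.S256(m))                       // prints -1 again
}
```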
diff --git a/utils/big_test.go b/utils/big_test.go
new file mode 100644
index 000000000..ff58168cd
--- /dev/null
+++ b/utils/big_test.go
@@ -0,0 +1,317 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+ "bytes"
+ "encoding/hex"
+ "math/big"
+ "testing"
+)
+
+func TestHexOrDecimal256(t *testing.T) {
+ tests := []struct {
+ input string
+ num *big.Int
+ ok bool
+ }{
+ {"", big.NewInt(0), true},
+ {"0", big.NewInt(0), true},
+ {"0x0", big.NewInt(0), true},
+ {"12345678", big.NewInt(12345678), true},
+ {"0x12345678", big.NewInt(0x12345678), true},
+ {"0X12345678", big.NewInt(0x12345678), true},
+ // Tests for leading zero behaviour:
+ {"0123456789", big.NewInt(123456789), true}, // note: not octal
+ {"00", big.NewInt(0), true},
+ {"0x00", big.NewInt(0), true},
+ {"0x012345678abc", big.NewInt(0x12345678abc), true},
+ // Invalid syntax:
+ {"abcdef", nil, false},
+ {"0xgg", nil, false},
+ // Larger than 256 bits:
+ {"115792089237316195423570985008687907853269984665640564039457584007913129639936", nil, false},
+ }
+ for _, test := range tests {
+ var num HexOrDecimal256
+ err := num.UnmarshalText([]byte(test.input))
+ if (err == nil) != test.ok {
+ t.Errorf("ParseBig(%q) -> (err == nil) == %t, want %t", test.input, err == nil, test.ok)
+ continue
+ }
+ if test.num != nil && (*big.Int)(&num).Cmp(test.num) != 0 {
+ t.Errorf("ParseBig(%q) -> %d, want %d", test.input, (*big.Int)(&num), test.num)
+ }
+ }
+}
+
+func TestMustParseBig256(t *testing.T) {
+ defer func() {
+ if recover() == nil {
+ t.Error("MustParseBig should've panicked")
+ }
+ }()
+ MustParseBig256("ggg")
+}
+
+func TestBigMax(t *testing.T) {
+ a := big.NewInt(10)
+ b := big.NewInt(5)
+
+ max1 := BigMax(a, b)
+ if max1 != a {
+ t.Errorf("Expected %d got %d", a, max1)
+ }
+
+ max2 := BigMax(b, a)
+ if max2 != a {
+ t.Errorf("Expected %d got %d", a, max2)
+ }
+}
+
+func TestBigMin(t *testing.T) {
+ a := big.NewInt(10)
+ b := big.NewInt(5)
+
+ min1 := BigMin(a, b)
+ if min1 != b {
+ t.Errorf("Expected %d got %d", b, min1)
+ }
+
+ min2 := BigMin(b, a)
+ if min2 != b {
+ t.Errorf("Expected %d got %d", b, min2)
+ }
+}
+
+func TestFirstBigSet(t *testing.T) {
+ tests := []struct {
+ num *big.Int
+ ix int
+ }{
+ {big.NewInt(0), 0},
+ {big.NewInt(1), 0},
+ {big.NewInt(2), 1},
+ {big.NewInt(0x100), 8},
+ }
+ for _, test := range tests {
+ if ix := FirstBitSet(test.num); ix != test.ix {
+ t.Errorf("FirstBitSet(b%b) = %d, want %d", test.num, ix, test.ix)
+ }
+ }
+}
+
+func TestPaddedBigBytes(t *testing.T) {
+ tests := []struct {
+ num *big.Int
+ n int
+ result []byte
+ }{
+ {num: big.NewInt(0), n: 4, result: []byte{0, 0, 0, 0}},
+ {num: big.NewInt(1), n: 4, result: []byte{0, 0, 0, 1}},
+ {num: big.NewInt(512), n: 4, result: []byte{0, 0, 2, 0}},
+ {num: BigPow(2, 32), n: 4, result: []byte{1, 0, 0, 0, 0}},
+ }
+ for _, test := range tests {
+ if result := PaddedBigBytes(test.num, test.n); !bytes.Equal(result, test.result) {
+ t.Errorf("PaddedBigBytes(%d, %d) = %v, want %v", test.num, test.n, result, test.result)
+ }
+ }
+}
+
+func BenchmarkPaddedBigBytesLargePadding(b *testing.B) {
+ bigint := MustParseBig256("123456789123456789123456789123456789")
+ for i := 0; i < b.N; i++ {
+ PaddedBigBytes(bigint, 200)
+ }
+}
+
+func BenchmarkPaddedBigBytesSmallPadding(b *testing.B) {
+ bigint := MustParseBig256("0x18F8F8F1000111000110011100222004330052300000000000000000FEFCF3CC")
+ for i := 0; i < b.N; i++ {
+ PaddedBigBytes(bigint, 5)
+ }
+}
+
+func BenchmarkPaddedBigBytesSmallOnePadding(b *testing.B) {
+ bigint := MustParseBig256("0x18F8F8F1000111000110011100222004330052300000000000000000FEFCF3CC")
+ for i := 0; i < b.N; i++ {
+ PaddedBigBytes(bigint, 32)
+ }
+}
+
+func BenchmarkByteAtBrandNew(b *testing.B) {
+ bigint := MustParseBig256("0x18F8F8F1000111000110011100222004330052300000000000000000FEFCF3CC")
+ for i := 0; i < b.N; i++ {
+ bigEndianByteAt(bigint, 15)
+ }
+}
+
+func BenchmarkByteAt(b *testing.B) {
+ bigint := MustParseBig256("0x18F8F8F1000111000110011100222004330052300000000000000000FEFCF3CC")
+ for i := 0; i < b.N; i++ {
+ bigEndianByteAt(bigint, 15)
+ }
+}
+
+func BenchmarkByteAtOld(b *testing.B) {
+
+ bigint := MustParseBig256("0x18F8F8F1000111000110011100222004330052300000000000000000FEFCF3CC")
+ for i := 0; i < b.N; i++ {
+ PaddedBigBytes(bigint, 32)
+ }
+}
+
+func TestReadBits(t *testing.T) {
+ check := func(input string) {
+ want, _ := hex.DecodeString(input)
+ int, _ := new(big.Int).SetString(input, 16)
+ buf := make([]byte, len(want))
+ ReadBits(int, buf)
+ if !bytes.Equal(buf, want) {
+ t.Errorf("have: %x\nwant: %x", buf, want)
+ }
+ }
+ check("000000000000000000000000000000000000000000000000000000FEFCF3F8F0")
+ check("0000000000012345000000000000000000000000000000000000FEFCF3F8F0")
+ check("18F8F8F1000111000110011100222004330052300000000000000000FEFCF3F8F0")
+}
+
+func TestU256(t *testing.T) {
+ tests := []struct{ x, y *big.Int }{
+ {x: big.NewInt(0), y: big.NewInt(0)},
+ {x: big.NewInt(1), y: big.NewInt(1)},
+ {x: BigPow(2, 255), y: BigPow(2, 255)},
+ {x: BigPow(2, 256), y: big.NewInt(0)},
+ {x: new(big.Int).Add(BigPow(2, 256), big.NewInt(1)), y: big.NewInt(1)},
+ // negative values
+ {x: big.NewInt(-1), y: new(big.Int).Sub(BigPow(2, 256), big.NewInt(1))},
+ {x: big.NewInt(-2), y: new(big.Int).Sub(BigPow(2, 256), big.NewInt(2))},
+ {x: BigPow(2, -255), y: big.NewInt(1)},
+ }
+ for _, test := range tests {
+ if y := U256(new(big.Int).Set(test.x)); y.Cmp(test.y) != 0 {
+ t.Errorf("U256(%x) = %x, want %x", test.x, y, test.y)
+ }
+ }
+}
+
+func TestBigEndianByteAt(t *testing.T) {
+ tests := []struct {
+ x string
+ y int
+ exp byte
+ }{
+ {"00", 0, 0x00},
+ {"01", 1, 0x00},
+ {"00", 1, 0x00},
+ {"01", 0, 0x01},
+ {"0000000000000000000000000000000000000000000000000000000000102030", 0, 0x30},
+ {"0000000000000000000000000000000000000000000000000000000000102030", 1, 0x20},
+ {"ABCDEF0908070605040302010000000000000000000000000000000000000000", 31, 0xAB},
+ {"ABCDEF0908070605040302010000000000000000000000000000000000000000", 32, 0x00},
+ {"ABCDEF0908070605040302010000000000000000000000000000000000000000", 500, 0x00},
+ }
+ for _, test := range tests {
+ b, _ := hex.DecodeString(test.x)
+ v := new(big.Int).SetBytes(b)
+ actual := bigEndianByteAt(v, test.y)
+ if actual != test.exp {
+ t.Fatalf("Expected [%v] %v:th byte to be %v, was %v.", test.x, test.y, test.exp, actual)
+ }
+
+ }
+}
+func TestLittleEndianByteAt(t *testing.T) {
+ tests := []struct {
+ x string
+ y int
+ exp byte
+ }{
+ {"00", 0, 0x00},
+ {"01", 1, 0x00},
+ {"00", 1, 0x00},
+ {"01", 0, 0x00},
+ {"0000000000000000000000000000000000000000000000000000000000102030", 0, 0x00},
+ {"0000000000000000000000000000000000000000000000000000000000102030", 1, 0x00},
+ {"ABCDEF0908070605040302010000000000000000000000000000000000000000", 31, 0x00},
+ {"ABCDEF0908070605040302010000000000000000000000000000000000000000", 32, 0x00},
+ {"ABCDEF0908070605040302010000000000000000000000000000000000000000", 0, 0xAB},
+ {"ABCDEF0908070605040302010000000000000000000000000000000000000000", 1, 0xCD},
+ {"00CDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff", 0, 0x00},
+ {"00CDEF090807060504030201ffffffffffffffffffffffffffffffffffffffff", 1, 0xCD},
+ {"0000000000000000000000000000000000000000000000000000000000102030", 31, 0x30},
+ {"0000000000000000000000000000000000000000000000000000000000102030", 30, 0x20},
+ {"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 32, 0x0},
+ {"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 31, 0xFF},
+ {"ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", 0xFFFF, 0x0},
+ }
+ for _, test := range tests {
+ b, _ := hex.DecodeString(test.x)
+ v := new(big.Int).SetBytes(b)
+ actual := Byte(v, 32, test.y)
+ if actual != test.exp {
+ t.Fatalf("Expected [%v] %v:th byte to be %v, was %v.", test.x, test.y, test.exp, actual)
+ }
+
+ }
+}
+
+func TestS256(t *testing.T) {
+ tests := []struct{ x, y *big.Int }{
+ {x: big.NewInt(0), y: big.NewInt(0)},
+ {x: big.NewInt(1), y: big.NewInt(1)},
+ {x: big.NewInt(2), y: big.NewInt(2)},
+ {
+ x: new(big.Int).Sub(BigPow(2, 255), big.NewInt(1)),
+ y: new(big.Int).Sub(BigPow(2, 255), big.NewInt(1)),
+ },
+ {
+ x: BigPow(2, 255),
+ y: new(big.Int).Neg(BigPow(2, 255)),
+ },
+ {
+ x: new(big.Int).Sub(BigPow(2, 256), big.NewInt(1)),
+ y: big.NewInt(-1),
+ },
+ {
+ x: new(big.Int).Sub(BigPow(2, 256), big.NewInt(2)),
+ y: big.NewInt(-2),
+ },
+ }
+ for _, test := range tests {
+ if y := S256(test.x); y.Cmp(test.y) != 0 {
+ t.Errorf("S256(%x) = %x, want %x", test.x, y, test.y)
+ }
+ }
+}
+
+func TestExp(t *testing.T) {
+ tests := []struct{ base, exponent, result *big.Int }{
+ {base: big.NewInt(0), exponent: big.NewInt(0), result: big.NewInt(1)},
+ {base: big.NewInt(1), exponent: big.NewInt(0), result: big.NewInt(1)},
+ {base: big.NewInt(1), exponent: big.NewInt(1), result: big.NewInt(1)},
+ {base: big.NewInt(1), exponent: big.NewInt(2), result: big.NewInt(1)},
+ {base: big.NewInt(3), exponent: big.NewInt(144), result: MustParseBig256("507528786056415600719754159741696356908742250191663887263627442114881")},
+ {base: big.NewInt(2), exponent: big.NewInt(255), result: MustParseBig256("57896044618658097711785492504343953926634992332820282019728792003956564819968")},
+ }
+ for _, test := range tests {
+ if result := Exp(test.base, test.exponent); result.Cmp(test.result) != 0 {
+ t.Errorf("Exp(%d, %d) = %d, want %d", test.base, test.exponent, result, test.result)
+ }
+ }
+}
diff --git a/utils/exec.go b/utils/exec.go
index 3e5e97ebb..596bbe5d7 100644
--- a/utils/exec.go
+++ b/utils/exec.go
@@ -29,6 +29,12 @@ import (
// FJ is short for filepath.Join
var FJ = filepath.Join
+// CMD is the struct holding exec.Cmd and log path
+type CMD struct {
+ Cmd *exec.Cmd
+ LogPath string
+}
+
// GetProjectSrcDir gets the src code root
func GetProjectSrcDir() string {
_, testFile, _, _ := runtime.Caller(0)
@@ -60,17 +66,19 @@ func RunCommand(bin string, args []string, processName string, workingDir string
log.Errorf("start command failed: %v", err)
return
}
- err = cmd.Wait()
+ err = cmd.Cmd.Wait()
if err != nil {
- log.Errorf("cmd %s args %s failed with %v", cmd.Path, cmd.Args, err)
+ log.Errorf("cmd %s args %s failed with %v", cmd.Cmd.Path, cmd.Cmd.Args, err)
return
}
return
}
// RunCommandNB starts a non-blocking command
-func RunCommandNB(bin string, args []string, processName string, workingDir string, logDir string, toStd bool) (cmd *exec.Cmd, err error) {
- logFD, err := os.Create(FJ(logDir, processName+".log"))
+func RunCommandNB(bin string, args []string, processName string, workingDir string, logDir string, toStd bool) (cmd *CMD, err error) {
+ cmd = new(CMD)
+ cmd.LogPath = FJ(logDir, processName+".log")
+ logFD, err := os.Create(cmd.LogPath)
if err != nil {
log.Errorf("create log file failed: %s", err)
return
@@ -81,9 +89,9 @@ func RunCommandNB(bin string, args []string, processName string, workingDir stri
log.Errorf("change working dir failed: %s", err)
return
}
- cmd = exec.Command(bin, args...)
- stdoutIn, _ := cmd.StdoutPipe()
- stderrIn, _ := cmd.StderrPipe()
+ cmd.Cmd = exec.Command(bin, args...)
+ stdoutIn, _ := cmd.Cmd.StdoutPipe()
+ stderrIn, _ := cmd.Cmd.StderrPipe()
var stdout, stderr io.Writer
if toStd {
@@ -94,7 +102,7 @@ func RunCommandNB(bin string, args []string, processName string, workingDir stri
stderr = logFD
}
- err = cmd.Start()
+ err = cmd.Cmd.Start()
if err != nil {
log.Errorf("cmd.Start() failed with '%v'", err)
return
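RunCommandNB now hands back a *CMD wrapper, so callers reach the child process through cmd.Cmd and the log file through cmd.LogPath, as the rpc/leak_test.go change above already does. A hedged sketch with hypothetical binary, config and log paths (the log directory must exist, and the import path is assumed):

```go
package main

import (
	"fmt"
	"syscall"
	"time"

	"github.com/CovenantSQL/CovenantSQL/utils" // assumed import path
)

func main() {
	// Hypothetical binary and config paths, for illustration only.
	cmd, err := utils.RunCommandNB(
		"./bin/cqld",
		[]string{"-config", "./node_0/config.yaml"},
		"demo", ".", "./logs", false,
	)
	if err != nil {
		fmt.Println("start failed:", err)
		return
	}
	fmt.Printf("started pid %d, logging to %s\n", cmd.Cmd.Process.Pid, cmd.LogPath)

	time.Sleep(5 * time.Second)
	// The wrapped exec.Cmd is reached through cmd.Cmd, as in rpc/leak_test.go.
	cmd.Cmd.Process.Signal(syscall.SIGKILL)
}
```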
diff --git a/utils/integer.go b/utils/integer.go
new file mode 100644
index 000000000..9d52f69c4
--- /dev/null
+++ b/utils/integer.go
@@ -0,0 +1,99 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// Integer limit values.
+const (
+ MaxInt8 = 1<<7 - 1
+ MinInt8 = -1 << 7
+ MaxInt16 = 1<<15 - 1
+ MinInt16 = -1 << 15
+ MaxInt32 = 1<<31 - 1
+ MinInt32 = -1 << 31
+ MaxInt64 = 1<<63 - 1
+ MinInt64 = -1 << 63
+ MaxUint8 = 1<<8 - 1
+ MaxUint16 = 1<<16 - 1
+ MaxUint32 = 1<<32 - 1
+ MaxUint64 = 1<<64 - 1
+)
+
+// HexOrDecimal64 marshals uint64 as hex or decimal.
+type HexOrDecimal64 uint64
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (i *HexOrDecimal64) UnmarshalText(input []byte) error {
+ int, ok := ParseUint64(string(input))
+ if !ok {
+ return fmt.Errorf("invalid hex or decimal integer %q", input)
+ }
+ *i = HexOrDecimal64(int)
+ return nil
+}
+
+// MarshalText implements encoding.TextMarshaler.
+func (i HexOrDecimal64) MarshalText() ([]byte, error) {
+ return []byte(fmt.Sprintf("%#x", uint64(i))), nil
+}
+
+// ParseUint64 parses s as an integer in decimal or hexadecimal syntax.
+// Leading zeros are accepted. The empty string parses as zero.
+func ParseUint64(s string) (uint64, bool) {
+ if s == "" {
+ return 0, true
+ }
+ if len(s) >= 2 && (s[:2] == "0x" || s[:2] == "0X") {
+ v, err := strconv.ParseUint(s[2:], 16, 64)
+ return v, err == nil
+ }
+ v, err := strconv.ParseUint(s, 10, 64)
+ return v, err == nil
+}
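+
+// Illustrative usage of ParseUint64 (a non-authoritative sketch based on the
+// rules above): hexadecimal and decimal forms yield the same value, and the
+// second return value reports whether parsing succeeded.
+//
+//   v, ok := ParseUint64("0x10") // v == 16, ok == true
+//   v, ok = ParseUint64("16")    // v == 16, ok == true
+//   _, ok = ParseUint64("0xgg")  // ok == false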
+
+// MustParseUint64 parses s as an integer and panics if the string is invalid.
+func MustParseUint64(s string) uint64 {
+ v, ok := ParseUint64(s)
+ if !ok {
+ panic("invalid unsigned 64 bit integer: " + s)
+ }
+ return v
+}
+
+// NOTE: The following methods need to be optimised using either bit checking or asm
+
+// SafeSub returns subtraction result and whether overflow occurred.
+func SafeSub(x, y uint64) (uint64, bool) {
+ return x - y, x < y
+}
+
+// SafeAdd returns the result and whether overflow occurred.
+func SafeAdd(x, y uint64) (uint64, bool) {
+ return x + y, y > MaxUint64-x
+}
+
+// SafeMul returns multiplication result and whether overflow occurred.
+func SafeMul(x, y uint64) (uint64, bool) {
+ if x == 0 || y == 0 {
+ return 0, false
+ }
+ return x * y, y > MaxUint64/x
+}
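+
+// A brief sketch of the overflow reporting above (these mirror cases exercised
+// in integer_test.go):
+//
+//   _, overflow := SafeAdd(MaxUint64, 1) // overflow == true, the sum wraps
+//   _, overflow = SafeSub(0, 1)          // overflow == true, since x < y
+//   _, overflow = SafeMul(MaxUint64, 2)  // overflow == true, product exceeds 64 bits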
diff --git a/utils/integer_test.go b/utils/integer_test.go
new file mode 100644
index 000000000..c63772875
--- /dev/null
+++ b/utils/integer_test.go
@@ -0,0 +1,116 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package utils
+
+import (
+ "testing"
+)
+
+type operation byte
+
+const (
+ sub operation = iota
+ add
+ mul
+)
+
+func TestOverflow(t *testing.T) {
+ for i, test := range []struct {
+ x uint64
+ y uint64
+ overflow bool
+ op operation
+ }{
+ // add operations
+ {MaxUint64, 1, true, add},
+ {MaxUint64 - 1, 1, false, add},
+
+ // sub operations
+ {0, 1, true, sub},
+ {0, 0, false, sub},
+
+ // mul operations
+ {0, 0, false, mul},
+ {10, 10, false, mul},
+ {MaxUint64, 2, true, mul},
+ {MaxUint64, 1, false, mul},
+ } {
+ var overflows bool
+ switch test.op {
+ case sub:
+ _, overflows = SafeSub(test.x, test.y)
+ case add:
+ _, overflows = SafeAdd(test.x, test.y)
+ case mul:
+ _, overflows = SafeMul(test.x, test.y)
+ }
+
+ if test.overflow != overflows {
+ t.Errorf("%d failed. Expected test to be %v, got %v", i, test.overflow, overflows)
+ }
+ }
+}
+
+func TestHexOrDecimal64(t *testing.T) {
+ tests := []struct {
+ input string
+ num uint64
+ ok bool
+ }{
+ {"", 0, true},
+ {"0", 0, true},
+ {"0x0", 0, true},
+ {"12345678", 12345678, true},
+ {"0x12345678", 0x12345678, true},
+ {"0X12345678", 0x12345678, true},
+ // Tests for leading zero behaviour:
+ {"0123456789", 123456789, true}, // note: not octal
+ {"0x00", 0, true},
+ {"0x012345678abc", 0x12345678abc, true},
+ // Invalid syntax:
+ {"abcdef", 0, false},
+ {"0xgg", 0, false},
+ // Doesn't fit into 64 bits:
+ {"18446744073709551617", 0, false},
+ }
+ for _, test := range tests {
+ var num HexOrDecimal64
+ err := num.UnmarshalText([]byte(test.input))
+ if (err == nil) != test.ok {
+ t.Errorf("ParseUint64(%q) -> (err == nil) = %t, want %t", test.input, err == nil, test.ok)
+ continue
+ }
+ if err == nil && uint64(num) != test.num {
+ t.Errorf("ParseUint64(%q) -> %d, want %d", test.input, num, test.num)
+ }
+ }
+}
+
+func TestMustParseUint64(t *testing.T) {
+ if v := MustParseUint64("12345"); v != 12345 {
+ t.Errorf(`MustParseUint64("12345") = %d, want 12345`, v)
+ }
+}
+
+func TestMustParseUint64Panic(t *testing.T) {
+ defer func() {
+ if recover() == nil {
+ t.Error("MustParseBig should've panicked")
+ }
+ }()
+ MustParseUint64("ggg")
+}
diff --git a/vendor/github.com/CovenantSQL/sqlparser/CONTRIBUTORS.md b/vendor/github.com/CovenantSQL/sqlparser/CONTRIBUTORS.md
new file mode 100644
index 000000000..a44885cd9
--- /dev/null
+++ b/vendor/github.com/CovenantSQL/sqlparser/CONTRIBUTORS.md
@@ -0,0 +1,9 @@
+This project is originally a fork of [https://github.com/youtube/vitess](https://github.com/youtube/vitess)
+Copyright Google Inc
+
+# Contributors
+Wenbin Xiao 2015
+Started this project and maintained it.
+
+Andrew Brampton 2017
+Merged in multiple upstream fixes/changes.
\ No newline at end of file
diff --git a/vendor/github.com/CovenantSQL/sqlparser/LICENSE.md b/vendor/github.com/CovenantSQL/sqlparser/LICENSE.md
new file mode 100644
index 000000000..f49a4e16e
--- /dev/null
+++ b/vendor/github.com/CovenantSQL/sqlparser/LICENSE.md
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
\ No newline at end of file
diff --git a/vendor/github.com/CovenantSQL/sqlparser/Makefile b/vendor/github.com/CovenantSQL/sqlparser/Makefile
new file mode 100644
index 000000000..215f422e6
--- /dev/null
+++ b/vendor/github.com/CovenantSQL/sqlparser/Makefile
@@ -0,0 +1,22 @@
+# Copyright 2017 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+MAKEFLAGS = -s
+
+sql.go: sql.y
+ goyacc -o sql.go sql.y
+ gofmt -w sql.go
+
+clean:
+ rm -f y.output sql.go
diff --git a/vendor/github.com/CovenantSQL/sqlparser/README.md b/vendor/github.com/CovenantSQL/sqlparser/README.md
new file mode 100644
index 000000000..58d475967
--- /dev/null
+++ b/vendor/github.com/CovenantSQL/sqlparser/README.md
@@ -0,0 +1,150 @@
+# sqlparser [Build Status](https://travis-ci.org/xwb1989/sqlparser) [Coverage Status](https://coveralls.io/github/xwb1989/sqlparser) [Go Report Card](https://goreportcard.com/report/github.com/xwb1989/sqlparser) [GoDoc](https://godoc.org/github.com/xwb1989/sqlparser)
+
+Go package for parsing MySQL SQL queries.
+
+## Notice
+
+The backbone of this repo is extracted from [vitessio/vitess](https://github.com/vitessio/vitess).
+
+Inside vitessio/vitess there is a very nicely written SQL parser. However, as it's not a self-contained application, I created this one.
+It applies the same LICENSE as vitessio/vitess.
+
+## Usage
+
+```go
+import (
+ "github.com/xwb1989/sqlparser"
+)
+```
+
+Then use:
+
+```go
+sql := "SELECT * FROM table WHERE a = 'abc'"
+stmt, err := sqlparser.Parse(sql)
+if err != nil {
+ // Do something with the err
+}
+
+// Otherwise do something with stmt
+switch stmt := stmt.(type) {
+case *sqlparser.Select:
+ _ = stmt
+case *sqlparser.Insert:
+}
+```
+
+Alternatively, to read many queries from an io.Reader:
+
+```go
+r := strings.NewReader("INSERT INTO table1 VALUES (1, 'a'); INSERT INTO table2 VALUES (3, 4);")
+
+tokens := sqlparser.NewTokenizer(r)
+for {
+ stmt, err := sqlparser.ParseNext(tokens)
+ if err == io.EOF {
+ break
+ }
+ // Do something with stmt or err.
+}
+```
+
+See [parse_test.go](https://github.com/xwb1989/sqlparser/blob/master/parse_test.go) for more examples, or read the [godoc](https://godoc.org/github.com/xwb1989/sqlparser).
+
+
+## Porting Instructions
+
+You only need the below if you plan to try and keep this library up to date with [vitessio/vitess](https://github.com/vitessio/vitess).
+
+### Keeping up to date
+
+```bash
+shopt -s nullglob
+VITESS=${GOPATH?}/src/vitess.io/vitess/go/
+XWB1989=${GOPATH?}/src/github.com/xwb1989/sqlparser/
+
+# Create patches for everything that changed
+LASTIMPORT=1b7879cb91f1dfe1a2dfa06fea96e951e3a7aec5
+for path in ${VITESS?}/{vt/sqlparser,sqltypes,bytes2,hack}; do
+ cd ${path}
+ git format-patch ${LASTIMPORT?} .
+done;
+
+# Apply patches to the dependencies
+cd ${XWB1989?}
+git am --directory dependency -p2 ${VITESS?}/{sqltypes,bytes2,hack}/*.patch
+
+# Apply the main patches to the repo
+cd ${XWB1989?}
+git am -p4 ${VITESS?}/vt/sqlparser/*.patch
+
+# If you encounter diff failures, manually fix them with
+patch -p4 < .git/rebase-apply/patch
+...
+git add name_of_files
+git am --continue
+
+# Cleanup
+rm ${VITESS?}/{sqltypes,bytes2,hack}/*.patch ${VITESS?}/*.patch
+
+# and Finally update the LASTIMPORT in this README.
+```
+
+### Fresh install
+
+TODO: Change these instructions to use git to copy the files; that'll make later patching easier.
+
+```bash
+VITESS=${GOPATH?}/src/vitess.io/vitess/go/
+XWB1989=${GOPATH?}/src/github.com/xwb1989/sqlparser/
+
+cd ${XWB1989?}
+
+# Copy all the code
+cp -pr ${VITESS?}/vt/sqlparser/ .
+cp -pr ${VITESS?}/sqltypes dependency
+cp -pr ${VITESS?}/bytes2 dependency
+cp -pr ${VITESS?}/hack dependency
+
+# Delete some code we haven't ported
+rm dependency/sqltypes/arithmetic.go dependency/sqltypes/arithmetic_test.go dependency/sqltypes/event_token.go dependency/sqltypes/event_token_test.go dependency/sqltypes/proto3.go dependency/sqltypes/proto3_test.go dependency/sqltypes/query_response.go dependency/sqltypes/result.go dependency/sqltypes/result_test.go
+
+# Some automated fixes
+
+# Fix imports
+sed -i '.bak' 's_vitess.io/vitess/go/vt/proto/query_github.com/xwb1989/sqlparser/dependency/querypb_g' *.go dependency/sqltypes/*.go
+sed -i '.bak' 's_vitess.io/vitess/go/_github.com/xwb1989/sqlparser/dependency/_g' *.go dependency/sqltypes/*.go
+
+# Copy the proto, but basically drop everything we don't want
+cp -pr ${VITESS?}/vt/proto/query dependency/querypb
+
+sed -i '.bak' 's_.*Descriptor.*__g' dependency/querypb/*.go
+sed -i '.bak' 's_.*ProtoMessage.*__g' dependency/querypb/*.go
+
+sed -i '.bak' 's/proto.CompactTextString(m)/"TODO"/g' dependency/querypb/*.go
+sed -i '.bak' 's/proto.EnumName/EnumName/g' dependency/querypb/*.go
+
+sed -i '.bak' 's/proto.Equal/reflect.DeepEqual/g' dependency/sqltypes/*.go
+
+# Remove the error library
+sed -i '.bak' 's/vterrors.Errorf([^,]*, /fmt.Errorf(/g' *.go dependency/sqltypes/*.go
+sed -i '.bak' 's/vterrors.New([^,]*, /errors.New(/g' *.go dependency/sqltypes/*.go
+```
+
+### Testing
+
+```bash
+VITESS=${GOPATH?}/src/vitess.io/vitess/go/
+XWB1989=${GOPATH?}/src/github.com/xwb1989/sqlparser/
+
+cd ${XWB1989?}
+
+# Test, fix and repeat
+go test ./...
+
+# Finally make some diffs (for later reference)
+diff -u ${VITESS?}/sqltypes/ ${XWB1989?}/dependency/sqltypes/ > ${XWB1989?}/patches/sqltypes.patch
+diff -u ${VITESS?}/bytes2/ ${XWB1989?}/dependency/bytes2/ > ${XWB1989?}/patches/bytes2.patch
+diff -u ${VITESS?}/vt/proto/query/ ${XWB1989?}/dependency/querypb/ > ${XWB1989?}/patches/querypb.patch
+diff -u ${VITESS?}/vt/sqlparser/ ${XWB1989?}/ > ${XWB1989?}/patches/sqlparser.patch
+```
\ No newline at end of file
diff --git a/vendor/github.com/CovenantSQL/sqlparser/analyzer.go b/vendor/github.com/CovenantSQL/sqlparser/analyzer.go
new file mode 100644
index 000000000..eff1c8f89
--- /dev/null
+++ b/vendor/github.com/CovenantSQL/sqlparser/analyzer.go
@@ -0,0 +1,343 @@
+/*
+Copyright 2017 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sqlparser
+
+// analyzer.go contains utility analysis functions.
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode"
+
+ "github.com/CovenantSQL/sqlparser/dependency/sqltypes"
+)
+
+// These constants are used to identify the SQL statement type.
+const (
+ StmtSelect = iota
+ StmtStream
+ StmtInsert
+ StmtReplace
+ StmtUpdate
+ StmtDelete
+ StmtDDL
+ StmtBegin
+ StmtCommit
+ StmtRollback
+ StmtSet
+ StmtShow
+ StmtUse
+ StmtOther
+ StmtUnknown
+ StmtComment
+)
+
+// Preview analyzes the beginning of the query using a simpler and faster
+// textual comparison to identify the statement type.
+func Preview(sql string) int {
+ trimmed := StripLeadingComments(sql)
+
+ firstWord := trimmed
+ if end := strings.IndexFunc(trimmed, unicode.IsSpace); end != -1 {
+ firstWord = trimmed[:end]
+ }
+ firstWord = strings.TrimLeftFunc(firstWord, func(r rune) bool { return !unicode.IsLetter(r) })
+ // Comparison is done in order of priority.
+ loweredFirstWord := strings.ToLower(firstWord)
+ switch loweredFirstWord {
+ case "select":
+ return StmtSelect
+ case "stream":
+ return StmtStream
+ case "insert":
+ return StmtInsert
+ case "replace":
+ return StmtReplace
+ case "update":
+ return StmtUpdate
+ case "delete":
+ return StmtDelete
+ }
+ // For the following statements it is not sufficient to rely
+ // on loweredFirstWord. This is because they are not statements
+ // in the grammar and we are relying on Preview to parse them.
+ // For instance, we don't want: "BEGIN JUNK" to be parsed
+ // as StmtBegin.
+ trimmedNoComments, _ := SplitMarginComments(trimmed)
+ switch strings.ToLower(trimmedNoComments) {
+ case "begin", "start transaction":
+ return StmtBegin
+ case "commit":
+ return StmtCommit
+ case "rollback":
+ return StmtRollback
+ }
+ switch loweredFirstWord {
+ case "create", "alter", "rename", "drop", "truncate":
+ return StmtDDL
+ case "set":
+ return StmtSet
+ case "show":
+ return StmtShow
+ case "use":
+ return StmtUse
+ case "analyze", "describe", "desc", "explain", "repair", "optimize":
+ return StmtOther
+ }
+ if strings.Index(trimmed, "/*!") == 0 {
+ return StmtComment
+ }
+ return StmtUnknown
+}
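+
+// Illustrative classifications (a hedged sketch of the rules above):
+//
+//   Preview("select 1")     // StmtSelect
+//   Preview("BEGIN")        // StmtBegin
+//   Preview("begin junk")   // StmtUnknown, deliberately not StmtBegin
+//   Preview("drop table t") // StmtDDL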
+
+// StmtType returns the statement type as a string
+func StmtType(stmtType int) string {
+ switch stmtType {
+ case StmtSelect:
+ return "SELECT"
+ case StmtStream:
+ return "STREAM"
+ case StmtInsert:
+ return "INSERT"
+ case StmtReplace:
+ return "REPLACE"
+ case StmtUpdate:
+ return "UPDATE"
+ case StmtDelete:
+ return "DELETE"
+ case StmtDDL:
+ return "DDL"
+ case StmtBegin:
+ return "BEGIN"
+ case StmtCommit:
+ return "COMMIT"
+ case StmtRollback:
+ return "ROLLBACK"
+ case StmtSet:
+ return "SET"
+ case StmtShow:
+ return "SHOW"
+ case StmtUse:
+ return "USE"
+ case StmtOther:
+ return "OTHER"
+ default:
+ return "UNKNOWN"
+ }
+}
+
+// IsDML returns true if the query is an INSERT, UPDATE or DELETE statement.
+func IsDML(sql string) bool {
+ switch Preview(sql) {
+ case StmtInsert, StmtReplace, StmtUpdate, StmtDelete:
+ return true
+ }
+ return false
+}
+
+// GetTableName returns the table name from the SimpleTableExpr
+// only if it's a simple expression. Otherwise, it returns "".
+func GetTableName(node SimpleTableExpr) TableIdent {
+ if n, ok := node.(TableName); ok && n.Qualifier.IsEmpty() {
+ return n.Name
+ }
+ // sub-select or '.' expression
+ return NewTableIdent("")
+}
+
+// IsColName returns true if the Expr is a *ColName.
+func IsColName(node Expr) bool {
+ _, ok := node.(*ColName)
+ return ok
+}
+
+// IsValue returns true if the Expr is a string, integral or value arg.
+// NULL is not considered to be a value.
+func IsValue(node Expr) bool {
+ switch v := node.(type) {
+ case *SQLVal:
+ switch v.Type {
+ case StrVal, HexVal, IntVal, ValArg:
+ return true
+ }
+ }
+ return false
+}
+
+// IsNull returns true if the Expr is SQL NULL
+func IsNull(node Expr) bool {
+ switch node.(type) {
+ case *NullVal:
+ return true
+ }
+ return false
+}
+
+// IsSimpleTuple returns true if the Expr is a ValTuple that
+// contains simple values or if it's a list arg.
+func IsSimpleTuple(node Expr) bool {
+ switch vals := node.(type) {
+ case ValTuple:
+ for _, n := range vals {
+ if !IsValue(n) {
+ return false
+ }
+ }
+ return true
+ case ListArg:
+ return true
+ }
+ // It's a subquery
+ return false
+}
+
+// NewPlanValue builds a sqltypes.PlanValue from an Expr.
+func NewPlanValue(node Expr) (sqltypes.PlanValue, error) {
+ switch node := node.(type) {
+ case *SQLVal:
+ switch node.Type {
+ case ValArg:
+ return sqltypes.PlanValue{Key: string(node.Val[1:])}, nil
+ case IntVal:
+ n, err := sqltypes.NewIntegral(string(node.Val))
+ if err != nil {
+ return sqltypes.PlanValue{}, fmt.Errorf("%v", err)
+ }
+ return sqltypes.PlanValue{Value: n}, nil
+ case StrVal:
+ return sqltypes.PlanValue{Value: sqltypes.MakeTrusted(sqltypes.VarBinary, node.Val)}, nil
+ case HexVal:
+ v, err := node.HexDecode()
+ if err != nil {
+ return sqltypes.PlanValue{}, fmt.Errorf("%v", err)
+ }
+ return sqltypes.PlanValue{Value: sqltypes.MakeTrusted(sqltypes.VarBinary, v)}, nil
+ }
+ case ListArg:
+ return sqltypes.PlanValue{ListKey: string(node[2:])}, nil
+ case ValTuple:
+ pv := sqltypes.PlanValue{
+ Values: make([]sqltypes.PlanValue, 0, len(node)),
+ }
+ for _, val := range node {
+ innerpv, err := NewPlanValue(val)
+ if err != nil {
+ return sqltypes.PlanValue{}, err
+ }
+ if innerpv.ListKey != "" || innerpv.Values != nil {
+ return sqltypes.PlanValue{}, errors.New("unsupported: nested lists")
+ }
+ pv.Values = append(pv.Values, innerpv)
+ }
+ return pv, nil
+ case *NullVal:
+ return sqltypes.PlanValue{}, nil
+ }
+ return sqltypes.PlanValue{}, fmt.Errorf("expression is too complex '%v'", String(node))
+}
+
+// StringIn is a convenience function that returns
+// true if str matches any of the values.
+func StringIn(str string, values ...string) bool {
+ for _, val := range values {
+ if str == val {
+ return true
+ }
+ }
+ return false
+}
+
+// SetKey is the extracted key from one SetExpr
+type SetKey struct {
+ Key string
+ Scope string
+}
+
+// ExtractSetValues returns a map of key-value pairs
+// if the query is a SET statement. Values can be bool, int64 or string.
+// Since set variable names are case insensitive, all keys are returned
+// as lower case.
+func ExtractSetValues(sql string) (keyValues map[SetKey]interface{}, scope string, err error) {
+ stmt, err := Parse(sql)
+ if err != nil {
+ return nil, "", err
+ }
+ setStmt, ok := stmt.(*Set)
+ if !ok {
+ return nil, "", fmt.Errorf("ast did not yield *sqlparser.Set: %T", stmt)
+ }
+ result := make(map[SetKey]interface{})
+ for _, expr := range setStmt.Exprs {
+ scope := SessionStr
+ key := expr.Name.Lowered()
+ switch {
+ case strings.HasPrefix(key, "@@global."):
+ scope = GlobalStr
+ key = strings.TrimPrefix(key, "@@global.")
+ case strings.HasPrefix(key, "@@session."):
+ key = strings.TrimPrefix(key, "@@session.")
+ case strings.HasPrefix(key, "@@"):
+ key = strings.TrimPrefix(key, "@@")
+ }
+
+ if strings.HasPrefix(expr.Name.Lowered(), "@@") {
+ if setStmt.Scope != "" && scope != "" {
+ return nil, "", fmt.Errorf("unsupported in set: mixed using of variable scope")
+ }
+ _, out := NewStringTokenizer(key).Scan()
+ key = string(out)
+ }
+
+ setKey := SetKey{
+ Key: key,
+ Scope: scope,
+ }
+
+ switch expr := expr.Expr.(type) {
+ case *SQLVal:
+ switch expr.Type {
+ case StrVal:
+ result[setKey] = strings.ToLower(string(expr.Val))
+ case IntVal:
+ num, err := strconv.ParseInt(string(expr.Val), 0, 64)
+ if err != nil {
+ return nil, "", err
+ }
+ result[setKey] = num
+ default:
+ return nil, "", fmt.Errorf("invalid value type: %v", String(expr))
+ }
+ case BoolVal:
+ var val int64
+ if expr {
+ val = 1
+ }
+ result[setKey] = val
+ case *ColName:
+ result[setKey] = expr.Name.String()
+ case *NullVal:
+ result[setKey] = nil
+ case *Default:
+ result[setKey] = "default"
+ default:
+ return nil, "", fmt.Errorf("invalid syntax: %s", String(expr))
+ }
+ }
+ return result, strings.ToLower(setStmt.Scope), nil
+}
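+
+// A rough usage sketch (assuming the session scope default described above):
+//
+//   kv, scope, err := ExtractSetValues("set autocommit = 1")
+//   // kv[SetKey{Key: "autocommit", Scope: "session"}] == int64(1)
+//   // scope == "" and err == nil for this statement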
diff --git a/vendor/github.com/CovenantSQL/sqlparser/ast.go b/vendor/github.com/CovenantSQL/sqlparser/ast.go
new file mode 100644
index 000000000..c850c5006
--- /dev/null
+++ b/vendor/github.com/CovenantSQL/sqlparser/ast.go
@@ -0,0 +1,3310 @@
+/*
+Copyright 2017 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sqlparser
+
+import (
+ "bytes"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "strings"
+
+ "github.com/CovenantSQL/sqlparser/dependency/querypb"
+ "github.com/CovenantSQL/sqlparser/dependency/sqltypes"
+)
+
+// Instructions for creating new types: If a type
+// needs to satisfy an interface, declare that function
+// along with that interface. This will help users
+// identify the list of types to which they can assert
+// those interfaces.
+// If the member of a type has a string with a predefined
+// list of values, declare those values as const following
+// the type.
+// For interfaces that define dummy functions to consolidate
+// a set of types, define the function as iTypeName.
+// This will help avoid name collisions.
+
+// Parse parses the SQL in full and returns a Statement, which
+// is the AST representation of the query. If a DDL statement
+// is partially parsed but still contains a syntax error, the
+// error is ignored and the DDL is returned anyway.
+func Parse(sql string) (Statement, error) {
+ tokenizer := NewStringTokenizer(sql)
+ if yyParse(tokenizer) != 0 {
+ if tokenizer.partialDDL != nil {
+ log.Printf("ignoring error parsing DDL '%s': %v", sql, tokenizer.LastError)
+ tokenizer.ParseTree = tokenizer.partialDDL
+ return tokenizer.ParseTree, nil
+ }
+ return nil, tokenizer.LastError
+ }
+ return tokenizer.ParseTree, nil
+}
+
+// ParseStrictDDL is the same as Parse except it errors on
+// partially parsed DDL statements.
+func ParseStrictDDL(sql string) (Statement, error) {
+ tokenizer := NewStringTokenizer(sql)
+ if yyParse(tokenizer) != 0 {
+ return nil, tokenizer.LastError
+ }
+ return tokenizer.ParseTree, nil
+}
+
+// ParseNext parses a single SQL statement from the tokenizer
+// returning a Statement which is the AST representation of the query.
+// The tokenizer will always read up to the end of the statement, allowing for
+// the next call to ParseNext to parse any subsequent SQL statements. When
+// there are no more statements to parse, an error of io.EOF is returned.
+func ParseNext(tokenizer *Tokenizer) (Statement, error) {
+ if tokenizer.lastChar == ';' {
+ tokenizer.next()
+ tokenizer.skipBlank()
+ }
+ if tokenizer.lastChar == eofChar {
+ return nil, io.EOF
+ }
+
+ tokenizer.reset()
+ tokenizer.multi = true
+ if yyParse(tokenizer) != 0 {
+ if tokenizer.partialDDL != nil {
+ tokenizer.ParseTree = tokenizer.partialDDL
+ return tokenizer.ParseTree, nil
+ }
+ return nil, tokenizer.LastError
+ }
+ return tokenizer.ParseTree, nil
+}
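+
+// A hedged sketch of iterating over multiple statements with ParseNext
+// (mirroring the README example for this package):
+//
+//   tokens := NewStringTokenizer("select 1; select 2")
+//   for {
+//       stmt, err := ParseNext(tokens)
+//       if err == io.EOF {
+//           break
+//       }
+//       _ = stmt // handle stmt (or a non-EOF err)
+//   }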
+
+// SplitStatement returns the first SQL statement up to either a ';' or EOF,
+// and the remainder of the given buffer.
+func SplitStatement(blob string) (string, string, error) {
+ tokenizer := NewStringTokenizer(blob)
+ tkn := 0
+ for {
+ tkn, _ = tokenizer.Scan()
+ if tkn == 0 || tkn == ';' || tkn == eofChar {
+ break
+ }
+ }
+ if tokenizer.LastError != nil {
+ return "", "", tokenizer.LastError
+ }
+ if tkn == ';' {
+ return blob[:tokenizer.Position-2], blob[tokenizer.Position-1:], nil
+ }
+ return blob, "", nil
+}
+
+// SplitStatementToPieces splits a raw SQL statement that may contain multiple
+// statements into individual pieces, and returns the pieces the blob contains,
+// or an error if the SQL cannot be parsed.
+func SplitStatementToPieces(blob string) (pieces []string, err error) {
+ pieces = make([]string, 0, 16)
+ tokenizer := NewStringTokenizer(blob)
+
+ tkn := 0
+ var stmt string
+ stmtBegin := 0
+ for {
+ tkn, _ = tokenizer.Scan()
+ if tkn == ';' {
+ stmt = blob[stmtBegin : tokenizer.Position-2]
+ pieces = append(pieces, stmt)
+ stmtBegin = tokenizer.Position - 1
+
+ } else if tkn == 0 || tkn == eofChar {
+ blobTail := tokenizer.Position - 2
+
+ if stmtBegin < blobTail {
+ stmt = blob[stmtBegin : blobTail+1]
+ pieces = append(pieces, stmt)
+ }
+ break
+ }
+ }
+
+ err = tokenizer.LastError
+ return
+}
+
+// SQLNode defines the interface for all nodes
+// generated by the parser.
+type SQLNode interface {
+ Format(buf *TrackedBuffer)
+ // walkSubtree calls visit on all underlying nodes
+ // of the subtree, but not the current one. Walking
+ // must be interrupted if visit returns an error.
+ walkSubtree(visit Visit) error
+}
+
+// Visit defines the signature of a function that
+// can be used to visit all nodes of a parse tree.
+type Visit func(node SQLNode) (kontinue bool, err error)
+
+// Walk calls visit on every node.
+// If visit returns true, the underlying nodes
+// are also visited. If it returns an error, walking
+// is interrupted, and the error is returned.
+func Walk(visit Visit, nodes ...SQLNode) error {
+ for _, node := range nodes {
+ if node == nil {
+ continue
+ }
+ kontinue, err := visit(node)
+ if err != nil {
+ return err
+ }
+ if kontinue {
+ err = node.walkSubtree(visit)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
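+
+// For example (an illustrative sketch, where stmt is a previously parsed
+// Statement), counting column references in a parse tree:
+//
+//   count := 0
+//   _ = Walk(func(node SQLNode) (kontinue bool, err error) {
+//       if _, ok := node.(*ColName); ok {
+//           count++
+//       }
+//       return true, nil
+//   }, stmt)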
+
+// String returns a string representation of an SQLNode.
+func String(node SQLNode) string {
+ if node == nil {
+ return ""
+ }
+
+ buf := NewTrackedBuffer(nil)
+ buf.Myprintf("%v", node)
+ return buf.String()
+}
+
+// Append appends the SQLNode to the buffer.
+func Append(buf *bytes.Buffer, node SQLNode) {
+ tbuf := &TrackedBuffer{
+ Buffer: buf,
+ }
+ node.Format(tbuf)
+}
+
+// Statement represents a statement.
+type Statement interface {
+ iStatement()
+ SQLNode
+}
+
+func (*Union) iStatement() {}
+func (*Select) iStatement() {}
+func (*Stream) iStatement() {}
+func (*Insert) iStatement() {}
+func (*Update) iStatement() {}
+func (*Delete) iStatement() {}
+func (*Set) iStatement() {}
+func (*DBDDL) iStatement() {}
+func (*DDL) iStatement() {}
+func (*Show) iStatement() {}
+
+// ParenSelect can actually not be a top level statement,
+// but we have to allow it because it's a requirement
+// of SelectStatement.
+func (*ParenSelect) iStatement() {}
+
+// SelectStatement any SELECT statement.
+type SelectStatement interface {
+ iSelectStatement()
+ iStatement()
+ iInsertRows()
+ AddOrder(*Order)
+ SetLimit(*Limit)
+ SQLNode
+}
+
+func (*Select) iSelectStatement() {}
+func (*Union) iSelectStatement() {}
+func (*ParenSelect) iSelectStatement() {}
+
+// Select represents a SELECT statement.
+type Select struct {
+ Cache string
+ Comments Comments
+ Distinct string
+ Hints string
+ SelectExprs SelectExprs
+ From TableExprs
+ Where *Where
+ GroupBy GroupBy
+ Having *Where
+ OrderBy OrderBy
+ Limit *Limit
+ Lock string
+}
+
+// Select.Distinct
+const (
+ DistinctStr = "distinct "
+ StraightJoinHint = "straight_join "
+)
+
+// Select.Lock
+const (
+ ForUpdateStr = " for update"
+ ShareModeStr = " lock in share mode"
+)
+
+// Select.Cache
+const (
+ SQLCacheStr = "sql_cache "
+ SQLNoCacheStr = "sql_no_cache "
+)
+
+// AddOrder adds an order by element
+func (node *Select) AddOrder(order *Order) {
+ node.OrderBy = append(node.OrderBy, order)
+}
+
+// SetLimit sets the limit clause
+func (node *Select) SetLimit(limit *Limit) {
+ node.Limit = limit
+}
+
+// Format formats the node.
+func (node *Select) Format(buf *TrackedBuffer) {
+ if node.From != nil {
+ buf.Myprintf("select %v%s%s%s%v from %v%v%v%v%v%v%s",
+ node.Comments, node.Cache, node.Distinct, node.Hints, node.SelectExprs,
+ node.From, node.Where,
+ node.GroupBy, node.Having, node.OrderBy,
+ node.Limit, node.Lock)
+ } else {
+ buf.Myprintf("select %v%s%s%s%v",
+ node.Comments, node.Cache, node.Distinct, node.Hints, node.SelectExprs)
+ }
+}
+
+func (node *Select) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Comments,
+ node.SelectExprs,
+ node.From,
+ node.Where,
+ node.GroupBy,
+ node.Having,
+ node.OrderBy,
+ node.Limit,
+ )
+}
+
+// AddWhere adds the boolean expression to the
+// WHERE clause as an AND condition. If the expression
+// is an OR clause, it parenthesizes it. Currently,
+// the OR operator is the only one that's lower precedence
+// than AND.
+func (node *Select) AddWhere(expr Expr) {
+ if _, ok := expr.(*OrExpr); ok {
+ expr = &ParenExpr{Expr: expr}
+ }
+ if node.Where == nil {
+ node.Where = &Where{
+ Type: WhereStr,
+ Expr: expr,
+ }
+ return
+ }
+ node.Where.Expr = &AndExpr{
+ Left: node.Where.Expr,
+ Right: expr,
+ }
+ return
+}
+
+// AddHaving adds the boolean expression to the
+// HAVING clause as an AND condition. If the expression
+// is an OR clause, it parenthesizes it. Currently,
+// the OR operator is the only one that's lower precedence
+// than AND.
+func (node *Select) AddHaving(expr Expr) {
+ if _, ok := expr.(*OrExpr); ok {
+ expr = &ParenExpr{Expr: expr}
+ }
+ if node.Having == nil {
+ node.Having = &Where{
+ Type: HavingStr,
+ Expr: expr,
+ }
+ return
+ }
+ node.Having.Expr = &AndExpr{
+ Left: node.Having.Expr,
+ Right: expr,
+ }
+ return
+}
+
+// ParenSelect is a parenthesized SELECT statement.
+type ParenSelect struct {
+ Select SelectStatement
+}
+
+// AddOrder adds an order by element
+func (node *ParenSelect) AddOrder(order *Order) {
+ panic("unreachable")
+}
+
+// SetLimit sets the limit clause
+func (node *ParenSelect) SetLimit(limit *Limit) {
+ panic("unreachable")
+}
+
+// Format formats the node.
+func (node *ParenSelect) Format(buf *TrackedBuffer) {
+ buf.Myprintf("(%v)", node.Select)
+}
+
+func (node *ParenSelect) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Select,
+ )
+}
+
+// Union represents a UNION statement.
+type Union struct {
+ Type string
+ Left, Right SelectStatement
+ OrderBy OrderBy
+ Limit *Limit
+ Lock string
+}
+
+// Union.Type
+const (
+ UnionStr = "union"
+ UnionAllStr = "union all"
+ UnionDistinctStr = "union distinct"
+)
+
+// AddOrder adds an order by element
+func (node *Union) AddOrder(order *Order) {
+ node.OrderBy = append(node.OrderBy, order)
+}
+
+// SetLimit sets the limit clause
+func (node *Union) SetLimit(limit *Limit) {
+ node.Limit = limit
+}
+
+// Format formats the node.
+func (node *Union) Format(buf *TrackedBuffer) {
+ buf.Myprintf("%v %s %v%v%v%s", node.Left, node.Type, node.Right,
+ node.OrderBy, node.Limit, node.Lock)
+}
+
+func (node *Union) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Left,
+ node.Right,
+ )
+}
+
+// Stream represents a SELECT statement.
+type Stream struct {
+ Comments Comments
+ SelectExpr SelectExpr
+ Table TableName
+}
+
+// Format formats the node.
+func (node *Stream) Format(buf *TrackedBuffer) {
+ buf.Myprintf("stream %v%v from %v",
+ node.Comments, node.SelectExpr, node.Table)
+}
+
+func (node *Stream) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Comments,
+ node.SelectExpr,
+ node.Table,
+ )
+}
+
+// Insert represents an INSERT or REPLACE statement.
+// Per the MySQL docs, http://dev.mysql.com/doc/refman/5.7/en/replace.html
+// Replace is the counterpart to `INSERT IGNORE`, and works exactly like a
+// normal INSERT except if the row exists. In that case it first deletes
+// the row and re-inserts with new values. For that reason we keep it as an Insert struct.
+// Replaces are currently disallowed in sharded schemas because
+// of the implications the deletion part may have on vindexes.
+// If you add fields here, consider adding them to calls to validateSubquerySamePlan.
+type Insert struct {
+ Action string
+ Comments Comments
+ Ignore string
+ Table TableName
+ Partitions Partitions
+ Columns Columns
+ Rows InsertRows
+ OnDup OnDup
+}
+
+// DDL strings.
+const (
+ InsertStr = "insert"
+ ReplaceStr = "replace"
+)
+
+// Format formats the node.
+func (node *Insert) Format(buf *TrackedBuffer) {
+ buf.Myprintf("%s %v%sinto %v%v%v %v%v",
+ node.Action,
+ node.Comments, node.Ignore,
+ node.Table, node.Partitions, node.Columns, node.Rows, node.OnDup)
+}
+
+func (node *Insert) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Comments,
+ node.Table,
+ node.Columns,
+ node.Rows,
+ node.OnDup,
+ )
+}
+
+// InsertRows represents the rows for an INSERT statement.
+type InsertRows interface {
+ iInsertRows()
+ SQLNode
+}
+
+func (*Select) iInsertRows() {}
+func (*Union) iInsertRows() {}
+func (Values) iInsertRows() {}
+func (*ParenSelect) iInsertRows() {}
+
+// Update represents an UPDATE statement.
+// If you add fields here, consider adding them to calls to validateSubquerySamePlan.
+type Update struct {
+ Comments Comments
+ TableExprs TableExprs
+ Exprs UpdateExprs
+ Where *Where
+ OrderBy OrderBy
+ Limit *Limit
+}
+
+// Format formats the node.
+func (node *Update) Format(buf *TrackedBuffer) {
+ buf.Myprintf("update %v%v set %v%v%v%v",
+ node.Comments, node.TableExprs,
+ node.Exprs, node.Where, node.OrderBy, node.Limit)
+}
+
+func (node *Update) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Comments,
+ node.TableExprs,
+ node.Exprs,
+ node.Where,
+ node.OrderBy,
+ node.Limit,
+ )
+}
+
+// Delete represents a DELETE statement.
+// If you add fields here, consider adding them to calls to validateSubquerySamePlan.
+type Delete struct {
+ Comments Comments
+ Targets TableNames
+ TableExprs TableExprs
+ Partitions Partitions
+ Where *Where
+ OrderBy OrderBy
+ Limit *Limit
+}
+
+// Format formats the node.
+func (node *Delete) Format(buf *TrackedBuffer) {
+ buf.Myprintf("delete %v", node.Comments)
+ if node.Targets != nil {
+ buf.Myprintf("%v ", node.Targets)
+ }
+ buf.Myprintf("from %v%v%v%v%v", node.TableExprs, node.Partitions, node.Where, node.OrderBy, node.Limit)
+}
+
+func (node *Delete) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Comments,
+ node.Targets,
+ node.TableExprs,
+ node.Where,
+ node.OrderBy,
+ node.Limit,
+ )
+}
+
+// Set represents a SET statement.
+type Set struct {
+ Comments Comments
+ Exprs SetExprs
+ Scope string
+}
+
+// Set.Scope or Show.Scope
+const (
+ SessionStr = "session"
+ GlobalStr = "global"
+)
+
+// Format formats the node.
+func (node *Set) Format(buf *TrackedBuffer) {
+ if node.Scope == "" {
+ buf.Myprintf("set %v%v", node.Comments, node.Exprs)
+ } else {
+ buf.Myprintf("set %v%s %v", node.Comments, node.Scope, node.Exprs)
+ }
+}
+
+func (node *Set) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Comments,
+ node.Exprs,
+ )
+}
+
+// DBDDL represents a CREATE, DROP database statement.
+type DBDDL struct {
+ Action string
+ DBName string
+ IfExists bool
+ Collate string
+ Charset string
+}
+
+// Format formats the node.
+func (node *DBDDL) Format(buf *TrackedBuffer) {
+ switch node.Action {
+ case CreateStr:
+ buf.WriteString(fmt.Sprintf("%s database %s", node.Action, node.DBName))
+ case DropStr:
+ exists := ""
+ if node.IfExists {
+ exists = " if exists"
+ }
+ buf.WriteString(fmt.Sprintf("%s database%s %v", node.Action, exists, node.DBName))
+ }
+}
+
+// walkSubtree walks the nodes of the subtree.
+func (node *DBDDL) walkSubtree(visit Visit) error {
+ return nil
+}
+
+// DDL represents a CREATE, ALTER, DROP, RENAME or TRUNCATE statement.
+// Table is set for AlterStr, DropStr, RenameStr, TruncateStr
+// NewName is set for AlterStr, CreateStr, RenameStr.
+// VindexSpec is set for CreateVindexStr, DropVindexStr, AddColVindexStr, DropColVindexStr
+// VindexCols is set for AddColVindexStr
+type DDL struct {
+ Action string
+ Table TableName
+ NewName TableName
+ IfExists bool
+ TableSpec *TableSpec
+ PartitionSpec *PartitionSpec
+ VindexSpec *VindexSpec
+ VindexCols []ColIdent
+}
+
+// DDL strings.
+const (
+ CreateStr = "create"
+ AlterStr = "alter"
+ DropStr = "drop"
+ RenameStr = "rename"
+ DropIndexStr = "drop index"
+ CreateIndexStr = "create index"
+ CreateVindexStr = "create vindex"
+ AddColVindexStr = "add vindex"
+ DropColVindexStr = "drop vindex"
+
+ // Vindex DDL param to specify the owner of a vindex
+ VindexOwnerStr = "owner"
+)
+
+// Format formats the node.
+func (node *DDL) Format(buf *TrackedBuffer) {
+ switch node.Action {
+ case CreateStr:
+ if node.TableSpec == nil {
+ buf.Myprintf("%s table %v", node.Action, node.NewName)
+ } else {
+ buf.Myprintf("%s table %v %v", node.Action, node.NewName, node.TableSpec)
+ }
+ case DropStr:
+ exists := ""
+ if node.IfExists {
+ exists = " if exists"
+ }
+ buf.Myprintf("%s table%s %v", node.Action, exists, node.Table)
+ case CreateIndexStr:
+ buf.Myprintf("%s on %v", node.Action, node.Table)
+ case DropIndexStr:
+ exists := ""
+ if node.IfExists {
+ exists = " if exists"
+ }
+ buf.Myprintf("%s%s %v", node.Action, exists, node.Table)
+ case RenameStr:
+ buf.Myprintf("alter table %v %s to %v", node.Table, node.Action, node.NewName)
+ case AlterStr:
+ if node.PartitionSpec != nil {
+ buf.Myprintf("%s table %v %v", node.Action, node.Table, node.PartitionSpec)
+ } else {
+ buf.Myprintf("%s table %v", node.Action, node.Table)
+ }
+ case CreateVindexStr:
+ buf.Myprintf("%s %v %v", node.Action, node.VindexSpec.Name, node.VindexSpec)
+ case AddColVindexStr:
+ buf.Myprintf("alter table %v %s %v (", node.Table, node.Action, node.VindexSpec.Name)
+ for i, col := range node.VindexCols {
+ if i != 0 {
+ buf.Myprintf(", %v", col)
+ } else {
+ buf.Myprintf("%v", col)
+ }
+ }
+ buf.Myprintf(")")
+ if node.VindexSpec.Type.String() != "" {
+ buf.Myprintf(" %v", node.VindexSpec)
+ }
+ case DropColVindexStr:
+ buf.Myprintf("alter table %v %s %v", node.Table, node.Action, node.VindexSpec.Name)
+ default:
+ buf.Myprintf("%s table %v", node.Action, node.Table)
+ }
+}
+
+func (node *DDL) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Table,
+ node.NewName,
+ )
+}
+
+// Partition strings
+const (
+ ReorganizeStr = "reorganize partition"
+)
+
+// PartitionSpec describe partition actions (for alter and create)
+type PartitionSpec struct {
+ Action string
+ Name ColIdent
+ Definitions []*PartitionDefinition
+}
+
+// Format formats the node.
+func (node *PartitionSpec) Format(buf *TrackedBuffer) {
+ switch node.Action {
+ case ReorganizeStr:
+ buf.Myprintf("%s %v into (", node.Action, node.Name)
+ var prefix string
+ for _, pd := range node.Definitions {
+ buf.Myprintf("%s%v", prefix, pd)
+ prefix = ", "
+ }
+ buf.Myprintf(")")
+ default:
+ panic("unimplemented")
+ }
+}
+
+func (node *PartitionSpec) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ if err := Walk(visit, node.Name); err != nil {
+ return err
+ }
+ for _, def := range node.Definitions {
+ if err := Walk(visit, def); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// PartitionDefinition describes a very minimal partition definition
+type PartitionDefinition struct {
+ Name ColIdent
+ Limit Expr
+ Maxvalue bool
+}
+
+// Format formats the node
+func (node *PartitionDefinition) Format(buf *TrackedBuffer) {
+ if !node.Maxvalue {
+ buf.Myprintf("partition %v values less than (%v)", node.Name, node.Limit)
+ } else {
+ buf.Myprintf("partition %v values less than (maxvalue)", node.Name)
+ }
+}
+
+func (node *PartitionDefinition) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Name,
+ node.Limit,
+ )
+}
+
+// TableSpec describes the structure of a table from a CREATE TABLE statement
+type TableSpec struct {
+ Columns []*ColumnDefinition
+ Indexes []*IndexDefinition
+ Options string
+}
+
+// Format formats the node.
+func (ts *TableSpec) Format(buf *TrackedBuffer) {
+ buf.Myprintf("(\n")
+ for i, col := range ts.Columns {
+ if i == 0 {
+ buf.Myprintf("\t%v", col)
+ } else {
+ buf.Myprintf(",\n\t%v", col)
+ }
+ }
+ for _, idx := range ts.Indexes {
+ buf.Myprintf(",\n\t%v", idx)
+ }
+
+ buf.Myprintf("\n)%s", strings.Replace(ts.Options, ", ", ",\n ", -1))
+}
+
+// AddColumn appends the given column to the list in the spec
+func (ts *TableSpec) AddColumn(cd *ColumnDefinition) {
+ ts.Columns = append(ts.Columns, cd)
+}
+
+// AddIndex appends the given index to the list in the spec
+func (ts *TableSpec) AddIndex(id *IndexDefinition) {
+ ts.Indexes = append(ts.Indexes, id)
+}
+
+func (ts *TableSpec) walkSubtree(visit Visit) error {
+ if ts == nil {
+ return nil
+ }
+
+ for _, n := range ts.Columns {
+ if err := Walk(visit, n); err != nil {
+ return err
+ }
+ }
+
+ for _, n := range ts.Indexes {
+ if err := Walk(visit, n); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ColumnDefinition describes a column in a CREATE TABLE statement
+type ColumnDefinition struct {
+ Name ColIdent
+ Type ColumnType
+}
+
+// Format formats the node.
+func (col *ColumnDefinition) Format(buf *TrackedBuffer) {
+ buf.Myprintf("%v %v", col.Name, &col.Type)
+}
+
+func (col *ColumnDefinition) walkSubtree(visit Visit) error {
+ if col == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ col.Name,
+ &col.Type,
+ )
+}
+
+// ColumnType represents a sql type in a CREATE TABLE statement
+// All optional fields are nil if not specified
+type ColumnType struct {
+ // The base type string
+ Type string
+
+ // Generic field options.
+ NotNull BoolVal
+ Autoincrement BoolVal
+ Default *SQLVal
+ OnUpdate *SQLVal
+ Comment *SQLVal
+
+ // Numeric field options
+ Length *SQLVal
+ Unsigned BoolVal
+ Zerofill BoolVal
+ Scale *SQLVal
+
+ // Text field options
+ Charset string
+ Collate string
+
+ // Enum values
+ EnumValues []string
+
+ // Key specification
+ KeyOpt ColumnKeyOption
+}
+
+// Format returns a canonical string representation of the type and all relevant options
+func (ct *ColumnType) Format(buf *TrackedBuffer) {
+ buf.Myprintf("%s", ct.Type)
+
+ if ct.Length != nil && ct.Scale != nil {
+ buf.Myprintf("(%v,%v)", ct.Length, ct.Scale)
+
+ } else if ct.Length != nil {
+ buf.Myprintf("(%v)", ct.Length)
+ }
+
+ if ct.EnumValues != nil {
+ buf.Myprintf("(%s)", strings.Join(ct.EnumValues, ", "))
+ }
+
+ opts := make([]string, 0, 16)
+ if ct.Unsigned {
+ opts = append(opts, keywordStrings[UNSIGNED])
+ }
+ if ct.Zerofill {
+ opts = append(opts, keywordStrings[ZEROFILL])
+ }
+ if ct.NotNull {
+ opts = append(opts, keywordStrings[NOT], keywordStrings[NULL])
+ }
+ if ct.Default != nil {
+ opts = append(opts, keywordStrings[DEFAULT], String(ct.Default))
+ }
+ if ct.OnUpdate != nil {
+ opts = append(opts, keywordStrings[ON], keywordStrings[UPDATE], String(ct.OnUpdate))
+ }
+ if ct.Autoincrement {
+ opts = append(opts, keywordStrings[AUTO_INCREMENT])
+ }
+ if ct.KeyOpt == colKeyPrimary {
+ opts = append(opts, keywordStrings[PRIMARY], keywordStrings[KEY])
+ }
+ if ct.KeyOpt == colKeyUnique {
+ opts = append(opts, keywordStrings[UNIQUE])
+ }
+ if ct.KeyOpt == colKeyUniqueKey {
+ opts = append(opts, keywordStrings[UNIQUE], keywordStrings[KEY])
+ }
+ if ct.KeyOpt == colKey {
+ opts = append(opts, keywordStrings[KEY])
+ }
+
+ if len(opts) != 0 {
+ buf.Myprintf(" %s", strings.Join(opts, " "))
+ }
+}
+
+// DescribeType returns the abbreviated type information as required for
+// describe table
+func (ct *ColumnType) DescribeType() string {
+ buf := NewTrackedBuffer(nil)
+ buf.Myprintf("%s", ct.Type)
+ if ct.Length != nil && ct.Scale != nil {
+ buf.Myprintf("(%v,%v)", ct.Length, ct.Scale)
+ } else if ct.Length != nil {
+ buf.Myprintf("(%v)", ct.Length)
+ }
+
+ opts := make([]string, 0, 16)
+ if ct.Unsigned {
+ opts = append(opts, keywordStrings[UNSIGNED])
+ }
+ if ct.Zerofill {
+ opts = append(opts, keywordStrings[ZEROFILL])
+ }
+ if len(opts) != 0 {
+ buf.Myprintf(" %s", strings.Join(opts, " "))
+ }
+ return buf.String()
+}
+
+// SQLType returns the sqltypes type code for the given column
+func (ct *ColumnType) SQLType() querypb.Type {
+ switch ct.Type {
+ case keywordStrings[TINYINT]:
+ if ct.Unsigned {
+ return sqltypes.Uint8
+ }
+ return sqltypes.Int8
+ case keywordStrings[SMALLINT]:
+ if ct.Unsigned {
+ return sqltypes.Uint16
+ }
+ return sqltypes.Int16
+ case keywordStrings[MEDIUMINT]:
+ if ct.Unsigned {
+ return sqltypes.Uint24
+ }
+ return sqltypes.Int24
+ case keywordStrings[INT]:
+ fallthrough
+ case keywordStrings[INTEGER]:
+ if ct.Unsigned {
+ return sqltypes.Uint32
+ }
+ return sqltypes.Int32
+ case keywordStrings[BIGINT]:
+ if ct.Unsigned {
+ return sqltypes.Uint64
+ }
+ return sqltypes.Int64
+ case keywordStrings[TEXT]:
+ return sqltypes.Text
+ case keywordStrings[TINYTEXT]:
+ return sqltypes.Text
+ case keywordStrings[MEDIUMTEXT]:
+ return sqltypes.Text
+ case keywordStrings[LONGTEXT]:
+ return sqltypes.Text
+ case keywordStrings[BLOB]:
+ return sqltypes.Blob
+ case keywordStrings[TINYBLOB]:
+ return sqltypes.Blob
+ case keywordStrings[MEDIUMBLOB]:
+ return sqltypes.Blob
+ case keywordStrings[LONGBLOB]:
+ return sqltypes.Blob
+ case keywordStrings[CHAR]:
+ return sqltypes.Char
+ case keywordStrings[VARCHAR]:
+ return sqltypes.VarChar
+ case keywordStrings[DATE]:
+ return sqltypes.Date
+ case keywordStrings[TIME]:
+ return sqltypes.Time
+ case keywordStrings[DATETIME]:
+ return sqltypes.Datetime
+ case keywordStrings[TIMESTAMP]:
+ return sqltypes.Timestamp
+ case keywordStrings[YEAR]:
+ return sqltypes.Year
+ case keywordStrings[FLOAT_TYPE]:
+ return sqltypes.Float32
+ case keywordStrings[DOUBLE]:
+ return sqltypes.Float64
+ case keywordStrings[DECIMAL]:
+ return sqltypes.Decimal
+ case keywordStrings[SET]:
+ return sqltypes.Set
+ }
+ panic("unimplemented type " + ct.Type)
+}
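+
+// For instance (illustrative): a column declared as `bigint unsigned` maps to
+// sqltypes.Uint64, a plain `int` maps to sqltypes.Int32, and `varchar` maps to
+// sqltypes.VarChar.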
+
+func (ct *ColumnType) walkSubtree(visit Visit) error {
+ return nil
+}
+
+// IndexDefinition describes an index in a CREATE TABLE statement
+type IndexDefinition struct {
+ Info *IndexInfo
+ Columns []*IndexColumn
+ Options []*IndexOption
+}
+
+// Format formats the node.
+func (idx *IndexDefinition) Format(buf *TrackedBuffer) {
+ buf.Myprintf("%v (", idx.Info)
+ for i, col := range idx.Columns {
+ if i != 0 {
+ buf.Myprintf(", %v", col.Column)
+ } else {
+ buf.Myprintf("%v", col.Column)
+ }
+ if col.Length != nil {
+ buf.Myprintf("(%v)", col.Length)
+ }
+ }
+ buf.Myprintf(")")
+
+ for _, opt := range idx.Options {
+ buf.Myprintf(" %s", opt.Name)
+ if opt.Using != "" {
+ buf.Myprintf(" %s", opt.Using)
+ } else {
+ buf.Myprintf(" %v", opt.Value)
+ }
+ }
+}
+
+func (idx *IndexDefinition) walkSubtree(visit Visit) error {
+ if idx == nil {
+ return nil
+ }
+
+ for _, n := range idx.Columns {
+ if err := Walk(visit, n.Column); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// IndexInfo describes the name and type of an index in a CREATE TABLE statement
+type IndexInfo struct {
+ Type string
+ Name ColIdent
+ Primary bool
+ Spatial bool
+ Unique bool
+}
+
+// Format formats the node.
+func (ii *IndexInfo) Format(buf *TrackedBuffer) {
+ if ii.Primary {
+ buf.Myprintf("%s", ii.Type)
+ } else {
+ buf.Myprintf("%s %v", ii.Type, ii.Name)
+ }
+}
+
+func (ii *IndexInfo) walkSubtree(visit Visit) error {
+ return Walk(visit, ii.Name)
+}
+
+// IndexColumn describes a column in an index definition with optional length
+type IndexColumn struct {
+ Column ColIdent
+ Length *SQLVal
+}
+
+// LengthScaleOption is used for types that have an optional length
+// and scale
+type LengthScaleOption struct {
+ Length *SQLVal
+ Scale *SQLVal
+}
+
+// IndexOption is used for trailing options for indexes: COMMENT, KEY_BLOCK_SIZE, USING
+type IndexOption struct {
+ Name string
+ Value *SQLVal
+ Using string
+}
+
+// ColumnKeyOption indicates whether the given column is defined as an
+// index element and, if so, which kind of key option it carries
+type ColumnKeyOption int
+
+const (
+ colKeyNone ColumnKeyOption = iota
+ colKeyPrimary
+ colKeySpatialKey
+ colKeyUnique
+ colKeyUniqueKey
+ colKey
+)
+
+// VindexSpec defines a vindex for a CREATE VINDEX or DROP VINDEX statement
+type VindexSpec struct {
+ Name ColIdent
+ Type ColIdent
+ Params []VindexParam
+}
+
+// ParseParams parses the vindex parameter list, pulling out the special-case
+// "owner" parameter
+func (node *VindexSpec) ParseParams() (string, map[string]string) {
+ var owner string
+ params := map[string]string{}
+ for _, p := range node.Params {
+ if p.Key.Lowered() == VindexOwnerStr {
+ owner = p.Val
+ } else {
+ params[p.Key.String()] = p.Val
+ }
+ }
+ return owner, params
+}
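+
+// Illustrative sketch (editor's note, not part of the original source), assuming
+// VindexOwnerStr is the lowercase "owner" key:
+//
+//   // spec parsed from: create vindex ... using lookup with owner=user, table=t
+//   owner, params := spec.ParseParams()
+//   // owner == "user", params == map[string]string{"table": "t"}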
+
+// Format formats the node. The "CREATE VINDEX" preamble was formatted in
+// the containing DDL node Format, so this just prints the type, any
+// parameters, and optionally the owner
+func (node *VindexSpec) Format(buf *TrackedBuffer) {
+ buf.Myprintf("using %v", node.Type)
+
+ numParams := len(node.Params)
+ if numParams != 0 {
+ buf.Myprintf(" with ")
+ for i, p := range node.Params {
+ if i != 0 {
+ buf.Myprintf(", ")
+ }
+ buf.Myprintf("%v", p)
+ }
+ }
+}
+
+func (node *VindexSpec) walkSubtree(visit Visit) error {
+ err := Walk(visit,
+ node.Name,
+ )
+
+ if err != nil {
+ return err
+ }
+
+ for _, p := range node.Params {
+ err := Walk(visit, p)
+
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// VindexParam defines a key/value parameter for a CREATE VINDEX statement
+type VindexParam struct {
+ Key ColIdent
+ Val string
+}
+
+// Format formats the node.
+func (node VindexParam) Format(buf *TrackedBuffer) {
+ buf.Myprintf("%s=%s", node.Key.String(), node.Val)
+}
+
+func (node VindexParam) walkSubtree(visit Visit) error {
+ return Walk(visit,
+ node.Key,
+ )
+}
+
+// Show represents a show statement.
+type Show struct {
+ Type string
+ OnTable TableName
+ ShowCreate bool
+}
+
+// Format formats the node.
+func (node *Show) Format(buf *TrackedBuffer) {
+ buf.Myprintf("show ")
+ if node.ShowCreate {
+ buf.Myprintf("create ")
+ }
+ buf.Myprintf("%s", node.Type)
+ if node.HasOnTable() {
+ buf.Myprintf(" %v", node.OnTable)
+ }
+}
+
+// HasOnTable returns true if the show statement has an "on" clause
+func (node *Show) HasOnTable() bool {
+ return node.OnTable.Name.v != ""
+}
+
+func (node *Show) walkSubtree(visit Visit) error {
+ return nil
+}
+
+// Comments represents a list of comments.
+type Comments [][]byte
+
+// Format formats the node.
+func (node Comments) Format(buf *TrackedBuffer) {
+ for _, c := range node {
+ buf.Myprintf("%s ", c)
+ }
+}
+
+func (node Comments) walkSubtree(visit Visit) error {
+ return nil
+}
+
+// SelectExprs represents SELECT expressions.
+type SelectExprs []SelectExpr
+
+// Format formats the node.
+func (node SelectExprs) Format(buf *TrackedBuffer) {
+ var prefix string
+ for _, n := range node {
+ buf.Myprintf("%s%v", prefix, n)
+ prefix = ", "
+ }
+}
+
+func (node SelectExprs) walkSubtree(visit Visit) error {
+ for _, n := range node {
+ if err := Walk(visit, n); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// SelectExpr represents a SELECT expression.
+type SelectExpr interface {
+ iSelectExpr()
+ SQLNode
+}
+
+func (*StarExpr) iSelectExpr() {}
+func (*AliasedExpr) iSelectExpr() {}
+func (Nextval) iSelectExpr() {}
+
+// StarExpr defines a '*' or 'table.*' expression.
+type StarExpr struct {
+ TableName TableName
+}
+
+// Format formats the node.
+func (node *StarExpr) Format(buf *TrackedBuffer) {
+ if !node.TableName.IsEmpty() {
+ buf.Myprintf("%v.", node.TableName)
+ }
+ buf.Myprintf("*")
+}
+
+func (node *StarExpr) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.TableName,
+ )
+}
+
+// AliasedExpr defines an aliased SELECT expression.
+type AliasedExpr struct {
+ Expr Expr
+ As ColIdent
+}
+
+// Format formats the node.
+func (node *AliasedExpr) Format(buf *TrackedBuffer) {
+ buf.Myprintf("%v", node.Expr)
+ if !node.As.IsEmpty() {
+ buf.Myprintf(" as %v", node.As)
+ }
+}
+
+func (node *AliasedExpr) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Expr,
+ node.As,
+ )
+}
+
+// Nextval defines the NEXT VALUE expression.
+type Nextval struct {
+ Expr Expr
+}
+
+// Format formats the node.
+func (node Nextval) Format(buf *TrackedBuffer) {
+ buf.Myprintf("next %v values", node.Expr)
+}
+
+func (node Nextval) walkSubtree(visit Visit) error {
+ return Walk(visit, node.Expr)
+}
+
+// Columns represents an insert column list.
+type Columns []ColIdent
+
+// Format formats the node.
+func (node Columns) Format(buf *TrackedBuffer) {
+ if node == nil {
+ return
+ }
+ prefix := "("
+ for _, n := range node {
+ buf.Myprintf("%s%v", prefix, n)
+ prefix = ", "
+ }
+ buf.WriteString(")")
+}
+
+func (node Columns) walkSubtree(visit Visit) error {
+ for _, n := range node {
+ if err := Walk(visit, n); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// FindColumn finds a column in the column list, returning
+// the index if it exists or -1 otherwise
+func (node Columns) FindColumn(col ColIdent) int {
+ for i, colName := range node {
+ if colName.Equal(col) {
+ return i
+ }
+ }
+ return -1
+}
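+
+// Illustrative sketch (editor's note, not part of the original source); the
+// comparison goes through ColIdent.Equal and is therefore case-insensitive:
+//
+//   cols := Columns{NewColIdent("id"), NewColIdent("Name")}
+//   cols.FindColumn(NewColIdent("name")) // 1
+//   cols.FindColumn(NewColIdent("age"))  // -1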
+
+// Partitions is a type built on Columns so we can handle printing efficiently
+type Partitions Columns
+
+// Format formats the node
+func (node Partitions) Format(buf *TrackedBuffer) {
+ if node == nil {
+ return
+ }
+ prefix := " partition ("
+ for _, n := range node {
+ buf.Myprintf("%s%v", prefix, n)
+ prefix = ", "
+ }
+ buf.WriteString(")")
+}
+
+func (node Partitions) walkSubtree(visit Visit) error {
+ for _, n := range node {
+ if err := Walk(visit, n); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// TableExprs represents a list of table expressions.
+type TableExprs []TableExpr
+
+// Format formats the node.
+func (node TableExprs) Format(buf *TrackedBuffer) {
+ var prefix string
+ for _, n := range node {
+ buf.Myprintf("%s%v", prefix, n)
+ prefix = ", "
+ }
+}
+
+func (node TableExprs) walkSubtree(visit Visit) error {
+ for _, n := range node {
+ if err := Walk(visit, n); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// TableExpr represents a table expression.
+type TableExpr interface {
+ iTableExpr()
+ SQLNode
+}
+
+func (*AliasedTableExpr) iTableExpr() {}
+func (*ParenTableExpr) iTableExpr() {}
+func (*JoinTableExpr) iTableExpr() {}
+
+// AliasedTableExpr represents a table expression
+// coupled with an optional alias or index hint.
+// If As is empty, no alias was used.
+type AliasedTableExpr struct {
+ Expr SimpleTableExpr
+ Partitions Partitions
+ As TableIdent
+ Hints *IndexHints
+}
+
+// Format formats the node.
+func (node *AliasedTableExpr) Format(buf *TrackedBuffer) {
+ buf.Myprintf("%v%v", node.Expr, node.Partitions)
+ if !node.As.IsEmpty() {
+ buf.Myprintf(" as %v", node.As)
+ }
+ if node.Hints != nil {
+ // Hint node provides the space padding.
+ buf.Myprintf("%v", node.Hints)
+ }
+}
+
+func (node *AliasedTableExpr) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Expr,
+ node.As,
+ node.Hints,
+ )
+}
+
+// RemoveHints returns a new AliasedTableExpr with the hints removed.
+func (node *AliasedTableExpr) RemoveHints() *AliasedTableExpr {
+ noHints := *node
+ noHints.Hints = nil
+ return &noHints
+}
+
+// SimpleTableExpr represents a simple table expression.
+type SimpleTableExpr interface {
+ iSimpleTableExpr()
+ SQLNode
+}
+
+func (TableName) iSimpleTableExpr() {}
+func (*Subquery) iSimpleTableExpr() {}
+
+// TableNames is a list of TableName.
+type TableNames []TableName
+
+// Format formats the node.
+func (node TableNames) Format(buf *TrackedBuffer) {
+ var prefix string
+ for _, n := range node {
+ buf.Myprintf("%s%v", prefix, n)
+ prefix = ", "
+ }
+}
+
+func (node TableNames) walkSubtree(visit Visit) error {
+ for _, n := range node {
+ if err := Walk(visit, n); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// TableName represents a table name.
+// Qualifier, if specified, represents a database or keyspace.
+// TableName is a value struct whose fields are case sensitive.
+// This means two TableName vars can be compared for equality
+// and a TableName can also be used as key in a map.
+type TableName struct {
+ Name, Qualifier TableIdent
+}
+
+// Format formats the node.
+func (node TableName) Format(buf *TrackedBuffer) {
+ if node.IsEmpty() {
+ return
+ }
+ if !node.Qualifier.IsEmpty() {
+ buf.Myprintf("%v.", node.Qualifier)
+ }
+ buf.Myprintf("%v", node.Name)
+}
+
+func (node TableName) walkSubtree(visit Visit) error {
+ return Walk(
+ visit,
+ node.Name,
+ node.Qualifier,
+ )
+}
+
+// IsEmpty returns true if TableName is nil or empty.
+func (node TableName) IsEmpty() bool {
+ // If Name is empty, Qualifier is also empty.
+ return node.Name.IsEmpty()
+}
+
+// ToViewName returns a TableName acceptable for use as a VIEW. VIEW names are
+// always lowercase, so ToViewName lowercases the name. Databases are case-sensitive,
+// so Qualifier is left untouched.
+func (node TableName) ToViewName() TableName {
+ return TableName{
+ Qualifier: node.Qualifier,
+ Name: NewTableIdent(strings.ToLower(node.Name.v)),
+ }
+}
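+
+// Illustrative sketch (editor's note, not part of the original source):
+//
+//   t := TableName{Qualifier: NewTableIdent("DB"), Name: NewTableIdent("MyView")}
+//   t.ToViewName() // Qualifier stays "DB", Name becomes "myview"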
+
+// ParenTableExpr represents a parenthesized list of TableExpr.
+type ParenTableExpr struct {
+ Exprs TableExprs
+}
+
+// Format formats the node.
+func (node *ParenTableExpr) Format(buf *TrackedBuffer) {
+ buf.Myprintf("(%v)", node.Exprs)
+}
+
+func (node *ParenTableExpr) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Exprs,
+ )
+}
+
+// JoinCondition represents the join conditions (either a ON or USING clause)
+// of a JoinTableExpr.
+type JoinCondition struct {
+ On Expr
+ Using Columns
+}
+
+// Format formats the node.
+func (node JoinCondition) Format(buf *TrackedBuffer) {
+ if node.On != nil {
+ buf.Myprintf(" on %v", node.On)
+ }
+ if node.Using != nil {
+ buf.Myprintf(" using %v", node.Using)
+ }
+}
+
+func (node JoinCondition) walkSubtree(visit Visit) error {
+ return Walk(
+ visit,
+ node.On,
+ node.Using,
+ )
+}
+
+// JoinTableExpr represents a TableExpr that's a JOIN operation.
+type JoinTableExpr struct {
+ LeftExpr TableExpr
+ Join string
+ RightExpr TableExpr
+ Condition JoinCondition
+}
+
+// JoinTableExpr.Join
+const (
+ JoinStr = "join"
+ InnerJoinStr = "inner join"
+ CrossJoinStr = "cross join"
+ StraightJoinStr = "straight_join"
+ LeftJoinStr = "left join"
+ NaturalJoinStr = "natural join"
+ NaturalLeftJoinStr = "natural left join"
+)
+
+// Format formats the node.
+func (node *JoinTableExpr) Format(buf *TrackedBuffer) {
+ buf.Myprintf("%v %s %v%v", node.LeftExpr, node.Join, node.RightExpr, node.Condition)
+}
+
+func (node *JoinTableExpr) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.LeftExpr,
+ node.RightExpr,
+ node.Condition,
+ )
+}
+
+// IndexHints represents a list of index hints.
+type IndexHints struct {
+ Type string
+ Indexes []ColIdent
+}
+
+// Index hints.
+const (
+ UseStr = "use "
+ IgnoreStr = "ignore "
+ ForceStr = "force "
+)
+
+// Format formats the node.
+func (node *IndexHints) Format(buf *TrackedBuffer) {
+ buf.Myprintf(" %sindex ", node.Type)
+ prefix := "("
+ for _, n := range node.Indexes {
+ buf.Myprintf("%s%v", prefix, n)
+ prefix = ", "
+ }
+ buf.Myprintf(")")
+}
+
+func (node *IndexHints) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ for _, n := range node.Indexes {
+ if err := Walk(visit, n); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Where represents a WHERE or HAVING clause.
+type Where struct {
+ Type string
+ Expr Expr
+}
+
+// Where.Type
+const (
+ WhereStr = "where"
+ HavingStr = "having"
+)
+
+// NewWhere creates a WHERE or HAVING clause out
+// of an Expr. If the expression is nil, it returns nil.
+func NewWhere(typ string, expr Expr) *Where {
+ if expr == nil {
+ return nil
+ }
+ return &Where{Type: typ, Expr: expr}
+}
+
+// Format formats the node.
+func (node *Where) Format(buf *TrackedBuffer) {
+ if node == nil || node.Expr == nil {
+ return
+ }
+ buf.Myprintf(" %s %v", node.Type, node.Expr)
+}
+
+func (node *Where) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Expr,
+ )
+}
+
+// Expr represents an expression.
+type Expr interface {
+ iExpr()
+ // replace replaces any subexpression that matches
+ // from with to. The implementation can use the
+ // replaceExprs convenience function.
+ replace(from, to Expr) bool
+ SQLNode
+}
+
+func (*AndExpr) iExpr() {}
+func (*OrExpr) iExpr() {}
+func (*NotExpr) iExpr() {}
+func (*ParenExpr) iExpr() {}
+func (*ComparisonExpr) iExpr() {}
+func (*RangeCond) iExpr() {}
+func (*IsExpr) iExpr() {}
+func (*ExistsExpr) iExpr() {}
+func (*SQLVal) iExpr() {}
+func (*NullVal) iExpr() {}
+func (BoolVal) iExpr() {}
+func (*ColName) iExpr() {}
+func (ValTuple) iExpr() {}
+func (*Subquery) iExpr() {}
+func (ListArg) iExpr() {}
+func (*BinaryExpr) iExpr() {}
+func (*UnaryExpr) iExpr() {}
+func (*IntervalExpr) iExpr() {}
+func (*CollateExpr) iExpr() {}
+func (*FuncExpr) iExpr() {}
+func (*CaseExpr) iExpr() {}
+func (*ValuesFuncExpr) iExpr() {}
+func (*ConvertExpr) iExpr() {}
+func (*SubstrExpr) iExpr() {}
+func (*ConvertUsingExpr) iExpr() {}
+func (*MatchExpr) iExpr() {}
+func (*GroupConcatExpr) iExpr() {}
+func (*Default) iExpr() {}
+func (*TimeExpr) iExpr() {}
+
+// ReplaceExpr finds the from expression from root
+// and replaces it with to. If from matches root,
+// then to is returned.
+func ReplaceExpr(root, from, to Expr) Expr {
+ if root == from {
+ return to
+ }
+ root.replace(from, to)
+ return root
+}
+
+// replaceExprs is a convenience function used by implementors
+// of the replace method.
+func replaceExprs(from, to Expr, exprs ...*Expr) bool {
+ for _, expr := range exprs {
+ if *expr == nil {
+ continue
+ }
+ if *expr == from {
+ *expr = to
+ return true
+ }
+ if (*expr).replace(from, to) {
+ return true
+ }
+ }
+ return false
+}
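+
+// Illustrative sketch (editor's note, not part of the original source); a and b
+// stand for previously built Expr values:
+//
+//   and := &AndExpr{Left: a, Right: b}
+//   ReplaceExpr(and, b, NewIntVal([]byte("1"))) // returns and, with and.Right now the literal 1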
+
+// Exprs represents a list of value expressions.
+// It's not a valid expression because it's not parenthesized.
+type Exprs []Expr
+
+// Format formats the node.
+func (node Exprs) Format(buf *TrackedBuffer) {
+ var prefix string
+ for _, n := range node {
+ buf.Myprintf("%s%v", prefix, n)
+ prefix = ", "
+ }
+}
+
+func (node Exprs) walkSubtree(visit Visit) error {
+ for _, n := range node {
+ if err := Walk(visit, n); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// AndExpr represents an AND expression.
+type AndExpr struct {
+ Left, Right Expr
+}
+
+// Format formats the node.
+func (node *AndExpr) Format(buf *TrackedBuffer) {
+ buf.Myprintf("%v and %v", node.Left, node.Right)
+}
+
+func (node *AndExpr) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Left,
+ node.Right,
+ )
+}
+
+func (node *AndExpr) replace(from, to Expr) bool {
+ return replaceExprs(from, to, &node.Left, &node.Right)
+}
+
+// OrExpr represents an OR expression.
+type OrExpr struct {
+ Left, Right Expr
+}
+
+// Format formats the node.
+func (node *OrExpr) Format(buf *TrackedBuffer) {
+ buf.Myprintf("%v or %v", node.Left, node.Right)
+}
+
+func (node *OrExpr) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Left,
+ node.Right,
+ )
+}
+
+func (node *OrExpr) replace(from, to Expr) bool {
+ return replaceExprs(from, to, &node.Left, &node.Right)
+}
+
+// NotExpr represents a NOT expression.
+type NotExpr struct {
+ Expr Expr
+}
+
+// Format formats the node.
+func (node *NotExpr) Format(buf *TrackedBuffer) {
+ buf.Myprintf("not %v", node.Expr)
+}
+
+func (node *NotExpr) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Expr,
+ )
+}
+
+func (node *NotExpr) replace(from, to Expr) bool {
+ return replaceExprs(from, to, &node.Expr)
+}
+
+// ParenExpr represents a parenthesized boolean expression.
+type ParenExpr struct {
+ Expr Expr
+}
+
+// Format formats the node.
+func (node *ParenExpr) Format(buf *TrackedBuffer) {
+ buf.Myprintf("(%v)", node.Expr)
+}
+
+func (node *ParenExpr) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Expr,
+ )
+}
+
+func (node *ParenExpr) replace(from, to Expr) bool {
+ return replaceExprs(from, to, &node.Expr)
+}
+
+// ComparisonExpr represents a two-value comparison expression.
+type ComparisonExpr struct {
+ Operator string
+ Left, Right Expr
+ Escape Expr
+}
+
+// ComparisonExpr.Operator
+const (
+ EqualStr = "="
+ LessThanStr = "<"
+ GreaterThanStr = ">"
+ LessEqualStr = "<="
+ GreaterEqualStr = ">="
+ NotEqualStr = "!="
+ NullSafeEqualStr = "<=>"
+ NullSafeNotEqualStr = "<>"
+ InStr = "in"
+ NotInStr = "not in"
+ LikeStr = "like"
+ NotLikeStr = "not like"
+ RegexpStr = "regexp"
+ NotRegexpStr = "not regexp"
+)
+
+// Format formats the node.
+func (node *ComparisonExpr) Format(buf *TrackedBuffer) {
+ buf.Myprintf("%v %s %v", node.Left, node.Operator, node.Right)
+ if node.Escape != nil {
+ buf.Myprintf(" escape %v", node.Escape)
+ }
+}
+
+func (node *ComparisonExpr) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Left,
+ node.Right,
+ node.Escape,
+ )
+}
+
+func (node *ComparisonExpr) replace(from, to Expr) bool {
+ return replaceExprs(from, to, &node.Left, &node.Right, &node.Escape)
+}
+
+// RangeCond represents a BETWEEN or a NOT BETWEEN expression.
+type RangeCond struct {
+ Operator string
+ Left Expr
+ From, To Expr
+}
+
+// RangeCond.Operator
+const (
+ BetweenStr = "between"
+ NotBetweenStr = "not between"
+)
+
+// Format formats the node.
+func (node *RangeCond) Format(buf *TrackedBuffer) {
+ buf.Myprintf("%v %s %v and %v", node.Left, node.Operator, node.From, node.To)
+}
+
+func (node *RangeCond) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Left,
+ node.From,
+ node.To,
+ )
+}
+
+func (node *RangeCond) replace(from, to Expr) bool {
+ return replaceExprs(from, to, &node.Left, &node.From, &node.To)
+}
+
+// IsExpr represents an IS ... or an IS NOT ... expression.
+type IsExpr struct {
+ Operator string
+ Expr Expr
+}
+
+// IsExpr.Operator
+const (
+ IsNullStr = "is null"
+ IsNotNullStr = "is not null"
+ IsTrueStr = "is true"
+ IsNotTrueStr = "is not true"
+ IsFalseStr = "is false"
+ IsNotFalseStr = "is not false"
+)
+
+// Format formats the node.
+func (node *IsExpr) Format(buf *TrackedBuffer) {
+ buf.Myprintf("%v %s", node.Expr, node.Operator)
+}
+
+func (node *IsExpr) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Expr,
+ )
+}
+
+func (node *IsExpr) replace(from, to Expr) bool {
+ return replaceExprs(from, to, &node.Expr)
+}
+
+// ExistsExpr represents an EXISTS expression.
+type ExistsExpr struct {
+ Subquery *Subquery
+}
+
+// Format formats the node.
+func (node *ExistsExpr) Format(buf *TrackedBuffer) {
+ buf.Myprintf("exists %v", node.Subquery)
+}
+
+func (node *ExistsExpr) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Subquery,
+ )
+}
+
+func (node *ExistsExpr) replace(from, to Expr) bool {
+ return false
+}
+
+// ExprFromValue converts the given Value into an Expr or returns an error.
+func ExprFromValue(value sqltypes.Value) (Expr, error) {
+ // The type checks here follow the rules defined in sqltypes/types.go.
+ switch {
+ case value.Type() == sqltypes.Null:
+ return &NullVal{}, nil
+ case value.IsIntegral():
+ return NewIntVal(value.ToBytes()), nil
+ case value.IsFloat() || value.Type() == sqltypes.Decimal:
+ return NewFloatVal(value.ToBytes()), nil
+ case value.IsQuoted():
+ return NewStrVal(value.ToBytes()), nil
+ default:
+ // We cannot support sqltypes.Expression, or any other invalid type.
+ return nil, fmt.Errorf("cannot convert value %v to AST", value)
+ }
+}
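+
+// Illustrative sketch (editor's note, not part of the original source), assuming
+// the vendored sqltypes package exposes NewInt64 as upstream Vitess does:
+//
+//   expr, err := ExprFromValue(sqltypes.NewInt64(42))
+//   // err == nil, expr is a *SQLVal with Type IntVal and Val []byte("42")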
+
+// ValType specifies the type for SQLVal.
+type ValType int
+
+// These are the possible ValType values.
+// HexNum represents a 0x... value. It cannot
+// be treated as a simple value because it can
+// be interpreted differently depending on the
+// context.
+const (
+ StrVal = ValType(iota)
+ IntVal
+ FloatVal
+ HexNum
+ HexVal
+ ValArg
+ BitVal
+)
+
+// SQLVal represents a single value.
+type SQLVal struct {
+ Type ValType
+ Val []byte
+}
+
+// NewStrVal builds a new StrVal.
+func NewStrVal(in []byte) *SQLVal {
+ return &SQLVal{Type: StrVal, Val: in}
+}
+
+// NewIntVal builds a new IntVal.
+func NewIntVal(in []byte) *SQLVal {
+ return &SQLVal{Type: IntVal, Val: in}
+}
+
+// NewFloatVal builds a new FloatVal.
+func NewFloatVal(in []byte) *SQLVal {
+ return &SQLVal{Type: FloatVal, Val: in}
+}
+
+// NewHexNum builds a new HexNum.
+func NewHexNum(in []byte) *SQLVal {
+ return &SQLVal{Type: HexNum, Val: in}
+}
+
+// NewHexVal builds a new HexVal.
+func NewHexVal(in []byte) *SQLVal {
+ return &SQLVal{Type: HexVal, Val: in}
+}
+
+// NewBitVal builds a new BitVal containing a bit literal.
+func NewBitVal(in []byte) *SQLVal {
+ return &SQLVal{Type: BitVal, Val: in}
+}
+
+// NewValArg builds a new ValArg.
+func NewValArg(in []byte) *SQLVal {
+ return &SQLVal{Type: ValArg, Val: in}
+}
+
+// Format formats the node.
+func (node *SQLVal) Format(buf *TrackedBuffer) {
+ switch node.Type {
+ case StrVal:
+ sqltypes.MakeTrusted(sqltypes.VarBinary, node.Val).EncodeSQL(buf)
+ case IntVal, FloatVal, HexNum:
+ buf.Myprintf("%s", []byte(node.Val))
+ case HexVal:
+ buf.Myprintf("X'%s'", []byte(node.Val))
+ case BitVal:
+ buf.Myprintf("B'%s'", []byte(node.Val))
+ case ValArg:
+ buf.WriteArg(string(node.Val))
+ default:
+ panic("unexpected")
+ }
+}
+
+func (node *SQLVal) walkSubtree(visit Visit) error {
+ return nil
+}
+
+func (node *SQLVal) replace(from, to Expr) bool {
+ return false
+}
+
+// HexDecode decodes the hexval into bytes.
+func (node *SQLVal) HexDecode() ([]byte, error) {
+ dst := make([]byte, hex.DecodedLen(len([]byte(node.Val))))
+ _, err := hex.Decode(dst, []byte(node.Val))
+ if err != nil {
+ return nil, err
+ }
+ return dst, err
+}
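+
+// Illustrative sketch (editor's note, not part of the original source): for a
+// literal written as X'0a1b' the parser stores the inner digits, so
+//
+//   NewHexVal([]byte("0a1b")).HexDecode() // []byte{0x0a, 0x1b}, nil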
+
+// NullVal represents a NULL value.
+type NullVal struct{}
+
+// Format formats the node.
+func (node *NullVal) Format(buf *TrackedBuffer) {
+ buf.Myprintf("null")
+}
+
+func (node *NullVal) walkSubtree(visit Visit) error {
+ return nil
+}
+
+func (node *NullVal) replace(from, to Expr) bool {
+ return false
+}
+
+// BoolVal is true or false.
+type BoolVal bool
+
+// Format formats the node.
+func (node BoolVal) Format(buf *TrackedBuffer) {
+ if node {
+ buf.Myprintf("true")
+ } else {
+ buf.Myprintf("false")
+ }
+}
+
+func (node BoolVal) walkSubtree(visit Visit) error {
+ return nil
+}
+
+func (node BoolVal) replace(from, to Expr) bool {
+ return false
+}
+
+// ColName represents a column name.
+type ColName struct {
+ // Metadata is not populated by the parser.
+ // It's a placeholder for analyzers to store
+ // additional data, typically info about which
+ // table or column this node references.
+ Metadata interface{}
+ Name ColIdent
+ Qualifier TableName
+}
+
+// Format formats the node.
+func (node *ColName) Format(buf *TrackedBuffer) {
+ if !node.Qualifier.IsEmpty() {
+ buf.Myprintf("%v.", node.Qualifier)
+ }
+ buf.Myprintf("%v", node.Name)
+}
+
+func (node *ColName) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Name,
+ node.Qualifier,
+ )
+}
+
+func (node *ColName) replace(from, to Expr) bool {
+ return false
+}
+
+// Equal returns true if the column names match.
+func (node *ColName) Equal(c *ColName) bool {
+ // Failsafe: ColName should not be empty.
+ if node == nil || c == nil {
+ return false
+ }
+ return node.Name.Equal(c.Name) && node.Qualifier == c.Qualifier
+}
+
+// ColTuple represents a list of column values.
+// It can be ValTuple, Subquery, ListArg.
+type ColTuple interface {
+ iColTuple()
+ Expr
+}
+
+func (ValTuple) iColTuple() {}
+func (*Subquery) iColTuple() {}
+func (ListArg) iColTuple() {}
+
+// ValTuple represents a tuple of actual values.
+type ValTuple Exprs
+
+// Format formats the node.
+func (node ValTuple) Format(buf *TrackedBuffer) {
+ buf.Myprintf("(%v)", Exprs(node))
+}
+
+func (node ValTuple) walkSubtree(visit Visit) error {
+ return Walk(visit, Exprs(node))
+}
+
+func (node ValTuple) replace(from, to Expr) bool {
+ for i := range node {
+ if replaceExprs(from, to, &node[i]) {
+ return true
+ }
+ }
+ return false
+}
+
+// Subquery represents a subquery.
+type Subquery struct {
+ Select SelectStatement
+}
+
+// Format formats the node.
+func (node *Subquery) Format(buf *TrackedBuffer) {
+ buf.Myprintf("(%v)", node.Select)
+}
+
+func (node *Subquery) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Select,
+ )
+}
+
+func (node *Subquery) replace(from, to Expr) bool {
+ return false
+}
+
+// ListArg represents a named list argument.
+type ListArg []byte
+
+// Format formats the node.
+func (node ListArg) Format(buf *TrackedBuffer) {
+ buf.WriteArg(string(node))
+}
+
+func (node ListArg) walkSubtree(visit Visit) error {
+ return nil
+}
+
+func (node ListArg) replace(from, to Expr) bool {
+ return false
+}
+
+// BinaryExpr represents a binary value expression.
+type BinaryExpr struct {
+ Operator string
+ Left, Right Expr
+}
+
+// BinaryExpr.Operator
+const (
+ BitAndStr = "&"
+ BitOrStr = "|"
+ BitXorStr = "^"
+ PlusStr = "+"
+ MinusStr = "-"
+ MultStr = "*"
+ DivStr = "/"
+ IntDivStr = "div"
+ ModStr = "%"
+ ShiftLeftStr = "<<"
+ ShiftRightStr = ">>"
+)
+
+// Format formats the node.
+func (node *BinaryExpr) Format(buf *TrackedBuffer) {
+ buf.Myprintf("%v %s %v", node.Left, node.Operator, node.Right)
+}
+
+func (node *BinaryExpr) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Left,
+ node.Right,
+ )
+}
+
+func (node *BinaryExpr) replace(from, to Expr) bool {
+ return replaceExprs(from, to, &node.Left, &node.Right)
+}
+
+// UnaryExpr represents a unary value expression.
+type UnaryExpr struct {
+ Operator string
+ Expr Expr
+}
+
+// UnaryExpr.Operator
+const (
+ UPlusStr = "+"
+ UMinusStr = "-"
+ TildaStr = "~"
+ BangStr = "!"
+ BinaryStr = "binary "
+ UBinaryStr = "_binary "
+)
+
+// Format formats the node.
+func (node *UnaryExpr) Format(buf *TrackedBuffer) {
+ if _, unary := node.Expr.(*UnaryExpr); unary {
+ buf.Myprintf("%s %v", node.Operator, node.Expr)
+ return
+ }
+ buf.Myprintf("%s%v", node.Operator, node.Expr)
+}
+
+func (node *UnaryExpr) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Expr,
+ )
+}
+
+func (node *UnaryExpr) replace(from, to Expr) bool {
+ return replaceExprs(from, to, &node.Expr)
+}
+
+// IntervalExpr represents a date-time INTERVAL expression.
+type IntervalExpr struct {
+ Expr Expr
+ Unit string
+}
+
+// Format formats the node.
+func (node *IntervalExpr) Format(buf *TrackedBuffer) {
+ buf.Myprintf("interval %v %s", node.Expr, node.Unit)
+}
+
+func (node *IntervalExpr) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Expr,
+ )
+}
+
+func (node *IntervalExpr) replace(from, to Expr) bool {
+ return replaceExprs(from, to, &node.Expr)
+}
+
+// CollateExpr represents dynamic collate operator.
+type CollateExpr struct {
+ Expr Expr
+ Charset string
+}
+
+// Format formats the node.
+func (node *CollateExpr) Format(buf *TrackedBuffer) {
+ buf.Myprintf("%v collate %s", node.Expr, node.Charset)
+}
+
+func (node *CollateExpr) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Expr,
+ )
+}
+
+func (node *CollateExpr) replace(from, to Expr) bool {
+ return replaceExprs(from, to, &node.Expr)
+}
+
+// TimeExpr represents a time expression.
+type TimeExpr struct {
+ Expr ColIdent
+}
+
+// Format formats the node.
+func (node *TimeExpr) Format(buf *TrackedBuffer) {
+ buf.Myprintf("%s", node.Expr.Lowered())
+}
+
+func (node *TimeExpr) walkSubtree(visit Visit) error {
+ return nil
+}
+
+func (node *TimeExpr) replace(from, to Expr) bool {
+ return false
+}
+
+// FuncExpr represents a function call.
+type FuncExpr struct {
+ Qualifier TableIdent
+ Name ColIdent
+ Distinct bool
+ Exprs SelectExprs
+}
+
+// Format formats the node.
+func (node *FuncExpr) Format(buf *TrackedBuffer) {
+ var distinct string
+ if node.Distinct {
+ distinct = "distinct "
+ }
+ if !node.Qualifier.IsEmpty() {
+ buf.Myprintf("%v.", node.Qualifier)
+ }
+ // Function names should not be back-quoted even
+ // if they match a reserved word. So, print the
+ // name as is.
+ buf.Myprintf("%s(%s%v)", node.Name.String(), distinct, node.Exprs)
+}
+
+func (node *FuncExpr) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Qualifier,
+ node.Name,
+ node.Exprs,
+ )
+}
+
+func (node *FuncExpr) replace(from, to Expr) bool {
+ for _, sel := range node.Exprs {
+ aliased, ok := sel.(*AliasedExpr)
+ if !ok {
+ continue
+ }
+ if replaceExprs(from, to, &aliased.Expr) {
+ return true
+ }
+ }
+ return false
+}
+
+// Aggregates is a map of all aggregate functions.
+var Aggregates = map[string]bool{
+ "avg": true,
+ "bit_and": true,
+ "bit_or": true,
+ "bit_xor": true,
+ "count": true,
+ "group_concat": true,
+ "max": true,
+ "min": true,
+ "std": true,
+ "stddev_pop": true,
+ "stddev_samp": true,
+ "stddev": true,
+ "sum": true,
+ "var_pop": true,
+ "var_samp": true,
+ "variance": true,
+}
+
+// IsAggregate returns true if the function is an aggregate.
+func (node *FuncExpr) IsAggregate() bool {
+ return Aggregates[node.Name.Lowered()]
+}
+
+// GroupConcatExpr represents a call to GROUP_CONCAT
+type GroupConcatExpr struct {
+ Distinct string
+ Exprs SelectExprs
+ OrderBy OrderBy
+ Separator string
+}
+
+// Format formats the node
+func (node *GroupConcatExpr) Format(buf *TrackedBuffer) {
+ buf.Myprintf("group_concat(%s%v%v%s)", node.Distinct, node.Exprs, node.OrderBy, node.Separator)
+}
+
+func (node *GroupConcatExpr) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Exprs,
+ node.OrderBy,
+ )
+}
+
+func (node *GroupConcatExpr) replace(from, to Expr) bool {
+ for _, sel := range node.Exprs {
+ aliased, ok := sel.(*AliasedExpr)
+ if !ok {
+ continue
+ }
+ if replaceExprs(from, to, &aliased.Expr) {
+ return true
+ }
+ }
+ for _, order := range node.OrderBy {
+ if replaceExprs(from, to, &order.Expr) {
+ return true
+ }
+ }
+ return false
+}
+
+// ValuesFuncExpr represents a function call.
+type ValuesFuncExpr struct {
+ Name *ColName
+}
+
+// Format formats the node.
+func (node *ValuesFuncExpr) Format(buf *TrackedBuffer) {
+ buf.Myprintf("values(%v)", node.Name)
+}
+
+func (node *ValuesFuncExpr) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Name,
+ )
+}
+
+func (node *ValuesFuncExpr) replace(from, to Expr) bool {
+ return false
+}
+
+// SubstrExpr represents a call to SUBSTR(column, value_expression) or
+// SUBSTR(column, value_expression, value_expression). The alternative syntax
+// SUBSTR(column FROM value_expression FOR value_expression) is also supported.
+type SubstrExpr struct {
+ Name *ColName
+ From Expr
+ To Expr
+}
+
+// Format formats the node.
+func (node *SubstrExpr) Format(buf *TrackedBuffer) {
+ if node.To == nil {
+ buf.Myprintf("substr(%v, %v)", node.Name, node.From)
+ } else {
+ buf.Myprintf("substr(%v, %v, %v)", node.Name, node.From, node.To)
+ }
+}
+
+func (node *SubstrExpr) replace(from, to Expr) bool {
+ return replaceExprs(from, to, &node.From, &node.To)
+}
+
+func (node *SubstrExpr) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Name,
+ node.From,
+ node.To,
+ )
+}
+
+// ConvertExpr represents a call to CONVERT(expr, type)
+// or its equivalent CAST(expr AS type). Both are formatted as the latter.
+type ConvertExpr struct {
+ Expr Expr
+ Type *ConvertType
+}
+
+// Format formats the node.
+func (node *ConvertExpr) Format(buf *TrackedBuffer) {
+ buf.Myprintf("cast(%v as %v)", node.Expr, node.Type)
+}
+
+func (node *ConvertExpr) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Expr,
+ node.Type,
+ )
+}
+
+func (node *ConvertExpr) replace(from, to Expr) bool {
+ return replaceExprs(from, to, &node.Expr)
+}
+
+// ConvertUsingExpr represents a call to CONVERT(expr USING charset).
+type ConvertUsingExpr struct {
+ Expr Expr
+ Type string
+}
+
+// Format formats the node.
+func (node *ConvertUsingExpr) Format(buf *TrackedBuffer) {
+ buf.Myprintf("convert(%v using %s)", node.Expr, node.Type)
+}
+
+func (node *ConvertUsingExpr) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Expr,
+ )
+}
+
+func (node *ConvertUsingExpr) replace(from, to Expr) bool {
+ return replaceExprs(from, to, &node.Expr)
+}
+
+// ConvertType represents the type in call to CONVERT(expr, type)
+type ConvertType struct {
+ Type string
+ Length *SQLVal
+ Scale *SQLVal
+ Operator string
+ Charset string
+}
+
+// CharacterSetStr is the " character set" operator string used when formatting
+// a ConvertType charset clause; the leading space is intentional.
+const (
+ CharacterSetStr = " character set"
+)
+
+// Format formats the node.
+func (node *ConvertType) Format(buf *TrackedBuffer) {
+ buf.Myprintf("%s", node.Type)
+ if node.Length != nil {
+ buf.Myprintf("(%v", node.Length)
+ if node.Scale != nil {
+ buf.Myprintf(", %v", node.Scale)
+ }
+ buf.Myprintf(")")
+ }
+ if node.Charset != "" {
+ buf.Myprintf("%s %s", node.Operator, node.Charset)
+ }
+}
+
+func (node *ConvertType) walkSubtree(visit Visit) error {
+ return nil
+}
+
+// MatchExpr represents a call to the MATCH function
+type MatchExpr struct {
+ Columns SelectExprs
+ Expr Expr
+ Option string
+}
+
+// MatchExpr.Option
+const (
+ BooleanModeStr = " in boolean mode"
+ NaturalLanguageModeStr = " in natural language mode"
+ NaturalLanguageModeWithQueryExpansionStr = " in natural language mode with query expansion"
+ QueryExpansionStr = " with query expansion"
+)
+
+// Format formats the node
+func (node *MatchExpr) Format(buf *TrackedBuffer) {
+ buf.Myprintf("match(%v) against (%v%s)", node.Columns, node.Expr, node.Option)
+}
+
+func (node *MatchExpr) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Columns,
+ node.Expr,
+ )
+}
+
+func (node *MatchExpr) replace(from, to Expr) bool {
+ for _, sel := range node.Columns {
+ aliased, ok := sel.(*AliasedExpr)
+ if !ok {
+ continue
+ }
+ if replaceExprs(from, to, &aliased.Expr) {
+ return true
+ }
+ }
+ return replaceExprs(from, to, &node.Expr)
+}
+
+// CaseExpr represents a CASE expression.
+type CaseExpr struct {
+ Expr Expr
+ Whens []*When
+ Else Expr
+}
+
+// Format formats the node.
+func (node *CaseExpr) Format(buf *TrackedBuffer) {
+ buf.Myprintf("case ")
+ if node.Expr != nil {
+ buf.Myprintf("%v ", node.Expr)
+ }
+ for _, when := range node.Whens {
+ buf.Myprintf("%v ", when)
+ }
+ if node.Else != nil {
+ buf.Myprintf("else %v ", node.Else)
+ }
+ buf.Myprintf("end")
+}
+
+func (node *CaseExpr) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ if err := Walk(visit, node.Expr); err != nil {
+ return err
+ }
+ for _, n := range node.Whens {
+ if err := Walk(visit, n); err != nil {
+ return err
+ }
+ }
+ return Walk(visit, node.Else)
+}
+
+func (node *CaseExpr) replace(from, to Expr) bool {
+ for _, when := range node.Whens {
+ if replaceExprs(from, to, &when.Cond, &when.Val) {
+ return true
+ }
+ }
+ return replaceExprs(from, to, &node.Expr, &node.Else)
+}
+
+// Default represents a DEFAULT expression.
+type Default struct {
+ ColName string
+}
+
+// Format formats the node.
+func (node *Default) Format(buf *TrackedBuffer) {
+ buf.Myprintf("default")
+ if node.ColName != "" {
+ buf.Myprintf("(%s)", node.ColName)
+ }
+}
+
+func (node *Default) walkSubtree(visit Visit) error {
+ return nil
+}
+
+func (node *Default) replace(from, to Expr) bool {
+ return false
+}
+
+// When represents a WHEN sub-expression.
+type When struct {
+ Cond Expr
+ Val Expr
+}
+
+// Format formats the node.
+func (node *When) Format(buf *TrackedBuffer) {
+ buf.Myprintf("when %v then %v", node.Cond, node.Val)
+}
+
+func (node *When) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Cond,
+ node.Val,
+ )
+}
+
+// GroupBy represents a GROUP BY clause.
+type GroupBy []Expr
+
+// Format formats the node.
+func (node GroupBy) Format(buf *TrackedBuffer) {
+ prefix := " group by "
+ for _, n := range node {
+ buf.Myprintf("%s%v", prefix, n)
+ prefix = ", "
+ }
+}
+
+func (node GroupBy) walkSubtree(visit Visit) error {
+ for _, n := range node {
+ if err := Walk(visit, n); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// OrderBy represents an ORDER BY clause.
+type OrderBy []*Order
+
+// Format formats the node.
+func (node OrderBy) Format(buf *TrackedBuffer) {
+ prefix := " order by "
+ for _, n := range node {
+ buf.Myprintf("%s%v", prefix, n)
+ prefix = ", "
+ }
+}
+
+func (node OrderBy) walkSubtree(visit Visit) error {
+ for _, n := range node {
+ if err := Walk(visit, n); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Order represents an ordering expression.
+type Order struct {
+ Expr Expr
+ Direction string
+}
+
+// Order.Direction
+const (
+ AscScr = "asc"
+ DescScr = "desc"
+)
+
+// Format formats the node.
+func (node *Order) Format(buf *TrackedBuffer) {
+ if node, ok := node.Expr.(*NullVal); ok {
+ buf.Myprintf("%v", node)
+ return
+ }
+ if node, ok := node.Expr.(*FuncExpr); ok {
+ if node.Name.Lowered() == "rand" {
+ buf.Myprintf("%v", node)
+ return
+ }
+ }
+
+ buf.Myprintf("%v %s", node.Expr, node.Direction)
+}
+
+func (node *Order) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Expr,
+ )
+}
+
+// Limit represents a LIMIT clause.
+type Limit struct {
+ Offset, Rowcount Expr
+}
+
+// Format formats the node.
+func (node *Limit) Format(buf *TrackedBuffer) {
+ if node == nil {
+ return
+ }
+ buf.Myprintf(" limit ")
+ if node.Offset != nil {
+ buf.Myprintf("%v, ", node.Offset)
+ }
+ buf.Myprintf("%v", node.Rowcount)
+}
+
+func (node *Limit) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Offset,
+ node.Rowcount,
+ )
+}
+
+// Values represents a VALUES clause.
+type Values []ValTuple
+
+// Format formats the node.
+func (node Values) Format(buf *TrackedBuffer) {
+ prefix := "values "
+ for _, n := range node {
+ buf.Myprintf("%s%v", prefix, n)
+ prefix = ", "
+ }
+}
+
+func (node Values) walkSubtree(visit Visit) error {
+ for _, n := range node {
+ if err := Walk(visit, n); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// UpdateExprs represents a list of update expressions.
+type UpdateExprs []*UpdateExpr
+
+// Format formats the node.
+func (node UpdateExprs) Format(buf *TrackedBuffer) {
+ var prefix string
+ for _, n := range node {
+ buf.Myprintf("%s%v", prefix, n)
+ prefix = ", "
+ }
+}
+
+func (node UpdateExprs) walkSubtree(visit Visit) error {
+ for _, n := range node {
+ if err := Walk(visit, n); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// UpdateExpr represents an update expression.
+type UpdateExpr struct {
+ Name *ColName
+ Expr Expr
+}
+
+// Format formats the node.
+func (node *UpdateExpr) Format(buf *TrackedBuffer) {
+ buf.Myprintf("%v = %v", node.Name, node.Expr)
+}
+
+func (node *UpdateExpr) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Name,
+ node.Expr,
+ )
+}
+
+// SetExprs represents a list of set expressions.
+type SetExprs []*SetExpr
+
+// Format formats the node.
+func (node SetExprs) Format(buf *TrackedBuffer) {
+ var prefix string
+ for _, n := range node {
+ buf.Myprintf("%s%v", prefix, n)
+ prefix = ", "
+ }
+}
+
+func (node SetExprs) walkSubtree(visit Visit) error {
+ for _, n := range node {
+ if err := Walk(visit, n); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// SetExpr represents a set expression.
+type SetExpr struct {
+ Name ColIdent
+ Expr Expr
+}
+
+// Format formats the node.
+func (node *SetExpr) Format(buf *TrackedBuffer) {
+ // We don't have to backtick set variable names.
+ if node.Name.EqualString("charset") || node.Name.EqualString("names") {
+ buf.Myprintf("%s %v", node.Name.String(), node.Expr)
+ } else {
+ buf.Myprintf("%s = %v", node.Name.String(), node.Expr)
+ }
+}
+
+func (node *SetExpr) walkSubtree(visit Visit) error {
+ if node == nil {
+ return nil
+ }
+ return Walk(
+ visit,
+ node.Name,
+ node.Expr,
+ )
+}
+
+// OnDup represents an ON DUPLICATE KEY clause.
+type OnDup UpdateExprs
+
+// Format formats the node.
+func (node OnDup) Format(buf *TrackedBuffer) {
+ if node == nil {
+ return
+ }
+ buf.Myprintf(" on duplicate key update %v", UpdateExprs(node))
+}
+
+func (node OnDup) walkSubtree(visit Visit) error {
+ return Walk(visit, UpdateExprs(node))
+}
+
+// ColIdent is a case insensitive SQL identifier. It will be escaped with
+// backquotes if necessary.
+type ColIdent struct {
+ // This artifact prevents this struct from being compared
+ // with itself. It consumes no space as long as it's not the
+ // last field in the struct.
+ _ [0]struct{ _ []byte }
+ val, lowered string
+}
+
+// NewColIdent makes a new ColIdent.
+func NewColIdent(str string) ColIdent {
+ return ColIdent{
+ val: str,
+ }
+}
+
+// Format formats the node.
+func (node ColIdent) Format(buf *TrackedBuffer) {
+ formatID(buf, node.val, node.Lowered())
+}
+
+func (node ColIdent) walkSubtree(visit Visit) error {
+ return nil
+}
+
+// IsEmpty returns true if the name is empty.
+func (node ColIdent) IsEmpty() bool {
+ return node.val == ""
+}
+
+// String returns the unescaped column name. It must
+// not be used for SQL generation. Use sqlparser.String
+// instead. The Stringer conformance is for usage
+// in templates.
+func (node ColIdent) String() string {
+ return node.val
+}
+
+// CompliantName returns a compliant id name
+// that can be used for a bind var.
+func (node ColIdent) CompliantName() string {
+ return compliantName(node.val)
+}
+
+// Lowered returns a lower-cased column name.
+// This function should generally be used only for optimizing
+// comparisons.
+func (node ColIdent) Lowered() string {
+ if node.val == "" {
+ return ""
+ }
+ if node.lowered == "" {
+ node.lowered = strings.ToLower(node.val)
+ }
+ return node.lowered
+}
+
+// Equal performs a case-insensitive compare.
+func (node ColIdent) Equal(in ColIdent) bool {
+ return node.Lowered() == in.Lowered()
+}
+
+// EqualString performs a case-insensitive compare with str.
+func (node ColIdent) EqualString(str string) bool {
+ return node.Lowered() == strings.ToLower(str)
+}
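+
+// Illustrative sketch (editor's note, not part of the original source):
+//
+//   id := NewColIdent("UserID")
+//   id.Equal(NewColIdent("userid")) // true, case-insensitive
+//   id.EqualString("USERID")        // true
+//   id.String()                     // "UserID", original casing preserved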
+
+// MarshalJSON marshals into JSON.
+func (node ColIdent) MarshalJSON() ([]byte, error) {
+ return json.Marshal(node.val)
+}
+
+// UnmarshalJSON unmarshals from JSON.
+func (node *ColIdent) UnmarshalJSON(b []byte) error {
+ var result string
+ err := json.Unmarshal(b, &result)
+ if err != nil {
+ return err
+ }
+ node.val = result
+ return nil
+}
+
+// TableIdent is a case sensitive SQL identifier. It will be escaped with
+// backquotes if necessary.
+type TableIdent struct {
+ v string
+}
+
+// NewTableIdent creates a new TableIdent.
+func NewTableIdent(str string) TableIdent {
+ return TableIdent{v: str}
+}
+
+// Format formats the node.
+func (node TableIdent) Format(buf *TrackedBuffer) {
+ formatID(buf, node.v, strings.ToLower(node.v))
+}
+
+func (node TableIdent) walkSubtree(visit Visit) error {
+ return nil
+}
+
+// IsEmpty returns true if TableIdent is empty.
+func (node TableIdent) IsEmpty() bool {
+ return node.v == ""
+}
+
+// String returns the unescaped table name. It must
+// not be used for SQL generation. Use sqlparser.String
+// instead. The Stringer conformance is for usage
+// in templates.
+func (node TableIdent) String() string {
+ return node.v
+}
+
+// CompliantName returns a compliant id name
+// that can be used for a bind var.
+func (node TableIdent) CompliantName() string {
+ return compliantName(node.v)
+}
+
+// MarshalJSON marshals into JSON.
+func (node TableIdent) MarshalJSON() ([]byte, error) {
+ return json.Marshal(node.v)
+}
+
+// UnmarshalJSON unmarshals from JSON.
+func (node *TableIdent) UnmarshalJSON(b []byte) error {
+ var result string
+ err := json.Unmarshal(b, &result)
+ if err != nil {
+ return err
+ }
+ node.v = result
+ return nil
+}
+
+// Backtick produces a backticked literal given an input string.
+func Backtick(in string) string {
+ var buf bytes.Buffer
+ buf.WriteByte('`')
+ for _, c := range in {
+ buf.WriteRune(c)
+ if c == '`' {
+ buf.WriteByte('`')
+ }
+ }
+ buf.WriteByte('`')
+ return buf.String()
+}
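+
+// Illustrative sketch (editor's note, not part of the original source): embedded
+// backticks are doubled, following MySQL identifier quoting:
+//
+//   Backtick("weird`name") // "`weird``name`"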
+
+func formatID(buf *TrackedBuffer, original, lowered string) {
+ isDbSystemVariable := false
+ if len(original) > 1 && original[:2] == "@@" {
+ isDbSystemVariable = true
+ }
+
+ for i, c := range original {
+ if !isLetter(uint16(c)) && (!isDbSystemVariable || !isCarat(uint16(c))) {
+ if i == 0 || !isDigit(uint16(c)) {
+ goto mustEscape
+ }
+ }
+ }
+ if _, ok := keywords[lowered]; ok {
+ goto mustEscape
+ }
+ buf.Myprintf("%s", original)
+ return
+
+mustEscape:
+ buf.WriteByte('`')
+ for _, c := range original {
+ buf.WriteRune(c)
+ if c == '`' {
+ buf.WriteByte('`')
+ }
+ }
+ buf.WriteByte('`')
+}
+
+func compliantName(in string) string {
+ var buf bytes.Buffer
+ for i, c := range in {
+ if !isLetter(uint16(c)) {
+ if i == 0 || !isDigit(uint16(c)) {
+ buf.WriteByte('_')
+ continue
+ }
+ }
+ buf.WriteRune(c)
+ }
+ return buf.String()
+}
diff --git a/vendor/github.com/CovenantSQL/sqlparser/comments.go b/vendor/github.com/CovenantSQL/sqlparser/comments.go
new file mode 100644
index 000000000..a0f7f1b45
--- /dev/null
+++ b/vendor/github.com/CovenantSQL/sqlparser/comments.go
@@ -0,0 +1,293 @@
+/*
+Copyright 2017 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sqlparser
+
+import (
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+const (
+ // DirectiveMultiShardAutocommit is the query comment directive to allow
+ // single round trip autocommit with a multi-shard statement.
+ DirectiveMultiShardAutocommit = "MULTI_SHARD_AUTOCOMMIT"
+ // DirectiveSkipQueryPlanCache skips query plan cache when set.
+ DirectiveSkipQueryPlanCache = "SKIP_QUERY_PLAN_CACHE"
+ // DirectiveQueryTimeout sets a query timeout in vtgate. Only supported for SELECTS.
+ DirectiveQueryTimeout = "QUERY_TIMEOUT_MS"
+)
+
+func isNonSpace(r rune) bool {
+ return !unicode.IsSpace(r)
+}
+
+// leadingCommentEnd returns the first index after all leading comments, or
+// 0 if there are no leading comments.
+func leadingCommentEnd(text string) (end int) {
+ hasComment := false
+ pos := 0
+ for pos < len(text) {
+ // Eat up any whitespace. Trailing whitespace will be considered part of
+ // the leading comments.
+ nextVisibleOffset := strings.IndexFunc(text[pos:], isNonSpace)
+ if nextVisibleOffset < 0 {
+ break
+ }
+ pos += nextVisibleOffset
+ remainingText := text[pos:]
+
+ // Found visible characters. Look for '/*' at the beginning
+ // and '*/' somewhere after that.
+ if len(remainingText) < 4 || remainingText[:2] != "/*" {
+ break
+ }
+ commentLength := 4 + strings.Index(remainingText[2:], "*/")
+ if commentLength < 4 {
+ // Missing end comment :/
+ break
+ }
+
+ hasComment = true
+ pos += commentLength
+ }
+
+ if hasComment {
+ return pos
+ }
+ return 0
+}
+
+// trailingCommentStart returns the first index of trailing comments.
+// If there are no trailing comments, returns the length of the input string.
+func trailingCommentStart(text string) (start int) {
+ hasComment := false
+ reducedLen := len(text)
+ for reducedLen > 0 {
+ // Eat up any whitespace. Leading whitespace will be considered part of
+ // the trailing comments.
+ nextReducedLen := strings.LastIndexFunc(text[:reducedLen], isNonSpace) + 1
+ if nextReducedLen == 0 {
+ break
+ }
+ reducedLen = nextReducedLen
+ if reducedLen < 4 || text[reducedLen-2:reducedLen] != "*/" {
+ break
+ }
+
+ // Find the beginning of the comment
+ startCommentPos := strings.LastIndex(text[:reducedLen-2], "/*")
+ if startCommentPos < 0 {
+ // Badly formatted sql :/
+ break
+ }
+
+ hasComment = true
+ reducedLen = startCommentPos
+ }
+
+ if hasComment {
+ return reducedLen
+ }
+ return len(text)
+}
+
+// MarginComments holds the leading and trailing comments that surround a query.
+type MarginComments struct {
+ Leading string
+ Trailing string
+}
+
+// SplitMarginComments pulls out any leading or trailing comments from a raw sql query.
+// This function also trims leading (if there's a comment) and trailing whitespace.
+func SplitMarginComments(sql string) (query string, comments MarginComments) {
+ trailingStart := trailingCommentStart(sql)
+ leadingEnd := leadingCommentEnd(sql[:trailingStart])
+ comments = MarginComments{
+ Leading: strings.TrimLeftFunc(sql[:leadingEnd], unicode.IsSpace),
+ Trailing: strings.TrimRightFunc(sql[trailingStart:], unicode.IsSpace),
+ }
+ return strings.TrimFunc(sql[leadingEnd:trailingStart], unicode.IsSpace), comments
+}
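+
+// Illustrative sketch (editor's note, not part of the original source):
+//
+//   q, c := SplitMarginComments("/* lead */ select 1 /* trail */")
+//   // q == "select 1"; c.Leading and c.Trailing hold the surrounding comments,
+//   // with adjoining whitespace attached to the comment side.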
+
+// StripLeadingComments trims the SQL string and removes any leading comments
+func StripLeadingComments(sql string) string {
+ sql = strings.TrimFunc(sql, unicode.IsSpace)
+
+ for hasCommentPrefix(sql) {
+ switch sql[0] {
+ case '/':
+ // Multi line comment
+ index := strings.Index(sql, "*/")
+ if index <= 1 {
+ return sql
+ }
+ // don't strip /*! ... */ or /*!50700 ... */
+ if len(sql) > 2 && sql[2] == '!' {
+ return sql
+ }
+ sql = sql[index+2:]
+ case '-':
+ // Single line comment
+ index := strings.Index(sql, "\n")
+ if index == -1 {
+ return sql
+ }
+ sql = sql[index+1:]
+ }
+
+ sql = strings.TrimFunc(sql, unicode.IsSpace)
+ }
+
+ return sql
+}
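+
+// Illustrative sketch (editor's note, not part of the original source):
+//
+//   StripLeadingComments("/* routed */ -- note\nselect 1") // "select 1"
+//   StripLeadingComments("/*!50700 select 1 */")           // unchanged: /*! comments are preserved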
+
+func hasCommentPrefix(sql string) bool {
+ return len(sql) > 1 && ((sql[0] == '/' && sql[1] == '*') || (sql[0] == '-' && sql[1] == '-'))
+}
+
+// ExtractMysqlComment extracts the version and SQL from a comment-only query
+// such as /*!50708 sql here */
+func ExtractMysqlComment(sql string) (version string, innerSQL string) {
+ sql = sql[3 : len(sql)-2]
+
+ digitCount := 0
+ endOfVersionIndex := strings.IndexFunc(sql, func(c rune) bool {
+ digitCount++
+ return !unicode.IsDigit(c) || digitCount == 6
+ })
+ version = sql[0:endOfVersionIndex]
+ innerSQL = strings.TrimFunc(sql[endOfVersionIndex:], unicode.IsSpace)
+
+ return version, innerSQL
+}
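+
+// Illustrative sketch (editor's note, not part of the original source):
+//
+//   version, inner := ExtractMysqlComment("/*!50708 set names utf8 */")
+//   // version == "50708", inner == "set names utf8"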
+
+const commentDirectivePreamble = "/*vt+"
+
+// CommentDirectives is the parsed representation for execution directives
+// conveyed in query comments
+type CommentDirectives map[string]interface{}
+
+// ExtractCommentDirectives parses the comment list for any execution directives
+// of the form:
+//
+// /*vt+ OPTION_ONE=1 OPTION_TWO OPTION_THREE=abcd */
+//
+// It returns the map of the directive values or nil if there aren't any.
+func ExtractCommentDirectives(comments Comments) CommentDirectives {
+ if comments == nil {
+ return nil
+ }
+
+ var vals map[string]interface{}
+
+ for _, comment := range comments {
+ commentStr := string(comment)
+ if !strings.HasPrefix(commentStr, commentDirectivePreamble) {
+ continue
+ }
+
+ if vals == nil {
+ vals = make(map[string]interface{})
+ }
+
+ // Split on whitespace and ignore the first and last directive
+ // since they contain the comment start/end
+ directives := strings.Fields(commentStr)
+ for i := 1; i < len(directives)-1; i++ {
+ directive := directives[i]
+ sep := strings.IndexByte(directive, '=')
+
+ // No value is equivalent to a true boolean
+ if sep == -1 {
+ vals[directive] = true
+ continue
+ }
+
+ strVal := directive[sep+1:]
+ directive = directive[:sep]
+
+ intVal, err := strconv.Atoi(strVal)
+ if err == nil {
+ vals[directive] = intVal
+ continue
+ }
+
+ boolVal, err := strconv.ParseBool(strVal)
+ if err == nil {
+ vals[directive] = boolVal
+ continue
+ }
+
+ vals[directive] = strVal
+ }
+ }
+ return vals
+}
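+
+// Illustrative sketch (editor's note, not part of the original source):
+//
+//   c := Comments{[]byte("/*vt+ QUERY_TIMEOUT_MS=100 SKIP_QUERY_PLAN_CACHE */")}
+//   d := ExtractCommentDirectives(c)
+//   // d["QUERY_TIMEOUT_MS"] == 100 (int), d["SKIP_QUERY_PLAN_CACHE"] == true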
+
+// IsSet checks the directive map for the named directive and returns
+// true if the directive is set and has a true/false or 0/1 value
+func (d CommentDirectives) IsSet(key string) bool {
+ if d == nil {
+ return false
+ }
+
+ val, ok := d[key]
+ if !ok {
+ return false
+ }
+
+ boolVal, ok := val.(bool)
+ if ok {
+ return boolVal
+ }
+
+ intVal, ok := val.(int)
+ if ok {
+ return intVal == 1
+ }
+ return false
+}
+
+// SkipQueryPlanCacheDirective returns true if the skip query plan cache directive is set in the query.
+func SkipQueryPlanCacheDirective(stmt Statement) bool {
+ switch stmt := stmt.(type) {
+ case *Select:
+ directives := ExtractCommentDirectives(stmt.Comments)
+ if directives.IsSet(DirectiveSkipQueryPlanCache) {
+ return true
+ }
+ case *Insert:
+ directives := ExtractCommentDirectives(stmt.Comments)
+ if directives.IsSet(DirectiveSkipQueryPlanCache) {
+ return true
+ }
+ case *Update:
+ directives := ExtractCommentDirectives(stmt.Comments)
+ if directives.IsSet(DirectiveSkipQueryPlanCache) {
+ return true
+ }
+ case *Delete:
+ directives := ExtractCommentDirectives(stmt.Comments)
+ if directives.IsSet(DirectiveSkipQueryPlanCache) {
+ return true
+ }
+ default:
+ return false
+ }
+ return false
+}
diff --git a/vendor/github.com/CovenantSQL/sqlparser/dependency/bytes2/buffer.go b/vendor/github.com/CovenantSQL/sqlparser/dependency/bytes2/buffer.go
new file mode 100644
index 000000000..72f8fc6e4
--- /dev/null
+++ b/vendor/github.com/CovenantSQL/sqlparser/dependency/bytes2/buffer.go
@@ -0,0 +1,65 @@
+/*
+Copyright 2017 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package bytes2
+
+// Buffer implements a subset of the write portion of
+// bytes.Buffer, but more efficiently. This is meant to
+// be used in very high QPS operations, especially for
+// WriteByte, and without abstracting it as a Writer.
+// Function signatures include error returns for compatibility,
+// but the returned error is always nil.
+type Buffer struct {
+ bytes []byte
+}
+
+// NewBuffer is equivalent to bytes.NewBuffer.
+func NewBuffer(b []byte) *Buffer {
+ return &Buffer{bytes: b}
+}
+
+// Write is equivalent to bytes.Buffer.Write.
+func (buf *Buffer) Write(b []byte) (int, error) {
+ buf.bytes = append(buf.bytes, b...)
+ return len(b), nil
+}
+
+// WriteString is equivalent to bytes.Buffer.WriteString.
+func (buf *Buffer) WriteString(s string) (int, error) {
+ buf.bytes = append(buf.bytes, s...)
+ return len(s), nil
+}
+
+// WriteByte is equivalent to bytes.Buffer.WriteByte.
+func (buf *Buffer) WriteByte(b byte) error {
+ buf.bytes = append(buf.bytes, b)
+ return nil
+}
+
+// Bytes is equivalent to bytes.Buffer.Bytes.
+func (buf *Buffer) Bytes() []byte {
+ return buf.bytes
+}
+
+// String is equivalent to bytes.Buffer.String.
+func (buf *Buffer) String() string {
+ return string(buf.bytes)
+}
+
+// Len is equivalent to bytes.Buffer.Len.
+func (buf *Buffer) Len() int {
+ return len(buf.bytes)
+}
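
A quick usage sketch of the Buffer type above; the import path mirrors the vendored location.

    package main

    import (
        "fmt"

        "github.com/CovenantSQL/sqlparser/dependency/bytes2"
    )

    func main() {
        buf := bytes2.NewBuffer(nil)
        buf.WriteString("select * from t where id = ")
        buf.WriteByte(':')
        buf.Write([]byte("id")) // the error results are always nil; they exist only for bytes.Buffer compatibility
        fmt.Println(buf.String(), buf.Len()) // "select * from t where id = :id 31"
    }
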
diff --git a/vendor/github.com/CovenantSQL/sqlparser/dependency/hack/hack.go b/vendor/github.com/CovenantSQL/sqlparser/dependency/hack/hack.go
new file mode 100644
index 000000000..e6344ad99
--- /dev/null
+++ b/vendor/github.com/CovenantSQL/sqlparser/dependency/hack/hack.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2017 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package hack gives you some efficient functionality at the cost of
+// breaking some Go rules.
+package hack
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// StringArena lets you consolidate allocations for a group of strings
+// that have similar lifetimes.
+type StringArena struct {
+ buf []byte
+ str string
+}
+
+// NewStringArena creates an arena of the specified size.
+func NewStringArena(size int) *StringArena {
+ sa := &StringArena{buf: make([]byte, 0, size)}
+ pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&sa.buf))
+ pstring := (*reflect.StringHeader)(unsafe.Pointer(&sa.str))
+ pstring.Data = pbytes.Data
+ pstring.Len = pbytes.Cap
+ return sa
+}
+
+// NewString copies a byte slice into the arena and returns it as a string.
+// If the arena is full, it returns a traditional Go string.
+func (sa *StringArena) NewString(b []byte) string {
+ if len(b) == 0 {
+ return ""
+ }
+ if len(sa.buf)+len(b) > cap(sa.buf) {
+ return string(b)
+ }
+ start := len(sa.buf)
+ sa.buf = append(sa.buf, b...)
+ return sa.str[start : start+len(b)]
+}
+
+// SpaceLeft returns the amount of space left in the arena.
+func (sa *StringArena) SpaceLeft() int {
+ return cap(sa.buf) - len(sa.buf)
+}
+
+// String force casts a []byte to a string.
+// USE AT YOUR OWN RISK
+func String(b []byte) (s string) {
+ if len(b) == 0 {
+ return ""
+ }
+ pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+ pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
+ pstring.Data = pbytes.Data
+ pstring.Len = pbytes.Len
+ return
+}
+
+// StringPointer returns &s[0], which is not allowed in Go.
+func StringPointer(s string) unsafe.Pointer {
+ pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
+ return unsafe.Pointer(pstring.Data)
+}
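
A usage sketch for the helpers above. The key caveat is aliasing: strings produced by hack.String and StringArena.NewString share memory with their source buffers, so those buffers must not be modified afterwards.

    package main

    import (
        "fmt"

        "github.com/CovenantSQL/sqlparser/dependency/hack"
    )

    func main() {
        // Arena-backed strings: both values live in the same 32-byte buffer while it has room.
        arena := hack.NewStringArena(32)
        ks := arena.NewString([]byte("keyspace"))
        sh := arena.NewString([]byte("shard"))
        fmt.Println(ks, sh, arena.SpaceLeft()) // keyspace shard 19

        // Zero-copy conversion: s aliases b, so b must be treated as immutable from here on.
        b := []byte("frozen")
        s := hack.String(b)
        fmt.Println(s)
    }
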
diff --git a/vendor/github.com/CovenantSQL/sqlparser/dependency/querypb/query.pb.go b/vendor/github.com/CovenantSQL/sqlparser/dependency/querypb/query.pb.go
new file mode 100644
index 000000000..db9f924c1
--- /dev/null
+++ b/vendor/github.com/CovenantSQL/sqlparser/dependency/querypb/query.pb.go
@@ -0,0 +1,2734 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: query.proto
+
+/*
+Package query is a generated protocol buffer package.
+
+It is generated from these files:
+ query.proto
+
+It has these top-level messages:
+ Target
+ VTGateCallerID
+ EventToken
+ Value
+ BindVariable
+ BoundQuery
+ ExecuteOptions
+ Field
+ Row
+ ResultExtras
+ QueryResult
+ StreamEvent
+ ExecuteRequest
+ ExecuteResponse
+ ResultWithError
+ ExecuteBatchRequest
+ ExecuteBatchResponse
+ StreamExecuteRequest
+ StreamExecuteResponse
+ BeginRequest
+ BeginResponse
+ CommitRequest
+ CommitResponse
+ RollbackRequest
+ RollbackResponse
+ PrepareRequest
+ PrepareResponse
+ CommitPreparedRequest
+ CommitPreparedResponse
+ RollbackPreparedRequest
+ RollbackPreparedResponse
+ CreateTransactionRequest
+ CreateTransactionResponse
+ StartCommitRequest
+ StartCommitResponse
+ SetRollbackRequest
+ SetRollbackResponse
+ ConcludeTransactionRequest
+ ConcludeTransactionResponse
+ ReadTransactionRequest
+ ReadTransactionResponse
+ BeginExecuteRequest
+ BeginExecuteResponse
+ BeginExecuteBatchRequest
+ BeginExecuteBatchResponse
+ MessageStreamRequest
+ MessageStreamResponse
+ MessageAckRequest
+ MessageAckResponse
+ SplitQueryRequest
+ QuerySplit
+ SplitQueryResponse
+ StreamHealthRequest
+ RealtimeStats
+ StreamHealthResponse
+ UpdateStreamRequest
+ UpdateStreamResponse
+ TransactionMetadata
+*/
+package querypb
+
+import "strconv"
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// Flags sent from the MySQL C API
+type MySqlFlag int32
+
+const (
+ MySqlFlag_EMPTY MySqlFlag = 0
+ MySqlFlag_NOT_NULL_FLAG MySqlFlag = 1
+ MySqlFlag_PRI_KEY_FLAG MySqlFlag = 2
+ MySqlFlag_UNIQUE_KEY_FLAG MySqlFlag = 4
+ MySqlFlag_MULTIPLE_KEY_FLAG MySqlFlag = 8
+ MySqlFlag_BLOB_FLAG MySqlFlag = 16
+ MySqlFlag_UNSIGNED_FLAG MySqlFlag = 32
+ MySqlFlag_ZEROFILL_FLAG MySqlFlag = 64
+ MySqlFlag_BINARY_FLAG MySqlFlag = 128
+ MySqlFlag_ENUM_FLAG MySqlFlag = 256
+ MySqlFlag_AUTO_INCREMENT_FLAG MySqlFlag = 512
+ MySqlFlag_TIMESTAMP_FLAG MySqlFlag = 1024
+ MySqlFlag_SET_FLAG MySqlFlag = 2048
+ MySqlFlag_NO_DEFAULT_VALUE_FLAG MySqlFlag = 4096
+ MySqlFlag_ON_UPDATE_NOW_FLAG MySqlFlag = 8192
+ MySqlFlag_NUM_FLAG MySqlFlag = 32768
+ MySqlFlag_PART_KEY_FLAG MySqlFlag = 16384
+ MySqlFlag_GROUP_FLAG MySqlFlag = 32768
+ MySqlFlag_UNIQUE_FLAG MySqlFlag = 65536
+ MySqlFlag_BINCMP_FLAG MySqlFlag = 131072
+)
+
+var MySqlFlag_name = map[int32]string{
+ 0: "EMPTY",
+ 1: "NOT_NULL_FLAG",
+ 2: "PRI_KEY_FLAG",
+ 4: "UNIQUE_KEY_FLAG",
+ 8: "MULTIPLE_KEY_FLAG",
+ 16: "BLOB_FLAG",
+ 32: "UNSIGNED_FLAG",
+ 64: "ZEROFILL_FLAG",
+ 128: "BINARY_FLAG",
+ 256: "ENUM_FLAG",
+ 512: "AUTO_INCREMENT_FLAG",
+ 1024: "TIMESTAMP_FLAG",
+ 2048: "SET_FLAG",
+ 4096: "NO_DEFAULT_VALUE_FLAG",
+ 8192: "ON_UPDATE_NOW_FLAG",
+ 32768: "NUM_FLAG",
+ 16384: "PART_KEY_FLAG",
+ // Duplicate value: 32768: "GROUP_FLAG",
+ 65536: "UNIQUE_FLAG",
+ 131072: "BINCMP_FLAG",
+}
+var MySqlFlag_value = map[string]int32{
+ "EMPTY": 0,
+ "NOT_NULL_FLAG": 1,
+ "PRI_KEY_FLAG": 2,
+ "UNIQUE_KEY_FLAG": 4,
+ "MULTIPLE_KEY_FLAG": 8,
+ "BLOB_FLAG": 16,
+ "UNSIGNED_FLAG": 32,
+ "ZEROFILL_FLAG": 64,
+ "BINARY_FLAG": 128,
+ "ENUM_FLAG": 256,
+ "AUTO_INCREMENT_FLAG": 512,
+ "TIMESTAMP_FLAG": 1024,
+ "SET_FLAG": 2048,
+ "NO_DEFAULT_VALUE_FLAG": 4096,
+ "ON_UPDATE_NOW_FLAG": 8192,
+ "NUM_FLAG": 32768,
+ "PART_KEY_FLAG": 16384,
+ "GROUP_FLAG": 32768,
+ "UNIQUE_FLAG": 65536,
+ "BINCMP_FLAG": 131072,
+}
+
+func (x MySqlFlag) String() string {
+ return EnumName(MySqlFlag_name, int32(x))
+}
+
+// Flag allows us to qualify types by their common properties.
+type Flag int32
+
+const (
+ Flag_NONE Flag = 0
+ Flag_ISINTEGRAL Flag = 256
+ Flag_ISUNSIGNED Flag = 512
+ Flag_ISFLOAT Flag = 1024
+ Flag_ISQUOTED Flag = 2048
+ Flag_ISTEXT Flag = 4096
+ Flag_ISBINARY Flag = 8192
+)
+
+var Flag_name = map[int32]string{
+ 0: "NONE",
+ 256: "ISINTEGRAL",
+ 512: "ISUNSIGNED",
+ 1024: "ISFLOAT",
+ 2048: "ISQUOTED",
+ 4096: "ISTEXT",
+ 8192: "ISBINARY",
+}
+var Flag_value = map[string]int32{
+ "NONE": 0,
+ "ISINTEGRAL": 256,
+ "ISUNSIGNED": 512,
+ "ISFLOAT": 1024,
+ "ISQUOTED": 2048,
+ "ISTEXT": 4096,
+ "ISBINARY": 8192,
+}
+
+func (x Flag) String() string {
+ return EnumName(Flag_name, int32(x))
+}
+
+// Type defines the various supported data types in bind vars
+// and query results.
+type Type int32
+
+const (
+ // NULL_TYPE specifies a NULL type.
+ Type_NULL_TYPE Type = 0
+ // INT8 specifies a TINYINT type.
+ // Properties: 1, IsNumber.
+ Type_INT8 Type = 257
+ // UINT8 specifies a TINYINT UNSIGNED type.
+ // Properties: 2, IsNumber, IsUnsigned.
+ Type_UINT8 Type = 770
+ // INT16 specifies a SMALLINT type.
+ // Properties: 3, IsNumber.
+ Type_INT16 Type = 259
+ // UINT16 specifies a SMALLINT UNSIGNED type.
+ // Properties: 4, IsNumber, IsUnsigned.
+ Type_UINT16 Type = 772
+ // INT24 specifies a MEDIUMINT type.
+ // Properties: 5, IsNumber.
+ Type_INT24 Type = 261
+ // UINT24 specifies a MEDIUMINT UNSIGNED type.
+ // Properties: 6, IsNumber, IsUnsigned.
+ Type_UINT24 Type = 774
+ // INT32 specifies a INTEGER type.
+ // Properties: 7, IsNumber.
+ Type_INT32 Type = 263
+ // UINT32 specifies a INTEGER UNSIGNED type.
+ // Properties: 8, IsNumber, IsUnsigned.
+ Type_UINT32 Type = 776
+ // INT64 specifies a BIGINT type.
+ // Properties: 9, IsNumber.
+ Type_INT64 Type = 265
+ // UINT64 specifies a BIGINT UNSIGNED type.
+ // Properties: 10, IsNumber, IsUnsigned.
+ Type_UINT64 Type = 778
+ // FLOAT32 specifies a FLOAT type.
+ // Properties: 11, IsFloat.
+ Type_FLOAT32 Type = 1035
+ // FLOAT64 specifies a DOUBLE or REAL type.
+ // Properties: 12, IsFloat.
+ Type_FLOAT64 Type = 1036
+ // TIMESTAMP specifies a TIMESTAMP type.
+ // Properties: 13, IsQuoted.
+ Type_TIMESTAMP Type = 2061
+ // DATE specifies a DATE type.
+ // Properties: 14, IsQuoted.
+ Type_DATE Type = 2062
+ // TIME specifies a TIME type.
+ // Properties: 15, IsQuoted.
+ Type_TIME Type = 2063
+ // DATETIME specifies a DATETIME type.
+ // Properties: 16, IsQuoted.
+ Type_DATETIME Type = 2064
+ // YEAR specifies a YEAR type.
+ // Properties: 17, IsNumber, IsUnsigned.
+ Type_YEAR Type = 785
+ // DECIMAL specifies a DECIMAL or NUMERIC type.
+ // Properties: 18, None.
+ Type_DECIMAL Type = 18
+ // TEXT specifies a TEXT type.
+ // Properties: 19, IsQuoted, IsText.
+ Type_TEXT Type = 6163
+ // BLOB specifies a BLOB type.
+ // Properties: 20, IsQuoted, IsBinary.
+ Type_BLOB Type = 10260
+ // VARCHAR specifies a VARCHAR type.
+ // Properties: 21, IsQuoted, IsText.
+ Type_VARCHAR Type = 6165
+ // VARBINARY specifies a VARBINARY type.
+ // Properties: 22, IsQuoted, IsBinary.
+ Type_VARBINARY Type = 10262
+ // CHAR specifies a CHAR type.
+ // Properties: 23, IsQuoted, IsText.
+ Type_CHAR Type = 6167
+ // BINARY specifies a BINARY type.
+ // Properties: 24, IsQuoted, IsBinary.
+ Type_BINARY Type = 10264
+ // BIT specifies a BIT type.
+ // Properties: 25, IsQuoted.
+ Type_BIT Type = 2073
+ // ENUM specifies an ENUM type.
+ // Properties: 26, IsQuoted.
+ Type_ENUM Type = 2074
+ // SET specifies a SET type.
+ // Properties: 27, IsQuoted.
+ Type_SET Type = 2075
+ // TUPLE specifies a tuple. This cannot
+ // be returned in a QueryResult, but it can
+ // be sent as a bind var.
+ // Properties: 28, None.
+ Type_TUPLE Type = 28
+ // GEOMETRY specifies a GEOMETRY type.
+ // Properties: 29, IsQuoted.
+ Type_GEOMETRY Type = 2077
+ // JSON specifies a JSON type.
+ // Properties: 30, IsQuoted.
+ Type_JSON Type = 2078
+ // EXPRESSION specifies a SQL expression.
+ // This type is for internal use only.
+ // Properties: 31, None.
+ Type_EXPRESSION Type = 31
+)
+
+var Type_name = map[int32]string{
+ 0: "NULL_TYPE",
+ 257: "INT8",
+ 770: "UINT8",
+ 259: "INT16",
+ 772: "UINT16",
+ 261: "INT24",
+ 774: "UINT24",
+ 263: "INT32",
+ 776: "UINT32",
+ 265: "INT64",
+ 778: "UINT64",
+ 1035: "FLOAT32",
+ 1036: "FLOAT64",
+ 2061: "TIMESTAMP",
+ 2062: "DATE",
+ 2063: "TIME",
+ 2064: "DATETIME",
+ 785: "YEAR",
+ 18: "DECIMAL",
+ 6163: "TEXT",
+ 10260: "BLOB",
+ 6165: "VARCHAR",
+ 10262: "VARBINARY",
+ 6167: "CHAR",
+ 10264: "BINARY",
+ 2073: "BIT",
+ 2074: "ENUM",
+ 2075: "SET",
+ 28: "TUPLE",
+ 2077: "GEOMETRY",
+ 2078: "JSON",
+ 31: "EXPRESSION",
+}
+var Type_value = map[string]int32{
+ "NULL_TYPE": 0,
+ "INT8": 257,
+ "UINT8": 770,
+ "INT16": 259,
+ "UINT16": 772,
+ "INT24": 261,
+ "UINT24": 774,
+ "INT32": 263,
+ "UINT32": 776,
+ "INT64": 265,
+ "UINT64": 778,
+ "FLOAT32": 1035,
+ "FLOAT64": 1036,
+ "TIMESTAMP": 2061,
+ "DATE": 2062,
+ "TIME": 2063,
+ "DATETIME": 2064,
+ "YEAR": 785,
+ "DECIMAL": 18,
+ "TEXT": 6163,
+ "BLOB": 10260,
+ "VARCHAR": 6165,
+ "VARBINARY": 10262,
+ "CHAR": 6167,
+ "BINARY": 10264,
+ "BIT": 2073,
+ "ENUM": 2074,
+ "SET": 2075,
+ "TUPLE": 28,
+ "GEOMETRY": 2077,
+ "JSON": 2078,
+ "EXPRESSION": 31,
+}
+
+func (x Type) String() string {
+ return EnumName(Type_name, int32(x))
+}
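
A brief sketch of how these generated name maps drive the String methods: known values print their symbolic name, while values missing from the map fall back to strconv.Itoa via EnumName.

    package main

    import (
        "fmt"

        "github.com/CovenantSQL/sqlparser/dependency/querypb"
    )

    func main() {
        fmt.Println(querypb.Type_VARCHAR.String())        // VARCHAR
        fmt.Println(querypb.MySqlFlag_BLOB_FLAG.String()) // BLOB_FLAG
        fmt.Println(querypb.Type(999999).String())        // 999999 (not in Type_name, so the Itoa fallback)
    }
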
+
+// TransactionState represents the state of a distributed transaction.
+type TransactionState int32
+
+const (
+ TransactionState_UNKNOWN TransactionState = 0
+ TransactionState_PREPARE TransactionState = 1
+ TransactionState_COMMIT TransactionState = 2
+ TransactionState_ROLLBACK TransactionState = 3
+)
+
+var TransactionState_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "PREPARE",
+ 2: "COMMIT",
+ 3: "ROLLBACK",
+}
+var TransactionState_value = map[string]int32{
+ "UNKNOWN": 0,
+ "PREPARE": 1,
+ "COMMIT": 2,
+ "ROLLBACK": 3,
+}
+
+func (x TransactionState) String() string {
+ return EnumName(TransactionState_name, int32(x))
+}
+
+type ExecuteOptions_IncludedFields int32
+
+const (
+ ExecuteOptions_TYPE_AND_NAME ExecuteOptions_IncludedFields = 0
+ ExecuteOptions_TYPE_ONLY ExecuteOptions_IncludedFields = 1
+ ExecuteOptions_ALL ExecuteOptions_IncludedFields = 2
+)
+
+var ExecuteOptions_IncludedFields_name = map[int32]string{
+ 0: "TYPE_AND_NAME",
+ 1: "TYPE_ONLY",
+ 2: "ALL",
+}
+var ExecuteOptions_IncludedFields_value = map[string]int32{
+ "TYPE_AND_NAME": 0,
+ "TYPE_ONLY": 1,
+ "ALL": 2,
+}
+
+func (x ExecuteOptions_IncludedFields) String() string {
+ return EnumName(ExecuteOptions_IncludedFields_name, int32(x))
+}
+
+type ExecuteOptions_Workload int32
+
+const (
+ ExecuteOptions_UNSPECIFIED ExecuteOptions_Workload = 0
+ ExecuteOptions_OLTP ExecuteOptions_Workload = 1
+ ExecuteOptions_OLAP ExecuteOptions_Workload = 2
+ ExecuteOptions_DBA ExecuteOptions_Workload = 3
+)
+
+var ExecuteOptions_Workload_name = map[int32]string{
+ 0: "UNSPECIFIED",
+ 1: "OLTP",
+ 2: "OLAP",
+ 3: "DBA",
+}
+var ExecuteOptions_Workload_value = map[string]int32{
+ "UNSPECIFIED": 0,
+ "OLTP": 1,
+ "OLAP": 2,
+ "DBA": 3,
+}
+
+func (x ExecuteOptions_Workload) String() string {
+ return EnumName(ExecuteOptions_Workload_name, int32(x))
+}
+
+type ExecuteOptions_TransactionIsolation int32
+
+const (
+ ExecuteOptions_DEFAULT ExecuteOptions_TransactionIsolation = 0
+ ExecuteOptions_REPEATABLE_READ ExecuteOptions_TransactionIsolation = 1
+ ExecuteOptions_READ_COMMITTED ExecuteOptions_TransactionIsolation = 2
+ ExecuteOptions_READ_UNCOMMITTED ExecuteOptions_TransactionIsolation = 3
+ ExecuteOptions_SERIALIZABLE ExecuteOptions_TransactionIsolation = 4
+)
+
+var ExecuteOptions_TransactionIsolation_name = map[int32]string{
+ 0: "DEFAULT",
+ 1: "REPEATABLE_READ",
+ 2: "READ_COMMITTED",
+ 3: "READ_UNCOMMITTED",
+ 4: "SERIALIZABLE",
+}
+var ExecuteOptions_TransactionIsolation_value = map[string]int32{
+ "DEFAULT": 0,
+ "REPEATABLE_READ": 1,
+ "READ_COMMITTED": 2,
+ "READ_UNCOMMITTED": 3,
+ "SERIALIZABLE": 4,
+}
+
+func (x ExecuteOptions_TransactionIsolation) String() string {
+ return EnumName(ExecuteOptions_TransactionIsolation_name, int32(x))
+}
+
+// The category of one statement.
+type StreamEvent_Statement_Category int32
+
+const (
+ StreamEvent_Statement_Error StreamEvent_Statement_Category = 0
+ StreamEvent_Statement_DML StreamEvent_Statement_Category = 1
+ StreamEvent_Statement_DDL StreamEvent_Statement_Category = 2
+)
+
+var StreamEvent_Statement_Category_name = map[int32]string{
+ 0: "Error",
+ 1: "DML",
+ 2: "DDL",
+}
+var StreamEvent_Statement_Category_value = map[string]int32{
+ "Error": 0,
+ "DML": 1,
+ "DDL": 2,
+}
+
+func (x StreamEvent_Statement_Category) String() string {
+ return EnumName(StreamEvent_Statement_Category_name, int32(x))
+}
+
+type SplitQueryRequest_Algorithm int32
+
+const (
+ SplitQueryRequest_EQUAL_SPLITS SplitQueryRequest_Algorithm = 0
+ SplitQueryRequest_FULL_SCAN SplitQueryRequest_Algorithm = 1
+)
+
+var SplitQueryRequest_Algorithm_name = map[int32]string{
+ 0: "EQUAL_SPLITS",
+ 1: "FULL_SCAN",
+}
+var SplitQueryRequest_Algorithm_value = map[string]int32{
+ "EQUAL_SPLITS": 0,
+ "FULL_SCAN": 1,
+}
+
+func (x SplitQueryRequest_Algorithm) String() string {
+ return EnumName(SplitQueryRequest_Algorithm_name, int32(x))
+}
+
+/*
+// Target describes what the client expects the tablet to be.
+// If the tablet does not match, an error is returned.
+type Target struct {
+ Keyspace string `protobuf:"bytes,1,opt,name=keyspace" json:"keyspace,omitempty"`
+ Shard string `protobuf:"bytes,2,opt,name=shard" json:"shard,omitempty"`
+ TabletType topodata.TabletType `protobuf:"varint,3,opt,name=tablet_type,json=tabletType,enum=topodata.TabletType" json:"tablet_type,omitempty"`
+}
+
+func (m *Target) Reset() { *m = Target{} }
+func (m *Target) String() string { return "TODO" }
+
+
+
+func (m *Target) GetKeyspace() string {
+ if m != nil {
+ return m.Keyspace
+ }
+ return ""
+}
+
+func (m *Target) GetShard() string {
+ if m != nil {
+ return m.Shard
+ }
+ return ""
+}
+
+func (m *Target) GetTabletType() topodata.TabletType {
+ if m != nil {
+ return m.TabletType
+ }
+ return topodata.TabletType_UNKNOWN
+}
+
+
+// VTGateCallerID is sent by VTGate to VTTablet to describe the
+// caller. If possible, this information is secure. For instance,
+// if using unique certificates that guarantee that VTGate->VTTablet
+// traffic cannot be spoofed, then VTTablet can trust this information,
+// and VTTablet will use it for tablet ACLs, for instance.
+// Because of this security guarantee, this is different than the CallerID
+// structure, which is not secure at all, because it is provided
+// by the Vitess client.
+type VTGateCallerID struct {
+ Username string `protobuf:"bytes,1,opt,name=username" json:"username,omitempty"`
+ Groups []string `protobuf:"bytes,2,rep,name=groups" json:"groups,omitempty"`
+}
+
+func (m *VTGateCallerID) Reset() { *m = VTGateCallerID{} }
+func (m *VTGateCallerID) String() string { return "TODO" }
+
+
+
+func (m *VTGateCallerID) GetUsername() string {
+ if m != nil {
+ return m.Username
+ }
+ return ""
+}
+
+func (m *VTGateCallerID) GetGroups() []string {
+ if m != nil {
+ return m.Groups
+ }
+ return nil
+}
+
+// EventToken is a structure that describes a point in time in a
+// replication stream on one shard. The most recent known replication
+// position can be retrieved from vttablet when executing a query. It
+// is also sent with the replication streams from the binlog service.
+type EventToken struct {
+ // timestamp is the MySQL timestamp of the statements. Seconds since Epoch.
+ Timestamp int64 `protobuf:"varint,1,opt,name=timestamp" json:"timestamp,omitempty"`
+ // The shard name that applied the statements. Note this is not set when
+ // streaming from a vttablet. It is only used on the client -> vtgate link.
+ Shard string `protobuf:"bytes,2,opt,name=shard" json:"shard,omitempty"`
+ // The position on the replication stream after this statement was applied.
+ // It is not the transaction ID / GTID, but the position / GTIDSet.
+ Position string `protobuf:"bytes,3,opt,name=position" json:"position,omitempty"`
+}
+
+func (m *EventToken) Reset() { *m = EventToken{} }
+func (m *EventToken) String() string { return "TODO" }
+
+
+
+func (m *EventToken) GetTimestamp() int64 {
+ if m != nil {
+ return m.Timestamp
+ }
+ return 0
+}
+
+func (m *EventToken) GetShard() string {
+ if m != nil {
+ return m.Shard
+ }
+ return ""
+}
+
+func (m *EventToken) GetPosition() string {
+ if m != nil {
+ return m.Position
+ }
+ return ""
+}
+*/
+
+// Value represents a typed value.
+type Value struct {
+ Type Type `protobuf:"varint,1,opt,name=type,enum=query.Type" json:"type,omitempty"`
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *Value) Reset() { *m = Value{} }
+func (m *Value) String() string { return "TODO" }
+
+func (m *Value) GetType() Type {
+ if m != nil {
+ return m.Type
+ }
+ return Type_NULL_TYPE
+}
+
+func (m *Value) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+// BindVariable represents a single bind variable in a Query.
+type BindVariable struct {
+ Type Type `protobuf:"varint,1,opt,name=type,enum=query.Type" json:"type,omitempty"`
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ // values are set if type is TUPLE.
+ Values []*Value `protobuf:"bytes,3,rep,name=values" json:"values,omitempty"`
+}
+
+func (m *BindVariable) Reset() { *m = BindVariable{} }
+func (m *BindVariable) String() string { return "TODO" }
+
+func (m *BindVariable) GetType() Type {
+ if m != nil {
+ return m.Type
+ }
+ return Type_NULL_TYPE
+}
+
+func (m *BindVariable) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *BindVariable) GetValues() []*Value {
+ if m != nil {
+ return m.Values
+ }
+ return nil
+}
+
+// BoundQuery is a query with its bind variables
+type BoundQuery struct {
+ // sql is the SQL query to execute
+ Sql string `protobuf:"bytes,1,opt,name=sql" json:"sql,omitempty"`
+ // bind_variables is a map of all bind variables to expand in the query.
+ // nil values are not allowed. Use NULL_TYPE to express a NULL value.
+ BindVariables map[string]*BindVariable `protobuf:"bytes,2,rep,name=bind_variables,json=bindVariables" json:"bind_variables,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+}
+
+func (m *BoundQuery) Reset() { *m = BoundQuery{} }
+func (m *BoundQuery) String() string { return "TODO" }
+
+func (m *BoundQuery) GetSql() string {
+ if m != nil {
+ return m.Sql
+ }
+ return ""
+}
+
+func (m *BoundQuery) GetBindVariables() map[string]*BindVariable {
+ if m != nil {
+ return m.BindVariables
+ }
+ return nil
+}
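
For reference, a small sketch showing how the Value, BindVariable, and BoundQuery messages above fit together: bind values travel as raw bytes, and the Type field tells the consumer how to decode them.

    package main

    import (
        "fmt"

        "github.com/CovenantSQL/sqlparser/dependency/querypb"
    )

    func main() {
        bq := &querypb.BoundQuery{
            Sql: "select * from t where id = :id",
            BindVariables: map[string]*querypb.BindVariable{
                // The bind value is carried as bytes; Type_INT64 marks how to interpret it.
                "id": {Type: querypb.Type_INT64, Value: []byte("42")},
            },
        }
        fmt.Println(bq.GetSql(), bq.GetBindVariables()["id"].GetType()) // ... INT64
    }
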
+
+/*
+// ExecuteOptions is passed around for all Execute calls.
+type ExecuteOptions struct {
+ // If set, we will try to include an EventToken with the responses.
+ IncludeEventToken bool `protobuf:"varint,2,opt,name=include_event_token,json=includeEventToken" json:"include_event_token,omitempty"`
+ // If set, the fresher field may be set as a result comparison to this token.
+ // This is a shortcut so the application doesn't need to care about
+ // comparing EventTokens.
+ CompareEventToken *EventToken `protobuf:"bytes,3,opt,name=compare_event_token,json=compareEventToken" json:"compare_event_token,omitempty"`
+ // Controls what fields are returned in Field message responses from mysql, i.e.
+ // field name, table name, etc. This is an optimization for high-QPS queries where
+ // the client knows what it's getting
+ IncludedFields ExecuteOptions_IncludedFields `protobuf:"varint,4,opt,name=included_fields,json=includedFields,enum=query.ExecuteOptions_IncludedFields" json:"included_fields,omitempty"`
+ // client_rows_found specifies if rows_affected should return
+ // rows found instead of rows affected. Behavior is defined
+ // by MySQL's CLIENT_FOUND_ROWS flag.
+ ClientFoundRows bool `protobuf:"varint,5,opt,name=client_found_rows,json=clientFoundRows" json:"client_found_rows,omitempty"`
+ // workload specifies the type of workload:
+ // OLTP: DMLs allowed, results have row count limit, and
+ // query timeouts are shorter.
+ // OLAP: DMLs not allowed, no limit on row count, timeouts
+ // can be as high as desired.
+ // DBA: no limit on rowcount or timeout, all queries allowed
+ // but intended for long DMLs and DDLs.
+ Workload ExecuteOptions_Workload `protobuf:"varint,6,opt,name=workload,enum=query.ExecuteOptions_Workload" json:"workload,omitempty"`
+ // sql_select_limit sets an implicit limit on all select statements. Since
+ // vitess also sets a rowcount limit on queries, the smallest value wins.
+ SqlSelectLimit int64 `protobuf:"varint,8,opt,name=sql_select_limit,json=sqlSelectLimit" json:"sql_select_limit,omitempty"`
+ TransactionIsolation ExecuteOptions_TransactionIsolation `protobuf:"varint,9,opt,name=transaction_isolation,json=transactionIsolation,enum=query.ExecuteOptions_TransactionIsolation" json:"transaction_isolation,omitempty"`
+}
+
+func (m *ExecuteOptions) Reset() { *m = ExecuteOptions{} }
+func (m *ExecuteOptions) String() string { return "TODO" }
+
+
+
+func (m *ExecuteOptions) GetIncludeEventToken() bool {
+ if m != nil {
+ return m.IncludeEventToken
+ }
+ return false
+}
+
+func (m *ExecuteOptions) GetCompareEventToken() *EventToken {
+ if m != nil {
+ return m.CompareEventToken
+ }
+ return nil
+}
+
+func (m *ExecuteOptions) GetIncludedFields() ExecuteOptions_IncludedFields {
+ if m != nil {
+ return m.IncludedFields
+ }
+ return ExecuteOptions_TYPE_AND_NAME
+}
+
+func (m *ExecuteOptions) GetClientFoundRows() bool {
+ if m != nil {
+ return m.ClientFoundRows
+ }
+ return false
+}
+
+func (m *ExecuteOptions) GetWorkload() ExecuteOptions_Workload {
+ if m != nil {
+ return m.Workload
+ }
+ return ExecuteOptions_UNSPECIFIED
+}
+
+func (m *ExecuteOptions) GetSqlSelectLimit() int64 {
+ if m != nil {
+ return m.SqlSelectLimit
+ }
+ return 0
+}
+
+func (m *ExecuteOptions) GetTransactionIsolation() ExecuteOptions_TransactionIsolation {
+ if m != nil {
+ return m.TransactionIsolation
+ }
+ return ExecuteOptions_DEFAULT
+}
+
+// Field describes a single column returned by a query
+type Field struct {
+ // name of the field as returned by mysql C API
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ // vitess-defined type. Conversion function is in sqltypes package.
+ Type Type `protobuf:"varint,2,opt,name=type,enum=query.Type" json:"type,omitempty"`
+ // Remaining fields from mysql C API.
+ // These fields are only populated when ExecuteOptions.included_fields
+ // is set to IncludedFields.ALL.
+ Table string `protobuf:"bytes,3,opt,name=table" json:"table,omitempty"`
+ OrgTable string `protobuf:"bytes,4,opt,name=org_table,json=orgTable" json:"org_table,omitempty"`
+ Database string `protobuf:"bytes,5,opt,name=database" json:"database,omitempty"`
+ OrgName string `protobuf:"bytes,6,opt,name=org_name,json=orgName" json:"org_name,omitempty"`
+ // column_length is really a uint32. All 32 bits can be used.
+ ColumnLength uint32 `protobuf:"varint,7,opt,name=column_length,json=columnLength" json:"column_length,omitempty"`
+ // charset is actually a uint16. Only the lower 16 bits are used.
+ Charset uint32 `protobuf:"varint,8,opt,name=charset" json:"charset,omitempty"`
+ // decimals is actually a uint8. Only the lower 8 bits are used.
+ Decimals uint32 `protobuf:"varint,9,opt,name=decimals" json:"decimals,omitempty"`
+ // flags is actually a uint16. Only the lower 16 bits are used.
+ Flags uint32 `protobuf:"varint,10,opt,name=flags" json:"flags,omitempty"`
+}
+
+func (m *Field) Reset() { *m = Field{} }
+func (m *Field) String() string { return "TODO" }
+
+
+
+func (m *Field) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *Field) GetType() Type {
+ if m != nil {
+ return m.Type
+ }
+ return Type_NULL_TYPE
+}
+
+func (m *Field) GetTable() string {
+ if m != nil {
+ return m.Table
+ }
+ return ""
+}
+
+func (m *Field) GetOrgTable() string {
+ if m != nil {
+ return m.OrgTable
+ }
+ return ""
+}
+
+func (m *Field) GetDatabase() string {
+ if m != nil {
+ return m.Database
+ }
+ return ""
+}
+
+func (m *Field) GetOrgName() string {
+ if m != nil {
+ return m.OrgName
+ }
+ return ""
+}
+
+func (m *Field) GetColumnLength() uint32 {
+ if m != nil {
+ return m.ColumnLength
+ }
+ return 0
+}
+
+func (m *Field) GetCharset() uint32 {
+ if m != nil {
+ return m.Charset
+ }
+ return 0
+}
+
+func (m *Field) GetDecimals() uint32 {
+ if m != nil {
+ return m.Decimals
+ }
+ return 0
+}
+
+func (m *Field) GetFlags() uint32 {
+ if m != nil {
+ return m.Flags
+ }
+ return 0
+}
+
+// Row is a database row.
+type Row struct {
+ // lengths contains the length of each value in values.
+ // A length of -1 means that the field is NULL. While
+ // reading values, you have to accumulate the length
+ // to know the offset where the next value begins in values.
+ Lengths []int64 `protobuf:"zigzag64,1,rep,packed,name=lengths" json:"lengths,omitempty"`
+ // values contains a concatenation of all values in the row.
+ Values []byte `protobuf:"bytes,2,opt,name=values,proto3" json:"values,omitempty"`
+}
+
+func (m *Row) Reset() { *m = Row{} }
+func (m *Row) String() string { return "TODO" }
+
+
+
+func (m *Row) GetLengths() []int64 {
+ if m != nil {
+ return m.Lengths
+ }
+ return nil
+}
+
+func (m *Row) GetValues() []byte {
+ if m != nil {
+ return m.Values
+ }
+ return nil
+}
+
+// ResultExtras contains optional out-of-band information. Usually the
+// extras are requested by adding ExecuteOptions flags.
+type ResultExtras struct {
+ // event_token is populated if the include_event_token flag is set
+ // in ExecuteOptions.
+ EventToken *EventToken `protobuf:"bytes,1,opt,name=event_token,json=eventToken" json:"event_token,omitempty"`
+ // If set, it means the data returned with this result is fresher
+ // than the compare_token passed in the ExecuteOptions.
+ Fresher bool `protobuf:"varint,2,opt,name=fresher" json:"fresher,omitempty"`
+}
+
+func (m *ResultExtras) Reset() { *m = ResultExtras{} }
+func (m *ResultExtras) String() string { return "TODO" }
+
+
+
+func (m *ResultExtras) GetEventToken() *EventToken {
+ if m != nil {
+ return m.EventToken
+ }
+ return nil
+}
+
+func (m *ResultExtras) GetFresher() bool {
+ if m != nil {
+ return m.Fresher
+ }
+ return false
+}
+
+// QueryResult is returned by Execute and ExecuteStream.
+//
+// As returned by Execute, len(fields) is always equal to len(row)
+// (for each row in rows).
+//
+// As returned by StreamExecute, the first QueryResult has the fields
+// set, and subsequent QueryResults have rows set. And as with Execute,
+// len(QueryResult[0].fields) is always equal to len(row) (for each
+// row in rows for each QueryResult in QueryResult[1:]).
+type QueryResult struct {
+ Fields []*Field `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty"`
+ RowsAffected uint64 `protobuf:"varint,2,opt,name=rows_affected,json=rowsAffected" json:"rows_affected,omitempty"`
+ InsertId uint64 `protobuf:"varint,3,opt,name=insert_id,json=insertId" json:"insert_id,omitempty"`
+ Rows []*Row `protobuf:"bytes,4,rep,name=rows" json:"rows,omitempty"`
+ Extras *ResultExtras `protobuf:"bytes,5,opt,name=extras" json:"extras,omitempty"`
+}
+
+func (m *QueryResult) Reset() { *m = QueryResult{} }
+func (m *QueryResult) String() string { return "TODO" }
+
+
+
+func (m *QueryResult) GetFields() []*Field {
+ if m != nil {
+ return m.Fields
+ }
+ return nil
+}
+
+func (m *QueryResult) GetRowsAffected() uint64 {
+ if m != nil {
+ return m.RowsAffected
+ }
+ return 0
+}
+
+func (m *QueryResult) GetInsertId() uint64 {
+ if m != nil {
+ return m.InsertId
+ }
+ return 0
+}
+
+func (m *QueryResult) GetRows() []*Row {
+ if m != nil {
+ return m.Rows
+ }
+ return nil
+}
+
+func (m *QueryResult) GetExtras() *ResultExtras {
+ if m != nil {
+ return m.Extras
+ }
+ return nil
+}
+
+// StreamEvent describes a set of transformations that happened as a
+// single transactional unit on a server. It is streamed back by the
+// Update Stream calls.
+type StreamEvent struct {
+ // The statements in this transaction.
+ Statements []*StreamEvent_Statement `protobuf:"bytes,1,rep,name=statements" json:"statements,omitempty"`
+ // The Event Token for this event.
+ EventToken *EventToken `protobuf:"bytes,2,opt,name=event_token,json=eventToken" json:"event_token,omitempty"`
+}
+
+func (m *StreamEvent) Reset() { *m = StreamEvent{} }
+func (m *StreamEvent) String() string { return "TODO" }
+
+
+
+func (m *StreamEvent) GetStatements() []*StreamEvent_Statement {
+ if m != nil {
+ return m.Statements
+ }
+ return nil
+}
+
+func (m *StreamEvent) GetEventToken() *EventToken {
+ if m != nil {
+ return m.EventToken
+ }
+ return nil
+}
+
+// One individual Statement in a transaction.
+type StreamEvent_Statement struct {
+ Category StreamEvent_Statement_Category `protobuf:"varint,1,opt,name=category,enum=query.StreamEvent_Statement_Category" json:"category,omitempty"`
+ // table_name, primary_key_fields and primary_key_values are set for DML.
+ TableName string `protobuf:"bytes,2,opt,name=table_name,json=tableName" json:"table_name,omitempty"`
+ PrimaryKeyFields []*Field `protobuf:"bytes,3,rep,name=primary_key_fields,json=primaryKeyFields" json:"primary_key_fields,omitempty"`
+ PrimaryKeyValues []*Row `protobuf:"bytes,4,rep,name=primary_key_values,json=primaryKeyValues" json:"primary_key_values,omitempty"`
+ // sql is set for all queries.
+ // FIXME(alainjobart) we may not need it for DMLs.
+ Sql []byte `protobuf:"bytes,5,opt,name=sql,proto3" json:"sql,omitempty"`
+}
+
+func (m *StreamEvent_Statement) Reset() { *m = StreamEvent_Statement{} }
+func (m *StreamEvent_Statement) String() string { return "TODO" }
+
+
+
+func (m *StreamEvent_Statement) GetCategory() StreamEvent_Statement_Category {
+ if m != nil {
+ return m.Category
+ }
+ return StreamEvent_Statement_Error
+}
+
+func (m *StreamEvent_Statement) GetTableName() string {
+ if m != nil {
+ return m.TableName
+ }
+ return ""
+}
+
+func (m *StreamEvent_Statement) GetPrimaryKeyFields() []*Field {
+ if m != nil {
+ return m.PrimaryKeyFields
+ }
+ return nil
+}
+
+func (m *StreamEvent_Statement) GetPrimaryKeyValues() []*Row {
+ if m != nil {
+ return m.PrimaryKeyValues
+ }
+ return nil
+}
+
+func (m *StreamEvent_Statement) GetSql() []byte {
+ if m != nil {
+ return m.Sql
+ }
+ return nil
+}
+
+
+// ExecuteRequest is the payload to Execute
+type ExecuteRequest struct {
+ EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"`
+ ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"`
+ Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"`
+ Query *BoundQuery `protobuf:"bytes,4,opt,name=query" json:"query,omitempty"`
+ TransactionId int64 `protobuf:"varint,5,opt,name=transaction_id,json=transactionId" json:"transaction_id,omitempty"`
+ Options *ExecuteOptions `protobuf:"bytes,6,opt,name=options" json:"options,omitempty"`
+}
+
+func (m *ExecuteRequest) Reset() { *m = ExecuteRequest{} }
+func (m *ExecuteRequest) String() string { return "TODO" }
+
+
+
+func (m *ExecuteRequest) GetEffectiveCallerId() *vtrpc.CallerID {
+ if m != nil {
+ return m.EffectiveCallerId
+ }
+ return nil
+}
+
+func (m *ExecuteRequest) GetImmediateCallerId() *VTGateCallerID {
+ if m != nil {
+ return m.ImmediateCallerId
+ }
+ return nil
+}
+
+func (m *ExecuteRequest) GetTarget() *Target {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (m *ExecuteRequest) GetQuery() *BoundQuery {
+ if m != nil {
+ return m.Query
+ }
+ return nil
+}
+
+func (m *ExecuteRequest) GetTransactionId() int64 {
+ if m != nil {
+ return m.TransactionId
+ }
+ return 0
+}
+
+func (m *ExecuteRequest) GetOptions() *ExecuteOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+// ExecuteResponse is the returned value from Execute
+type ExecuteResponse struct {
+ Result *QueryResult `protobuf:"bytes,1,opt,name=result" json:"result,omitempty"`
+}
+
+func (m *ExecuteResponse) Reset() { *m = ExecuteResponse{} }
+func (m *ExecuteResponse) String() string { return "TODO" }
+
+
+
+func (m *ExecuteResponse) GetResult() *QueryResult {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+// ResultWithError represents a query response
+// in the form of result or error but not both.
+// TODO: To be used in ExecuteBatchResponse and BeginExecuteBatchResponse.
+type ResultWithError struct {
+ // error contains a query-level error, only set if result is unset.
+ Error *vtrpc.RPCError `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
+ // result contains the query result, only set if error is unset.
+ Result *QueryResult `protobuf:"bytes,2,opt,name=result" json:"result,omitempty"`
+}
+
+func (m *ResultWithError) Reset() { *m = ResultWithError{} }
+func (m *ResultWithError) String() string { return "TODO" }
+
+
+
+func (m *ResultWithError) GetError() *vtrpc.RPCError {
+ if m != nil {
+ return m.Error
+ }
+ return nil
+}
+
+func (m *ResultWithError) GetResult() *QueryResult {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+// ExecuteBatchRequest is the payload to ExecuteBatch
+type ExecuteBatchRequest struct {
+ EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"`
+ ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"`
+ Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"`
+ Queries []*BoundQuery `protobuf:"bytes,4,rep,name=queries" json:"queries,omitempty"`
+ AsTransaction bool `protobuf:"varint,5,opt,name=as_transaction,json=asTransaction" json:"as_transaction,omitempty"`
+ TransactionId int64 `protobuf:"varint,6,opt,name=transaction_id,json=transactionId" json:"transaction_id,omitempty"`
+ Options *ExecuteOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"`
+}
+
+func (m *ExecuteBatchRequest) Reset() { *m = ExecuteBatchRequest{} }
+func (m *ExecuteBatchRequest) String() string { return "TODO" }
+
+
+
+func (m *ExecuteBatchRequest) GetEffectiveCallerId() *vtrpc.CallerID {
+ if m != nil {
+ return m.EffectiveCallerId
+ }
+ return nil
+}
+
+func (m *ExecuteBatchRequest) GetImmediateCallerId() *VTGateCallerID {
+ if m != nil {
+ return m.ImmediateCallerId
+ }
+ return nil
+}
+
+func (m *ExecuteBatchRequest) GetTarget() *Target {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (m *ExecuteBatchRequest) GetQueries() []*BoundQuery {
+ if m != nil {
+ return m.Queries
+ }
+ return nil
+}
+
+func (m *ExecuteBatchRequest) GetAsTransaction() bool {
+ if m != nil {
+ return m.AsTransaction
+ }
+ return false
+}
+
+func (m *ExecuteBatchRequest) GetTransactionId() int64 {
+ if m != nil {
+ return m.TransactionId
+ }
+ return 0
+}
+
+func (m *ExecuteBatchRequest) GetOptions() *ExecuteOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+// ExecuteBatchResponse is the returned value from ExecuteBatch
+type ExecuteBatchResponse struct {
+ Results []*QueryResult `protobuf:"bytes,1,rep,name=results" json:"results,omitempty"`
+}
+
+func (m *ExecuteBatchResponse) Reset() { *m = ExecuteBatchResponse{} }
+func (m *ExecuteBatchResponse) String() string { return "TODO" }
+
+
+
+func (m *ExecuteBatchResponse) GetResults() []*QueryResult {
+ if m != nil {
+ return m.Results
+ }
+ return nil
+}
+
+// StreamExecuteRequest is the payload to StreamExecute
+type StreamExecuteRequest struct {
+ EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"`
+ ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"`
+ Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"`
+ Query *BoundQuery `protobuf:"bytes,4,opt,name=query" json:"query,omitempty"`
+ Options *ExecuteOptions `protobuf:"bytes,5,opt,name=options" json:"options,omitempty"`
+}
+
+func (m *StreamExecuteRequest) Reset() { *m = StreamExecuteRequest{} }
+func (m *StreamExecuteRequest) String() string { return "TODO" }
+
+
+
+func (m *StreamExecuteRequest) GetEffectiveCallerId() *vtrpc.CallerID {
+ if m != nil {
+ return m.EffectiveCallerId
+ }
+ return nil
+}
+
+func (m *StreamExecuteRequest) GetImmediateCallerId() *VTGateCallerID {
+ if m != nil {
+ return m.ImmediateCallerId
+ }
+ return nil
+}
+
+func (m *StreamExecuteRequest) GetTarget() *Target {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (m *StreamExecuteRequest) GetQuery() *BoundQuery {
+ if m != nil {
+ return m.Query
+ }
+ return nil
+}
+
+func (m *StreamExecuteRequest) GetOptions() *ExecuteOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+// StreamExecuteResponse is the returned value from StreamExecute
+type StreamExecuteResponse struct {
+ Result *QueryResult `protobuf:"bytes,1,opt,name=result" json:"result,omitempty"`
+}
+
+func (m *StreamExecuteResponse) Reset() { *m = StreamExecuteResponse{} }
+func (m *StreamExecuteResponse) String() string { return "TODO" }
+
+
+
+func (m *StreamExecuteResponse) GetResult() *QueryResult {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+// BeginRequest is the payload to Begin
+type BeginRequest struct {
+ EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"`
+ ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"`
+ Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"`
+ Options *ExecuteOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"`
+}
+
+func (m *BeginRequest) Reset() { *m = BeginRequest{} }
+func (m *BeginRequest) String() string { return "TODO" }
+
+
+
+func (m *BeginRequest) GetEffectiveCallerId() *vtrpc.CallerID {
+ if m != nil {
+ return m.EffectiveCallerId
+ }
+ return nil
+}
+
+func (m *BeginRequest) GetImmediateCallerId() *VTGateCallerID {
+ if m != nil {
+ return m.ImmediateCallerId
+ }
+ return nil
+}
+
+func (m *BeginRequest) GetTarget() *Target {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (m *BeginRequest) GetOptions() *ExecuteOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+// BeginResponse is the returned value from Begin
+type BeginResponse struct {
+ TransactionId int64 `protobuf:"varint,1,opt,name=transaction_id,json=transactionId" json:"transaction_id,omitempty"`
+}
+
+func (m *BeginResponse) Reset() { *m = BeginResponse{} }
+func (m *BeginResponse) String() string { return "TODO" }
+
+
+
+func (m *BeginResponse) GetTransactionId() int64 {
+ if m != nil {
+ return m.TransactionId
+ }
+ return 0
+}
+
+// CommitRequest is the payload to Commit
+type CommitRequest struct {
+ EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"`
+ ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"`
+ Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"`
+ TransactionId int64 `protobuf:"varint,4,opt,name=transaction_id,json=transactionId" json:"transaction_id,omitempty"`
+}
+
+func (m *CommitRequest) Reset() { *m = CommitRequest{} }
+func (m *CommitRequest) String() string { return "TODO" }
+
+
+
+func (m *CommitRequest) GetEffectiveCallerId() *vtrpc.CallerID {
+ if m != nil {
+ return m.EffectiveCallerId
+ }
+ return nil
+}
+
+func (m *CommitRequest) GetImmediateCallerId() *VTGateCallerID {
+ if m != nil {
+ return m.ImmediateCallerId
+ }
+ return nil
+}
+
+func (m *CommitRequest) GetTarget() *Target {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (m *CommitRequest) GetTransactionId() int64 {
+ if m != nil {
+ return m.TransactionId
+ }
+ return 0
+}
+
+// CommitResponse is the returned value from Commit
+type CommitResponse struct {
+}
+
+func (m *CommitResponse) Reset() { *m = CommitResponse{} }
+func (m *CommitResponse) String() string { return "TODO" }
+
+
+
+// RollbackRequest is the payload to Rollback
+type RollbackRequest struct {
+ EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"`
+ ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"`
+ Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"`
+ TransactionId int64 `protobuf:"varint,4,opt,name=transaction_id,json=transactionId" json:"transaction_id,omitempty"`
+}
+
+func (m *RollbackRequest) Reset() { *m = RollbackRequest{} }
+func (m *RollbackRequest) String() string { return "TODO" }
+
+
+
+func (m *RollbackRequest) GetEffectiveCallerId() *vtrpc.CallerID {
+ if m != nil {
+ return m.EffectiveCallerId
+ }
+ return nil
+}
+
+func (m *RollbackRequest) GetImmediateCallerId() *VTGateCallerID {
+ if m != nil {
+ return m.ImmediateCallerId
+ }
+ return nil
+}
+
+func (m *RollbackRequest) GetTarget() *Target {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (m *RollbackRequest) GetTransactionId() int64 {
+ if m != nil {
+ return m.TransactionId
+ }
+ return 0
+}
+
+// RollbackResponse is the returned value from Rollback
+type RollbackResponse struct {
+}
+
+func (m *RollbackResponse) Reset() { *m = RollbackResponse{} }
+func (m *RollbackResponse) String() string { return "TODO" }
+
+
+
+// PrepareRequest is the payload to Prepare
+type PrepareRequest struct {
+ EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"`
+ ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"`
+ Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"`
+ TransactionId int64 `protobuf:"varint,4,opt,name=transaction_id,json=transactionId" json:"transaction_id,omitempty"`
+ Dtid string `protobuf:"bytes,5,opt,name=dtid" json:"dtid,omitempty"`
+}
+
+func (m *PrepareRequest) Reset() { *m = PrepareRequest{} }
+func (m *PrepareRequest) String() string { return "TODO" }
+
+
+
+func (m *PrepareRequest) GetEffectiveCallerId() *vtrpc.CallerID {
+ if m != nil {
+ return m.EffectiveCallerId
+ }
+ return nil
+}
+
+func (m *PrepareRequest) GetImmediateCallerId() *VTGateCallerID {
+ if m != nil {
+ return m.ImmediateCallerId
+ }
+ return nil
+}
+
+func (m *PrepareRequest) GetTarget() *Target {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (m *PrepareRequest) GetTransactionId() int64 {
+ if m != nil {
+ return m.TransactionId
+ }
+ return 0
+}
+
+func (m *PrepareRequest) GetDtid() string {
+ if m != nil {
+ return m.Dtid
+ }
+ return ""
+}
+
+// PrepareResponse is the returned value from Prepare
+type PrepareResponse struct {
+}
+
+func (m *PrepareResponse) Reset() { *m = PrepareResponse{} }
+func (m *PrepareResponse) String() string { return "TODO" }
+
+
+
+// CommitPreparedRequest is the payload to CommitPrepared
+type CommitPreparedRequest struct {
+ EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"`
+ ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"`
+ Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"`
+ Dtid string `protobuf:"bytes,4,opt,name=dtid" json:"dtid,omitempty"`
+}
+
+func (m *CommitPreparedRequest) Reset() { *m = CommitPreparedRequest{} }
+func (m *CommitPreparedRequest) String() string { return "TODO" }
+
+
+
+func (m *CommitPreparedRequest) GetEffectiveCallerId() *vtrpc.CallerID {
+ if m != nil {
+ return m.EffectiveCallerId
+ }
+ return nil
+}
+
+func (m *CommitPreparedRequest) GetImmediateCallerId() *VTGateCallerID {
+ if m != nil {
+ return m.ImmediateCallerId
+ }
+ return nil
+}
+
+func (m *CommitPreparedRequest) GetTarget() *Target {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (m *CommitPreparedRequest) GetDtid() string {
+ if m != nil {
+ return m.Dtid
+ }
+ return ""
+}
+
+// CommitPreparedResponse is the returned value from CommitPrepared
+type CommitPreparedResponse struct {
+}
+
+func (m *CommitPreparedResponse) Reset() { *m = CommitPreparedResponse{} }
+func (m *CommitPreparedResponse) String() string { return "TODO" }
+
+
+
+// RollbackPreparedRequest is the payload to RollbackPrepared
+type RollbackPreparedRequest struct {
+ EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"`
+ ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"`
+ Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"`
+ TransactionId int64 `protobuf:"varint,4,opt,name=transaction_id,json=transactionId" json:"transaction_id,omitempty"`
+ Dtid string `protobuf:"bytes,5,opt,name=dtid" json:"dtid,omitempty"`
+}
+
+func (m *RollbackPreparedRequest) Reset() { *m = RollbackPreparedRequest{} }
+func (m *RollbackPreparedRequest) String() string { return "TODO" }
+
+
+
+func (m *RollbackPreparedRequest) GetEffectiveCallerId() *vtrpc.CallerID {
+ if m != nil {
+ return m.EffectiveCallerId
+ }
+ return nil
+}
+
+func (m *RollbackPreparedRequest) GetImmediateCallerId() *VTGateCallerID {
+ if m != nil {
+ return m.ImmediateCallerId
+ }
+ return nil
+}
+
+func (m *RollbackPreparedRequest) GetTarget() *Target {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (m *RollbackPreparedRequest) GetTransactionId() int64 {
+ if m != nil {
+ return m.TransactionId
+ }
+ return 0
+}
+
+func (m *RollbackPreparedRequest) GetDtid() string {
+ if m != nil {
+ return m.Dtid
+ }
+ return ""
+}
+
+// RollbackPreparedResponse is the returned value from RollbackPrepared
+type RollbackPreparedResponse struct {
+}
+
+func (m *RollbackPreparedResponse) Reset() { *m = RollbackPreparedResponse{} }
+func (m *RollbackPreparedResponse) String() string { return "TODO" }
+
+
+
+// CreateTransactionRequest is the payload to CreateTransaction
+type CreateTransactionRequest struct {
+ EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"`
+ ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"`
+ Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"`
+ Dtid string `protobuf:"bytes,4,opt,name=dtid" json:"dtid,omitempty"`
+ Participants []*Target `protobuf:"bytes,5,rep,name=participants" json:"participants,omitempty"`
+}
+
+func (m *CreateTransactionRequest) Reset() { *m = CreateTransactionRequest{} }
+func (m *CreateTransactionRequest) String() string { return "TODO" }
+
+
+
+func (m *CreateTransactionRequest) GetEffectiveCallerId() *vtrpc.CallerID {
+ if m != nil {
+ return m.EffectiveCallerId
+ }
+ return nil
+}
+
+func (m *CreateTransactionRequest) GetImmediateCallerId() *VTGateCallerID {
+ if m != nil {
+ return m.ImmediateCallerId
+ }
+ return nil
+}
+
+func (m *CreateTransactionRequest) GetTarget() *Target {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (m *CreateTransactionRequest) GetDtid() string {
+ if m != nil {
+ return m.Dtid
+ }
+ return ""
+}
+
+func (m *CreateTransactionRequest) GetParticipants() []*Target {
+ if m != nil {
+ return m.Participants
+ }
+ return nil
+}
+
+// CreateTransactionResponse is the returned value from CreateTransaction
+type CreateTransactionResponse struct {
+}
+
+func (m *CreateTransactionResponse) Reset() { *m = CreateTransactionResponse{} }
+func (m *CreateTransactionResponse) String() string { return "TODO" }
+
+
+
+// StartCommitRequest is the payload to StartCommit
+type StartCommitRequest struct {
+ EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"`
+ ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"`
+ Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"`
+ TransactionId int64 `protobuf:"varint,4,opt,name=transaction_id,json=transactionId" json:"transaction_id,omitempty"`
+ Dtid string `protobuf:"bytes,5,opt,name=dtid" json:"dtid,omitempty"`
+}
+
+func (m *StartCommitRequest) Reset() { *m = StartCommitRequest{} }
+func (m *StartCommitRequest) String() string { return "TODO" }
+
+
+
+func (m *StartCommitRequest) GetEffectiveCallerId() *vtrpc.CallerID {
+ if m != nil {
+ return m.EffectiveCallerId
+ }
+ return nil
+}
+
+func (m *StartCommitRequest) GetImmediateCallerId() *VTGateCallerID {
+ if m != nil {
+ return m.ImmediateCallerId
+ }
+ return nil
+}
+
+func (m *StartCommitRequest) GetTarget() *Target {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (m *StartCommitRequest) GetTransactionId() int64 {
+ if m != nil {
+ return m.TransactionId
+ }
+ return 0
+}
+
+func (m *StartCommitRequest) GetDtid() string {
+ if m != nil {
+ return m.Dtid
+ }
+ return ""
+}
+
+// StartCommitResponse is the returned value from StartCommit
+type StartCommitResponse struct {
+}
+
+func (m *StartCommitResponse) Reset() { *m = StartCommitResponse{} }
+func (m *StartCommitResponse) String() string { return "TODO" }
+
+
+
+// SetRollbackRequest is the payload to SetRollback
+type SetRollbackRequest struct {
+ EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"`
+ ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"`
+ Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"`
+ TransactionId int64 `protobuf:"varint,4,opt,name=transaction_id,json=transactionId" json:"transaction_id,omitempty"`
+ Dtid string `protobuf:"bytes,5,opt,name=dtid" json:"dtid,omitempty"`
+}
+
+func (m *SetRollbackRequest) Reset() { *m = SetRollbackRequest{} }
+func (m *SetRollbackRequest) String() string { return "TODO" }
+
+
+
+func (m *SetRollbackRequest) GetEffectiveCallerId() *vtrpc.CallerID {
+ if m != nil {
+ return m.EffectiveCallerId
+ }
+ return nil
+}
+
+func (m *SetRollbackRequest) GetImmediateCallerId() *VTGateCallerID {
+ if m != nil {
+ return m.ImmediateCallerId
+ }
+ return nil
+}
+
+func (m *SetRollbackRequest) GetTarget() *Target {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (m *SetRollbackRequest) GetTransactionId() int64 {
+ if m != nil {
+ return m.TransactionId
+ }
+ return 0
+}
+
+func (m *SetRollbackRequest) GetDtid() string {
+ if m != nil {
+ return m.Dtid
+ }
+ return ""
+}
+
+// SetRollbackResponse is the returned value from SetRollback
+type SetRollbackResponse struct {
+}
+
+func (m *SetRollbackResponse) Reset() { *m = SetRollbackResponse{} }
+func (m *SetRollbackResponse) String() string { return "TODO" }
+
+
+
+// ConcludeTransactionRequest is the payload to ConcludeTransaction
+type ConcludeTransactionRequest struct {
+ EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"`
+ ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"`
+ Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"`
+ Dtid string `protobuf:"bytes,4,opt,name=dtid" json:"dtid,omitempty"`
+}
+
+func (m *ConcludeTransactionRequest) Reset() { *m = ConcludeTransactionRequest{} }
+func (m *ConcludeTransactionRequest) String() string { return "TODO" }
+
+
+
+func (m *ConcludeTransactionRequest) GetEffectiveCallerId() *vtrpc.CallerID {
+ if m != nil {
+ return m.EffectiveCallerId
+ }
+ return nil
+}
+
+func (m *ConcludeTransactionRequest) GetImmediateCallerId() *VTGateCallerID {
+ if m != nil {
+ return m.ImmediateCallerId
+ }
+ return nil
+}
+
+func (m *ConcludeTransactionRequest) GetTarget() *Target {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (m *ConcludeTransactionRequest) GetDtid() string {
+ if m != nil {
+ return m.Dtid
+ }
+ return ""
+}
+
+// ConcludeTransactionResponse is the returned value from ConcludeTransaction
+type ConcludeTransactionResponse struct {
+}
+
+func (m *ConcludeTransactionResponse) Reset() { *m = ConcludeTransactionResponse{} }
+func (m *ConcludeTransactionResponse) String() string { return "TODO" }
+
+
+
+// ReadTransactionRequest is the payload to ReadTransaction
+type ReadTransactionRequest struct {
+ EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"`
+ ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"`
+ Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"`
+ Dtid string `protobuf:"bytes,4,opt,name=dtid" json:"dtid,omitempty"`
+}
+
+func (m *ReadTransactionRequest) Reset() { *m = ReadTransactionRequest{} }
+func (m *ReadTransactionRequest) String() string { return "TODO" }
+
+
+
+func (m *ReadTransactionRequest) GetEffectiveCallerId() *vtrpc.CallerID {
+ if m != nil {
+ return m.EffectiveCallerId
+ }
+ return nil
+}
+
+func (m *ReadTransactionRequest) GetImmediateCallerId() *VTGateCallerID {
+ if m != nil {
+ return m.ImmediateCallerId
+ }
+ return nil
+}
+
+func (m *ReadTransactionRequest) GetTarget() *Target {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (m *ReadTransactionRequest) GetDtid() string {
+ if m != nil {
+ return m.Dtid
+ }
+ return ""
+}
+
+// ReadTransactionResponse is the returned value from ReadTransaction
+type ReadTransactionResponse struct {
+ Metadata *TransactionMetadata `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"`
+}
+
+func (m *ReadTransactionResponse) Reset() { *m = ReadTransactionResponse{} }
+func (m *ReadTransactionResponse) String() string { return "TODO" }
+
+
+
+func (m *ReadTransactionResponse) GetMetadata() *TransactionMetadata {
+ if m != nil {
+ return m.Metadata
+ }
+ return nil
+}
+
+// BeginExecuteRequest is the payload to BeginExecute
+type BeginExecuteRequest struct {
+ EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"`
+ ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"`
+ Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"`
+ Query *BoundQuery `protobuf:"bytes,4,opt,name=query" json:"query,omitempty"`
+ Options *ExecuteOptions `protobuf:"bytes,5,opt,name=options" json:"options,omitempty"`
+}
+
+func (m *BeginExecuteRequest) Reset() { *m = BeginExecuteRequest{} }
+func (m *BeginExecuteRequest) String() string { return "TODO" }
+
+
+
+func (m *BeginExecuteRequest) GetEffectiveCallerId() *vtrpc.CallerID {
+ if m != nil {
+ return m.EffectiveCallerId
+ }
+ return nil
+}
+
+func (m *BeginExecuteRequest) GetImmediateCallerId() *VTGateCallerID {
+ if m != nil {
+ return m.ImmediateCallerId
+ }
+ return nil
+}
+
+func (m *BeginExecuteRequest) GetTarget() *Target {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (m *BeginExecuteRequest) GetQuery() *BoundQuery {
+ if m != nil {
+ return m.Query
+ }
+ return nil
+}
+
+func (m *BeginExecuteRequest) GetOptions() *ExecuteOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+// BeginExecuteResponse is the returned value from BeginExecute
+type BeginExecuteResponse struct {
+ // error contains an application level error if necessary. Note the
+ // transaction_id may be set, even when an error is returned, if the begin
+ // worked but the execute failed.
+ Error *vtrpc.RPCError `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
+ Result *QueryResult `protobuf:"bytes,2,opt,name=result" json:"result,omitempty"`
+ // transaction_id might be non-zero even if an error is present.
+ TransactionId int64 `protobuf:"varint,3,opt,name=transaction_id,json=transactionId" json:"transaction_id,omitempty"`
+}
+
+func (m *BeginExecuteResponse) Reset() { *m = BeginExecuteResponse{} }
+func (m *BeginExecuteResponse) String() string { return "TODO" }
+
+
+
+func (m *BeginExecuteResponse) GetError() *vtrpc.RPCError {
+ if m != nil {
+ return m.Error
+ }
+ return nil
+}
+
+func (m *BeginExecuteResponse) GetResult() *QueryResult {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+func (m *BeginExecuteResponse) GetTransactionId() int64 {
+ if m != nil {
+ return m.TransactionId
+ }
+ return 0
+}
+
+// BeginExecuteBatchRequest is the payload to BeginExecuteBatch
+type BeginExecuteBatchRequest struct {
+ EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"`
+ ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"`
+ Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"`
+ Queries []*BoundQuery `protobuf:"bytes,4,rep,name=queries" json:"queries,omitempty"`
+ AsTransaction bool `protobuf:"varint,5,opt,name=as_transaction,json=asTransaction" json:"as_transaction,omitempty"`
+ Options *ExecuteOptions `protobuf:"bytes,6,opt,name=options" json:"options,omitempty"`
+}
+
+func (m *BeginExecuteBatchRequest) Reset() { *m = BeginExecuteBatchRequest{} }
+func (m *BeginExecuteBatchRequest) String() string { return "TODO" }
+
+
+
+func (m *BeginExecuteBatchRequest) GetEffectiveCallerId() *vtrpc.CallerID {
+ if m != nil {
+ return m.EffectiveCallerId
+ }
+ return nil
+}
+
+func (m *BeginExecuteBatchRequest) GetImmediateCallerId() *VTGateCallerID {
+ if m != nil {
+ return m.ImmediateCallerId
+ }
+ return nil
+}
+
+func (m *BeginExecuteBatchRequest) GetTarget() *Target {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (m *BeginExecuteBatchRequest) GetQueries() []*BoundQuery {
+ if m != nil {
+ return m.Queries
+ }
+ return nil
+}
+
+func (m *BeginExecuteBatchRequest) GetAsTransaction() bool {
+ if m != nil {
+ return m.AsTransaction
+ }
+ return false
+}
+
+func (m *BeginExecuteBatchRequest) GetOptions() *ExecuteOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+// BeginExecuteBatchResponse is the returned value from BeginExecuteBatch
+type BeginExecuteBatchResponse struct {
+ // error contains an application level error if necessary. Note the
+ // transaction_id may be set, even when an error is returned, if the begin
+ // worked but the execute failed.
+ Error *vtrpc.RPCError `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
+ Results []*QueryResult `protobuf:"bytes,2,rep,name=results" json:"results,omitempty"`
+ // transaction_id might be non-zero even if an error is present.
+ TransactionId int64 `protobuf:"varint,3,opt,name=transaction_id,json=transactionId" json:"transaction_id,omitempty"`
+}
+
+func (m *BeginExecuteBatchResponse) Reset() { *m = BeginExecuteBatchResponse{} }
+func (m *BeginExecuteBatchResponse) String() string { return "TODO" }
+
+
+
+func (m *BeginExecuteBatchResponse) GetError() *vtrpc.RPCError {
+ if m != nil {
+ return m.Error
+ }
+ return nil
+}
+
+func (m *BeginExecuteBatchResponse) GetResults() []*QueryResult {
+ if m != nil {
+ return m.Results
+ }
+ return nil
+}
+
+func (m *BeginExecuteBatchResponse) GetTransactionId() int64 {
+ if m != nil {
+ return m.TransactionId
+ }
+ return 0
+}
+
+// MessageStreamRequest is the request payload for MessageStream.
+type MessageStreamRequest struct {
+ EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"`
+ ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"`
+ Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"`
+ // name is the message table name.
+ Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
+}
+
+func (m *MessageStreamRequest) Reset() { *m = MessageStreamRequest{} }
+func (m *MessageStreamRequest) String() string { return "TODO" }
+
+
+
+func (m *MessageStreamRequest) GetEffectiveCallerId() *vtrpc.CallerID {
+ if m != nil {
+ return m.EffectiveCallerId
+ }
+ return nil
+}
+
+func (m *MessageStreamRequest) GetImmediateCallerId() *VTGateCallerID {
+ if m != nil {
+ return m.ImmediateCallerId
+ }
+ return nil
+}
+
+func (m *MessageStreamRequest) GetTarget() *Target {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (m *MessageStreamRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+// MessageStreamResponse is a response for MessageStream.
+type MessageStreamResponse struct {
+ Result *QueryResult `protobuf:"bytes,1,opt,name=result" json:"result,omitempty"`
+}
+
+func (m *MessageStreamResponse) Reset() { *m = MessageStreamResponse{} }
+func (m *MessageStreamResponse) String() string { return "TODO" }
+
+
+
+func (m *MessageStreamResponse) GetResult() *QueryResult {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+// MessageAckRequest is the request payload for MessageAck.
+type MessageAckRequest struct {
+ EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"`
+ ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"`
+ Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"`
+ // name is the message table name.
+ Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
+ Ids []*Value `protobuf:"bytes,5,rep,name=ids" json:"ids,omitempty"`
+}
+
+func (m *MessageAckRequest) Reset() { *m = MessageAckRequest{} }
+func (m *MessageAckRequest) String() string { return "TODO" }
+
+
+
+func (m *MessageAckRequest) GetEffectiveCallerId() *vtrpc.CallerID {
+ if m != nil {
+ return m.EffectiveCallerId
+ }
+ return nil
+}
+
+func (m *MessageAckRequest) GetImmediateCallerId() *VTGateCallerID {
+ if m != nil {
+ return m.ImmediateCallerId
+ }
+ return nil
+}
+
+func (m *MessageAckRequest) GetTarget() *Target {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (m *MessageAckRequest) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *MessageAckRequest) GetIds() []*Value {
+ if m != nil {
+ return m.Ids
+ }
+ return nil
+}
+
+// MessageAckResponse is the response for MessageAck.
+type MessageAckResponse struct {
+ // result contains the result of the ack operation.
+ // Since this acts like a DML, only
+ // RowsAffected is returned in the result.
+ Result *QueryResult `protobuf:"bytes,1,opt,name=result" json:"result,omitempty"`
+}
+
+func (m *MessageAckResponse) Reset() { *m = MessageAckResponse{} }
+func (m *MessageAckResponse) String() string { return "TODO" }
+
+
+
+func (m *MessageAckResponse) GetResult() *QueryResult {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+// SplitQueryRequest is the payload for SplitQuery sent by VTGate to a VTTablet.
+// See vtgate.SplitQueryRequest for more details.
+type SplitQueryRequest struct {
+ EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"`
+ ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"`
+ Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"`
+ Query *BoundQuery `protobuf:"bytes,4,opt,name=query" json:"query,omitempty"`
+ SplitColumn []string `protobuf:"bytes,5,rep,name=split_column,json=splitColumn" json:"split_column,omitempty"`
+ // Exactly one of the following must be nonzero.
+ SplitCount int64 `protobuf:"varint,6,opt,name=split_count,json=splitCount" json:"split_count,omitempty"`
+ NumRowsPerQueryPart int64 `protobuf:"varint,8,opt,name=num_rows_per_query_part,json=numRowsPerQueryPart" json:"num_rows_per_query_part,omitempty"`
+ Algorithm SplitQueryRequest_Algorithm `protobuf:"varint,9,opt,name=algorithm,enum=query.SplitQueryRequest_Algorithm" json:"algorithm,omitempty"`
+}
+
+func (m *SplitQueryRequest) Reset() { *m = SplitQueryRequest{} }
+func (m *SplitQueryRequest) String() string { return "TODO" }
+
+
+
+func (m *SplitQueryRequest) GetEffectiveCallerId() *vtrpc.CallerID {
+ if m != nil {
+ return m.EffectiveCallerId
+ }
+ return nil
+}
+
+func (m *SplitQueryRequest) GetImmediateCallerId() *VTGateCallerID {
+ if m != nil {
+ return m.ImmediateCallerId
+ }
+ return nil
+}
+
+func (m *SplitQueryRequest) GetTarget() *Target {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (m *SplitQueryRequest) GetQuery() *BoundQuery {
+ if m != nil {
+ return m.Query
+ }
+ return nil
+}
+
+func (m *SplitQueryRequest) GetSplitColumn() []string {
+ if m != nil {
+ return m.SplitColumn
+ }
+ return nil
+}
+
+func (m *SplitQueryRequest) GetSplitCount() int64 {
+ if m != nil {
+ return m.SplitCount
+ }
+ return 0
+}
+
+func (m *SplitQueryRequest) GetNumRowsPerQueryPart() int64 {
+ if m != nil {
+ return m.NumRowsPerQueryPart
+ }
+ return 0
+}
+
+func (m *SplitQueryRequest) GetAlgorithm() SplitQueryRequest_Algorithm {
+ if m != nil {
+ return m.Algorithm
+ }
+ return SplitQueryRequest_EQUAL_SPLITS
+}
+
+// QuerySplit represents one query to execute on the tablet
+type QuerySplit struct {
+ // query is the query to execute
+ Query *BoundQuery `protobuf:"bytes,1,opt,name=query" json:"query,omitempty"`
+ // row_count is the approximate row count the query will return
+ RowCount int64 `protobuf:"varint,2,opt,name=row_count,json=rowCount" json:"row_count,omitempty"`
+}
+
+func (m *QuerySplit) Reset() { *m = QuerySplit{} }
+func (m *QuerySplit) String() string { return "TODO" }
+
+
+
+func (m *QuerySplit) GetQuery() *BoundQuery {
+ if m != nil {
+ return m.Query
+ }
+ return nil
+}
+
+func (m *QuerySplit) GetRowCount() int64 {
+ if m != nil {
+ return m.RowCount
+ }
+ return 0
+}
+
+// SplitQueryResponse is returned by SplitQuery and represents all the queries
+// to execute in order to get the entire data set.
+type SplitQueryResponse struct {
+ Queries []*QuerySplit `protobuf:"bytes,1,rep,name=queries" json:"queries,omitempty"`
+}
+
+func (m *SplitQueryResponse) Reset() { *m = SplitQueryResponse{} }
+func (m *SplitQueryResponse) String() string { return "TODO" }
+
+
+
+func (m *SplitQueryResponse) GetQueries() []*QuerySplit {
+ if m != nil {
+ return m.Queries
+ }
+ return nil
+}
+
+// StreamHealthRequest is the payload for StreamHealth
+type StreamHealthRequest struct {
+}
+
+func (m *StreamHealthRequest) Reset() { *m = StreamHealthRequest{} }
+func (m *StreamHealthRequest) String() string { return "TODO" }
+
+
+
+// RealtimeStats contains information about the tablet status
+type RealtimeStats struct {
+ // health_error is the last error we got from health check,
+ // or empty if the server is healthy. This is used for subset selection,
+ // we do not send queries to servers that are not healthy.
+ HealthError string `protobuf:"bytes,1,opt,name=health_error,json=healthError" json:"health_error,omitempty"`
+ // seconds_behind_master is populated for slaves only. It indicates
+ // how far behind on (MySQL) replication a slave currently is. It is used
+ // by clients for subset selection (so we don't try to send traffic
+ // to tablets that are too far behind).
+ // NOTE: This field must not be evaluated if "health_error" is not empty.
+ // TODO(mberlin): Let's switch it to int64 instead?
+ SecondsBehindMaster uint32 `protobuf:"varint,2,opt,name=seconds_behind_master,json=secondsBehindMaster" json:"seconds_behind_master,omitempty"`
+ // bin_log_players_count is the number of currently running binlog players.
+ // if the value is 0, it means that filtered replication is currently not
+ // running on the tablet. If >0, filtered replication is running.
+ // NOTE: This field must not be evaluated if "health_error" is not empty.
+ BinlogPlayersCount int32 `protobuf:"varint,3,opt,name=binlog_players_count,json=binlogPlayersCount" json:"binlog_players_count,omitempty"`
+ // seconds_behind_master_filtered_replication is populated for the receiving
+ // master of an ongoing filtered replication only.
+ // It specifies how far the receiving master lags behind the sending master.
+ // NOTE: This field must not be evaluated if "health_error" is not empty.
+ // NOTE: This field must not be evaluated if "bin_log_players_count" is 0.
+ SecondsBehindMasterFilteredReplication int64 `protobuf:"varint,4,opt,name=seconds_behind_master_filtered_replication,json=secondsBehindMasterFilteredReplication" json:"seconds_behind_master_filtered_replication,omitempty"`
+ // cpu_usage is used for load-based balancing
+ CpuUsage float64 `protobuf:"fixed64,5,opt,name=cpu_usage,json=cpuUsage" json:"cpu_usage,omitempty"`
+ // qps is the average QPS (queries per second) rate in the last XX seconds
+ // where XX is usually 60 (See query_service_stats.go).
+ Qps float64 `protobuf:"fixed64,6,opt,name=qps" json:"qps,omitempty"`
+}
+
+func (m *RealtimeStats) Reset() { *m = RealtimeStats{} }
+func (m *RealtimeStats) String() string { return "TODO" }
+
+
+
+func (m *RealtimeStats) GetHealthError() string {
+ if m != nil {
+ return m.HealthError
+ }
+ return ""
+}
+
+func (m *RealtimeStats) GetSecondsBehindMaster() uint32 {
+ if m != nil {
+ return m.SecondsBehindMaster
+ }
+ return 0
+}
+
+func (m *RealtimeStats) GetBinlogPlayersCount() int32 {
+ if m != nil {
+ return m.BinlogPlayersCount
+ }
+ return 0
+}
+
+func (m *RealtimeStats) GetSecondsBehindMasterFilteredReplication() int64 {
+ if m != nil {
+ return m.SecondsBehindMasterFilteredReplication
+ }
+ return 0
+}
+
+func (m *RealtimeStats) GetCpuUsage() float64 {
+ if m != nil {
+ return m.CpuUsage
+ }
+ return 0
+}
+
+func (m *RealtimeStats) GetQps() float64 {
+ if m != nil {
+ return m.Qps
+ }
+ return 0
+}
+
+// StreamHealthResponse is streamed by StreamHealth on a regular basis
+type StreamHealthResponse struct {
+ // target is the current server type. Only queries with that exact Target
+ // record will be accepted.
+ Target *Target `protobuf:"bytes,1,opt,name=target" json:"target,omitempty"`
+ // serving is true iff the tablet is serving. A tablet may not be serving
+ // if filtered replication is enabled on a master for instance,
+ // or if a replica should not be used because the keyspace is being resharded.
+ Serving bool `protobuf:"varint,2,opt,name=serving" json:"serving,omitempty"`
+ // tablet_externally_reparented_timestamp can be interpreted as the last time
+ // we knew that this tablet was the MASTER of this shard.
+ //
+ // It is used by vtgate when determining the current MASTER of a shard.
+ // If vtgate sees more than one MASTER tablet, this timestamp is used
+ // as tiebreaker where the MASTER with the highest timestamp wins.
+ // Another usage of this timestamp is in go/vt/vtgate/buffer to detect the end
+ // of a reparent (failover) and stop buffering.
+ //
+ // In practice, this field is set to:
+ // a) the last time the RPC tabletmanager.TabletExternallyReparented was
+ // called on this tablet (usually done by an external failover tool e.g.
+ // Orchestrator). The failover tool can call this as long as we are the
+ // master i.e. even ages after the last reparent occurred.
+ // OR
+ // b) the last time an active reparent was executed through a vtctl command
+ // (InitShardMaster, PlannedReparentShard, EmergencyReparentShard)
+ // OR
+ // c) the last time vttablet was started and it initialized its tablet type
+ // as MASTER because it was recorded as the shard's current master in the
+ // topology (see go/vt/vttablet/tabletmanager/init_tablet.go)
+ // OR
+ // d) 0 if the vttablet was never a MASTER.
+ TabletExternallyReparentedTimestamp int64 `protobuf:"varint,3,opt,name=tablet_externally_reparented_timestamp,json=tabletExternallyReparentedTimestamp" json:"tablet_externally_reparented_timestamp,omitempty"`
+ // realtime_stats contains information about the tablet status
+ RealtimeStats *RealtimeStats `protobuf:"bytes,4,opt,name=realtime_stats,json=realtimeStats" json:"realtime_stats,omitempty"`
+ // tablet_alias is the alias of the sending tablet. The discovery/healthcheck.go
+ // code uses it to verify that it's talking to the correct tablet and that it
+ // hasn't changed in the meantime e.g. due to tablet restarts where ports or
+ // ips have been reused but assigned differently.
+ TabletAlias *topodata.TabletAlias `protobuf:"bytes,5,opt,name=tablet_alias,json=tabletAlias" json:"tablet_alias,omitempty"`
+}
+
+func (m *StreamHealthResponse) Reset() { *m = StreamHealthResponse{} }
+func (m *StreamHealthResponse) String() string { return "TODO" }
+
+
+
+func (m *StreamHealthResponse) GetTarget() *Target {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (m *StreamHealthResponse) GetServing() bool {
+ if m != nil {
+ return m.Serving
+ }
+ return false
+}
+
+func (m *StreamHealthResponse) GetTabletExternallyReparentedTimestamp() int64 {
+ if m != nil {
+ return m.TabletExternallyReparentedTimestamp
+ }
+ return 0
+}
+
+func (m *StreamHealthResponse) GetRealtimeStats() *RealtimeStats {
+ if m != nil {
+ return m.RealtimeStats
+ }
+ return nil
+}
+
+func (m *StreamHealthResponse) GetTabletAlias() *topodata.TabletAlias {
+ if m != nil {
+ return m.TabletAlias
+ }
+ return nil
+}
+
+// UpdateStreamRequest is the payload for UpdateStream. At most one of
+// position and timestamp can be set. If neither is set, we will start
+// streaming from the current binlog position.
+type UpdateStreamRequest struct {
+ EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"`
+ ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"`
+ Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"`
+ // If position is set, we will start the streaming from that replication
+ // position. Incompatible with timestamp.
+ Position string `protobuf:"bytes,4,opt,name=position" json:"position,omitempty"`
+ // If timestamp is set, we will start the streaming from the first
+ // event in the binlogs that have that timestamp. Incompatible with position.
+ Timestamp int64 `protobuf:"varint,5,opt,name=timestamp" json:"timestamp,omitempty"`
+}
+
+func (m *UpdateStreamRequest) Reset() { *m = UpdateStreamRequest{} }
+func (m *UpdateStreamRequest) String() string { return "TODO" }
+
+
+
+func (m *UpdateStreamRequest) GetEffectiveCallerId() *vtrpc.CallerID {
+ if m != nil {
+ return m.EffectiveCallerId
+ }
+ return nil
+}
+
+func (m *UpdateStreamRequest) GetImmediateCallerId() *VTGateCallerID {
+ if m != nil {
+ return m.ImmediateCallerId
+ }
+ return nil
+}
+
+func (m *UpdateStreamRequest) GetTarget() *Target {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (m *UpdateStreamRequest) GetPosition() string {
+ if m != nil {
+ return m.Position
+ }
+ return ""
+}
+
+func (m *UpdateStreamRequest) GetTimestamp() int64 {
+ if m != nil {
+ return m.Timestamp
+ }
+ return 0
+}
+
+// UpdateStreamResponse is returned by UpdateStream
+type UpdateStreamResponse struct {
+ Event *StreamEvent `protobuf:"bytes,1,opt,name=event" json:"event,omitempty"`
+}
+
+func (m *UpdateStreamResponse) Reset() { *m = UpdateStreamResponse{} }
+func (m *UpdateStreamResponse) String() string { return "TODO" }
+
+
+
+func (m *UpdateStreamResponse) GetEvent() *StreamEvent {
+ if m != nil {
+ return m.Event
+ }
+ return nil
+}
+
+// TransactionMetadata contains the metadata for a distributed transaction.
+type TransactionMetadata struct {
+ Dtid string `protobuf:"bytes,1,opt,name=dtid" json:"dtid,omitempty"`
+ State TransactionState `protobuf:"varint,2,opt,name=state,enum=query.TransactionState" json:"state,omitempty"`
+ TimeCreated int64 `protobuf:"varint,3,opt,name=time_created,json=timeCreated" json:"time_created,omitempty"`
+ Participants []*Target `protobuf:"bytes,4,rep,name=participants" json:"participants,omitempty"`
+}
+
+func (m *TransactionMetadata) Reset() { *m = TransactionMetadata{} }
+func (m *TransactionMetadata) String() string { return "TODO" }
+
+
+
+func (m *TransactionMetadata) GetDtid() string {
+ if m != nil {
+ return m.Dtid
+ }
+ return ""
+}
+
+func (m *TransactionMetadata) GetState() TransactionState {
+ if m != nil {
+ return m.State
+ }
+ return TransactionState_UNKNOWN
+}
+
+func (m *TransactionMetadata) GetTimeCreated() int64 {
+ if m != nil {
+ return m.TimeCreated
+ }
+ return 0
+}
+
+func (m *TransactionMetadata) GetParticipants() []*Target {
+ if m != nil {
+ return m.Participants
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*Target)(nil), "query.Target")
+ proto.RegisterType((*VTGateCallerID)(nil), "query.VTGateCallerID")
+ proto.RegisterType((*EventToken)(nil), "query.EventToken")
+ proto.RegisterType((*Value)(nil), "query.Value")
+ proto.RegisterType((*BindVariable)(nil), "query.BindVariable")
+ proto.RegisterType((*BoundQuery)(nil), "query.BoundQuery")
+ proto.RegisterType((*ExecuteOptions)(nil), "query.ExecuteOptions")
+ proto.RegisterType((*Field)(nil), "query.Field")
+ proto.RegisterType((*Row)(nil), "query.Row")
+ proto.RegisterType((*ResultExtras)(nil), "query.ResultExtras")
+ proto.RegisterType((*QueryResult)(nil), "query.QueryResult")
+ proto.RegisterType((*StreamEvent)(nil), "query.StreamEvent")
+ proto.RegisterType((*StreamEvent_Statement)(nil), "query.StreamEvent.Statement")
+ proto.RegisterType((*ExecuteRequest)(nil), "query.ExecuteRequest")
+ proto.RegisterType((*ExecuteResponse)(nil), "query.ExecuteResponse")
+ proto.RegisterType((*ResultWithError)(nil), "query.ResultWithError")
+ proto.RegisterType((*ExecuteBatchRequest)(nil), "query.ExecuteBatchRequest")
+ proto.RegisterType((*ExecuteBatchResponse)(nil), "query.ExecuteBatchResponse")
+ proto.RegisterType((*StreamExecuteRequest)(nil), "query.StreamExecuteRequest")
+ proto.RegisterType((*StreamExecuteResponse)(nil), "query.StreamExecuteResponse")
+ proto.RegisterType((*BeginRequest)(nil), "query.BeginRequest")
+ proto.RegisterType((*BeginResponse)(nil), "query.BeginResponse")
+ proto.RegisterType((*CommitRequest)(nil), "query.CommitRequest")
+ proto.RegisterType((*CommitResponse)(nil), "query.CommitResponse")
+ proto.RegisterType((*RollbackRequest)(nil), "query.RollbackRequest")
+ proto.RegisterType((*RollbackResponse)(nil), "query.RollbackResponse")
+ proto.RegisterType((*PrepareRequest)(nil), "query.PrepareRequest")
+ proto.RegisterType((*PrepareResponse)(nil), "query.PrepareResponse")
+ proto.RegisterType((*CommitPreparedRequest)(nil), "query.CommitPreparedRequest")
+ proto.RegisterType((*CommitPreparedResponse)(nil), "query.CommitPreparedResponse")
+ proto.RegisterType((*RollbackPreparedRequest)(nil), "query.RollbackPreparedRequest")
+ proto.RegisterType((*RollbackPreparedResponse)(nil), "query.RollbackPreparedResponse")
+ proto.RegisterType((*CreateTransactionRequest)(nil), "query.CreateTransactionRequest")
+ proto.RegisterType((*CreateTransactionResponse)(nil), "query.CreateTransactionResponse")
+ proto.RegisterType((*StartCommitRequest)(nil), "query.StartCommitRequest")
+ proto.RegisterType((*StartCommitResponse)(nil), "query.StartCommitResponse")
+ proto.RegisterType((*SetRollbackRequest)(nil), "query.SetRollbackRequest")
+ proto.RegisterType((*SetRollbackResponse)(nil), "query.SetRollbackResponse")
+ proto.RegisterType((*ConcludeTransactionRequest)(nil), "query.ConcludeTransactionRequest")
+ proto.RegisterType((*ConcludeTransactionResponse)(nil), "query.ConcludeTransactionResponse")
+ proto.RegisterType((*ReadTransactionRequest)(nil), "query.ReadTransactionRequest")
+ proto.RegisterType((*ReadTransactionResponse)(nil), "query.ReadTransactionResponse")
+ proto.RegisterType((*BeginExecuteRequest)(nil), "query.BeginExecuteRequest")
+ proto.RegisterType((*BeginExecuteResponse)(nil), "query.BeginExecuteResponse")
+ proto.RegisterType((*BeginExecuteBatchRequest)(nil), "query.BeginExecuteBatchRequest")
+ proto.RegisterType((*BeginExecuteBatchResponse)(nil), "query.BeginExecuteBatchResponse")
+ proto.RegisterType((*MessageStreamRequest)(nil), "query.MessageStreamRequest")
+ proto.RegisterType((*MessageStreamResponse)(nil), "query.MessageStreamResponse")
+ proto.RegisterType((*MessageAckRequest)(nil), "query.MessageAckRequest")
+ proto.RegisterType((*MessageAckResponse)(nil), "query.MessageAckResponse")
+ proto.RegisterType((*SplitQueryRequest)(nil), "query.SplitQueryRequest")
+ proto.RegisterType((*QuerySplit)(nil), "query.QuerySplit")
+ proto.RegisterType((*SplitQueryResponse)(nil), "query.SplitQueryResponse")
+ proto.RegisterType((*StreamHealthRequest)(nil), "query.StreamHealthRequest")
+ proto.RegisterType((*RealtimeStats)(nil), "query.RealtimeStats")
+ proto.RegisterType((*StreamHealthResponse)(nil), "query.StreamHealthResponse")
+ proto.RegisterType((*UpdateStreamRequest)(nil), "query.UpdateStreamRequest")
+ proto.RegisterType((*UpdateStreamResponse)(nil), "query.UpdateStreamResponse")
+ proto.RegisterType((*TransactionMetadata)(nil), "query.TransactionMetadata")
+ proto.RegisterEnum("query.MySqlFlag", MySqlFlag_name, MySqlFlag_value)
+ proto.RegisterEnum("query.Flag", Flag_name, Flag_value)
+ proto.RegisterEnum("query.Type", Type_name, Type_value)
+ proto.RegisterEnum("query.TransactionState", TransactionState_name, TransactionState_value)
+ proto.RegisterEnum("query.ExecuteOptions_IncludedFields", ExecuteOptions_IncludedFields_name, ExecuteOptions_IncludedFields_value)
+ proto.RegisterEnum("query.ExecuteOptions_Workload", ExecuteOptions_Workload_name, ExecuteOptions_Workload_value)
+ proto.RegisterEnum("query.ExecuteOptions_TransactionIsolation", ExecuteOptions_TransactionIsolation_name, ExecuteOptions_TransactionIsolation_value)
+ proto.RegisterEnum("query.StreamEvent_Statement_Category", StreamEvent_Statement_Category_name, StreamEvent_Statement_Category_value)
+ proto.RegisterEnum("query.SplitQueryRequest_Algorithm", SplitQueryRequest_Algorithm_name, SplitQueryRequest_Algorithm_value)
+}
+
+*/
diff --git a/vendor/github.com/CovenantSQL/sqlparser/dependency/sqltypes/bind_variables.go b/vendor/github.com/CovenantSQL/sqlparser/dependency/sqltypes/bind_variables.go
new file mode 100644
index 000000000..573febfeb
--- /dev/null
+++ b/vendor/github.com/CovenantSQL/sqlparser/dependency/sqltypes/bind_variables.go
@@ -0,0 +1,266 @@
+/*
+Copyright 2017 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sqltypes
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+
+ "github.com/CovenantSQL/sqlparser/dependency/querypb"
+)
+
+// NullBindVariable is a bindvar with NULL value.
+var NullBindVariable = &querypb.BindVariable{Type: querypb.Type_NULL_TYPE}
+
+// ValueToProto converts Value to a *querypb.Value.
+func ValueToProto(v Value) *querypb.Value {
+ return &querypb.Value{Type: v.typ, Value: v.val}
+}
+
+// ProtoToValue converts a *querypb.Value to a Value.
+func ProtoToValue(v *querypb.Value) Value {
+ return MakeTrusted(v.Type, v.Value)
+}
+
+// BuildBindVariables builds a map[string]*querypb.BindVariable from a map[string]interface{}.
+func BuildBindVariables(in map[string]interface{}) (map[string]*querypb.BindVariable, error) {
+ if len(in) == 0 {
+ return nil, nil
+ }
+
+ out := make(map[string]*querypb.BindVariable, len(in))
+ for k, v := range in {
+ bv, err := BuildBindVariable(v)
+ if err != nil {
+ return nil, fmt.Errorf("%s: %v", k, err)
+ }
+ out[k] = bv
+ }
+ return out, nil
+}
+
+// Int32BindVariable converts an int32 to a bind var.
+func Int32BindVariable(v int32) *querypb.BindVariable {
+ return ValueBindVariable(NewInt32(v))
+}
+
+// Int64BindVariable converts an int64 to a bind var.
+func Int64BindVariable(v int64) *querypb.BindVariable {
+ return ValueBindVariable(NewInt64(v))
+}
+
+// Uint64BindVariable converts a uint64 to a bind var.
+func Uint64BindVariable(v uint64) *querypb.BindVariable {
+ return ValueBindVariable(NewUint64(v))
+}
+
+// Float64BindVariable converts a float64 to a bind var.
+func Float64BindVariable(v float64) *querypb.BindVariable {
+ return ValueBindVariable(NewFloat64(v))
+}
+
+// StringBindVariable converts a string to a bind var.
+func StringBindVariable(v string) *querypb.BindVariable {
+ return ValueBindVariable(NewVarChar(v))
+}
+
+// BytesBindVariable converts a []byte to a bind var.
+func BytesBindVariable(v []byte) *querypb.BindVariable {
+ return &querypb.BindVariable{Type: VarBinary, Value: v}
+}
+
+// ValueBindVariable converts a Value to a bind var.
+func ValueBindVariable(v Value) *querypb.BindVariable {
+ return &querypb.BindVariable{Type: v.typ, Value: v.val}
+}
+
+// BuildBindVariable builds a *querypb.BindVariable from a valid input type.
+func BuildBindVariable(v interface{}) (*querypb.BindVariable, error) {
+ switch v := v.(type) {
+ case string:
+ return StringBindVariable(v), nil
+ case []byte:
+ return BytesBindVariable(v), nil
+ case int:
+ return &querypb.BindVariable{
+ Type: querypb.Type_INT64,
+ Value: strconv.AppendInt(nil, int64(v), 10),
+ }, nil
+ case int64:
+ return Int64BindVariable(v), nil
+ case uint64:
+ return Uint64BindVariable(v), nil
+ case float64:
+ return Float64BindVariable(v), nil
+ case nil:
+ return NullBindVariable, nil
+ case Value:
+ return ValueBindVariable(v), nil
+ case *querypb.BindVariable:
+ return v, nil
+ case []interface{}:
+ bv := &querypb.BindVariable{
+ Type: querypb.Type_TUPLE,
+ Values: make([]*querypb.Value, len(v)),
+ }
+ values := make([]querypb.Value, len(v))
+ for i, lv := range v {
+ lbv, err := BuildBindVariable(lv)
+ if err != nil {
+ return nil, err
+ }
+ values[i].Type = lbv.Type
+ values[i].Value = lbv.Value
+ bv.Values[i] = &values[i]
+ }
+ return bv, nil
+ case []string:
+ bv := &querypb.BindVariable{
+ Type: querypb.Type_TUPLE,
+ Values: make([]*querypb.Value, len(v)),
+ }
+ values := make([]querypb.Value, len(v))
+ for i, lv := range v {
+ values[i].Type = querypb.Type_VARCHAR
+ values[i].Value = []byte(lv)
+ bv.Values[i] = &values[i]
+ }
+ return bv, nil
+ case [][]byte:
+ bv := &querypb.BindVariable{
+ Type: querypb.Type_TUPLE,
+ Values: make([]*querypb.Value, len(v)),
+ }
+ values := make([]querypb.Value, len(v))
+ for i, lv := range v {
+ values[i].Type = querypb.Type_VARBINARY
+ values[i].Value = lv
+ bv.Values[i] = &values[i]
+ }
+ return bv, nil
+ case []int:
+ bv := &querypb.BindVariable{
+ Type: querypb.Type_TUPLE,
+ Values: make([]*querypb.Value, len(v)),
+ }
+ values := make([]querypb.Value, len(v))
+ for i, lv := range v {
+ values[i].Type = querypb.Type_INT64
+ values[i].Value = strconv.AppendInt(nil, int64(lv), 10)
+ bv.Values[i] = &values[i]
+ }
+ return bv, nil
+ case []int64:
+ bv := &querypb.BindVariable{
+ Type: querypb.Type_TUPLE,
+ Values: make([]*querypb.Value, len(v)),
+ }
+ values := make([]querypb.Value, len(v))
+ for i, lv := range v {
+ values[i].Type = querypb.Type_INT64
+ values[i].Value = strconv.AppendInt(nil, lv, 10)
+ bv.Values[i] = &values[i]
+ }
+ return bv, nil
+ case []uint64:
+ bv := &querypb.BindVariable{
+ Type: querypb.Type_TUPLE,
+ Values: make([]*querypb.Value, len(v)),
+ }
+ values := make([]querypb.Value, len(v))
+ for i, lv := range v {
+ values[i].Type = querypb.Type_UINT64
+ values[i].Value = strconv.AppendUint(nil, lv, 10)
+ bv.Values[i] = &values[i]
+ }
+ return bv, nil
+ case []float64:
+ bv := &querypb.BindVariable{
+ Type: querypb.Type_TUPLE,
+ Values: make([]*querypb.Value, len(v)),
+ }
+ values := make([]querypb.Value, len(v))
+ for i, lv := range v {
+ values[i].Type = querypb.Type_FLOAT64
+ values[i].Value = strconv.AppendFloat(nil, lv, 'g', -1, 64)
+ bv.Values[i] = &values[i]
+ }
+ return bv, nil
+ }
+ return nil, fmt.Errorf("type %T not supported as bind var: %v", v, v)
+}
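+
+// exampleBuildBindVariables is an illustrative sketch added for documentation
+// only; it is not part of the upstream package. The bind-var names "id" and
+// "names" are arbitrary. It shows how BuildBindVariables converts plain Go
+// values: an int64 becomes an INT64 bind var, and a []string becomes a TUPLE
+// bind var holding VARCHAR values.
+func exampleBuildBindVariables() (map[string]*querypb.BindVariable, error) {
+ return BuildBindVariables(map[string]interface{}{
+ "id": int64(42),
+ "names": []string{"alice", "bob"},
+ })
+}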
+
+// ValidateBindVariables validates a map[string]*querypb.BindVariable.
+func ValidateBindVariables(bv map[string]*querypb.BindVariable) error {
+ for k, v := range bv {
+ if err := ValidateBindVariable(v); err != nil {
+ return fmt.Errorf("%s: %v", k, err)
+ }
+ }
+ return nil
+}
+
+// ValidateBindVariable returns an error if the bind variable has inconsistent
+// fields.
+func ValidateBindVariable(bv *querypb.BindVariable) error {
+ if bv == nil {
+ return errors.New("bind variable is nil")
+ }
+
+ if bv.Type == querypb.Type_TUPLE {
+ if len(bv.Values) == 0 {
+ return errors.New("empty tuple is not allowed")
+ }
+ for _, val := range bv.Values {
+ if val.Type == querypb.Type_TUPLE {
+ return errors.New("tuple not allowed inside another tuple")
+ }
+ if err := ValidateBindVariable(&querypb.BindVariable{Type: val.Type, Value: val.Value}); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+
+ // If NewValue succeeds, the value is valid.
+ _, err := NewValue(bv.Type, bv.Value)
+ return err
+}
+
+// BindVariableToValue converts a bind var into a Value.
+func BindVariableToValue(bv *querypb.BindVariable) (Value, error) {
+ if bv.Type == querypb.Type_TUPLE {
+ return NULL, errors.New("cannot convert a TUPLE bind var into a value")
+ }
+ return MakeTrusted(bv.Type, bv.Value), nil
+}
+
+// BindVariablesEqual compares two maps of bind variables.
+func BindVariablesEqual(x, y map[string]*querypb.BindVariable) bool {
+ return reflect.DeepEqual(&querypb.BoundQuery{BindVariables: x}, &querypb.BoundQuery{BindVariables: y})
+}
+
+// CopyBindVariables returns a shallow-copy of the given bindVariables map.
+func CopyBindVariables(bindVariables map[string]*querypb.BindVariable) map[string]*querypb.BindVariable {
+ result := make(map[string]*querypb.BindVariable, len(bindVariables))
+ for key, value := range bindVariables {
+ result[key] = value
+ }
+ return result
+}
diff --git a/vendor/github.com/CovenantSQL/sqlparser/dependency/sqltypes/plan_value.go b/vendor/github.com/CovenantSQL/sqlparser/dependency/sqltypes/plan_value.go
new file mode 100644
index 000000000..d44e55108
--- /dev/null
+++ b/vendor/github.com/CovenantSQL/sqlparser/dependency/sqltypes/plan_value.go
@@ -0,0 +1,259 @@
+/*
+Copyright 2017 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sqltypes
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ "github.com/CovenantSQL/sqlparser/dependency/querypb"
+)
+
+// PlanValue represents a value or a list of values for
+// a column that will later be resolved using bind vars and used
+// to perform plan actions like generating the final query or
+// deciding on a route.
+//
+// Plan values are typically used as a slice ([]PlanValue)
+// where each entry is for one column. For situations where
+// the required output is a list of rows (like in the case
+// of multi-value inserts), the representation is pivoted.
+// For example, a statement like this:
+// INSERT INTO t VALUES (1, 2), (3, 4)
+// will be represented as follows:
+// []PlanValue{
+// Values: {1, 3},
+// Values: {2, 4},
+// }
+//
+// For WHERE clause items that contain a combination of
+// equality expressions and IN clauses like this:
+// WHERE pk1 = 1 AND pk2 IN (2, 3, 4)
+// The plan values will be represented as follows:
+// []PlanValue{
+// Value: 1,
+// Values: {2, 3, 4},
+// }
+// When converted into rows, columns with single values
+// are replicated as the same for all rows:
+// [][]Value{
+// {1, 2},
+// {1, 3},
+// {1, 4},
+// }
+type PlanValue struct {
+ Key string
+ Value Value
+ ListKey string
+ Values []PlanValue
+}
+
+// IsNull returns true if the PlanValue is NULL.
+func (pv PlanValue) IsNull() bool {
+ return pv.Key == "" && pv.Value.IsNull() && pv.ListKey == "" && pv.Values == nil
+}
+
+// IsList returns true if the PlanValue is a list.
+func (pv PlanValue) IsList() bool {
+ return pv.ListKey != "" || pv.Values != nil
+}
+
+// ResolveValue resolves a PlanValue as a single value based on the supplied bindvars.
+func (pv PlanValue) ResolveValue(bindVars map[string]*querypb.BindVariable) (Value, error) {
+ switch {
+ case pv.Key != "":
+ bv, err := pv.lookupValue(bindVars)
+ if err != nil {
+ return NULL, err
+ }
+ return MakeTrusted(bv.Type, bv.Value), nil
+ case !pv.Value.IsNull():
+ return pv.Value, nil
+ case pv.ListKey != "" || pv.Values != nil:
+ // This code is unreachable because the parser does not allow
+ // multi-value constructs where a single value is expected.
+ return NULL, errors.New("a list was supplied where a single value was expected")
+ }
+ return NULL, nil
+}
+
+func (pv PlanValue) lookupValue(bindVars map[string]*querypb.BindVariable) (*querypb.BindVariable, error) {
+ bv, ok := bindVars[pv.Key]
+ if !ok {
+ return nil, fmt.Errorf("missing bind var %s", pv.Key)
+ }
+ if bv.Type == querypb.Type_TUPLE {
+ return nil, fmt.Errorf("TUPLE was supplied for single value bind var %s", pv.ListKey)
+ }
+ return bv, nil
+}
+
+// ResolveList resolves a PlanValue as a list of values based on the supplied bindvars.
+func (pv PlanValue) ResolveList(bindVars map[string]*querypb.BindVariable) ([]Value, error) {
+ switch {
+ case pv.ListKey != "":
+ bv, err := pv.lookupList(bindVars)
+ if err != nil {
+ return nil, err
+ }
+ values := make([]Value, 0, len(bv.Values))
+ for _, val := range bv.Values {
+ values = append(values, MakeTrusted(val.Type, val.Value))
+ }
+ return values, nil
+ case pv.Values != nil:
+ values := make([]Value, 0, len(pv.Values))
+ for _, val := range pv.Values {
+ v, err := val.ResolveValue(bindVars)
+ if err != nil {
+ return nil, err
+ }
+ values = append(values, v)
+ }
+ return values, nil
+ }
+ // This code is unreachable because the parser does not allow
+ // single value constructs where multiple values are expected.
+ return nil, errors.New("a single value was supplied where a list was expected")
+}
+
+func (pv PlanValue) lookupList(bindVars map[string]*querypb.BindVariable) (*querypb.BindVariable, error) {
+ bv, ok := bindVars[pv.ListKey]
+ if !ok {
+ return nil, fmt.Errorf("missing bind var %s", pv.ListKey)
+ }
+ if bv.Type != querypb.Type_TUPLE {
+ return nil, fmt.Errorf("single value was supplied for TUPLE bind var %s", pv.ListKey)
+ }
+ return bv, nil
+}
+
+// MarshalJSON should be used only for testing.
+func (pv PlanValue) MarshalJSON() ([]byte, error) {
+ switch {
+ case pv.Key != "":
+ return json.Marshal(":" + pv.Key)
+ case !pv.Value.IsNull():
+ if pv.Value.IsIntegral() {
+ return pv.Value.ToBytes(), nil
+ }
+ return json.Marshal(pv.Value.ToString())
+ case pv.ListKey != "":
+ return json.Marshal("::" + pv.ListKey)
+ case pv.Values != nil:
+ return json.Marshal(pv.Values)
+ }
+ return []byte("null"), nil
+}
+
+func rowCount(pvs []PlanValue, bindVars map[string]*querypb.BindVariable) (int, error) {
+ count := -1
+ setCount := func(l int) error {
+ switch count {
+ case -1:
+ count = l
+ return nil
+ case l:
+ return nil
+ default:
+ return errors.New("mismatch in number of column values")
+ }
+ }
+
+ for _, pv := range pvs {
+ switch {
+ case pv.Key != "" || !pv.Value.IsNull():
+ continue
+ case pv.Values != nil:
+ if err := setCount(len(pv.Values)); err != nil {
+ return 0, err
+ }
+ case pv.ListKey != "":
+ bv, err := pv.lookupList(bindVars)
+ if err != nil {
+ return 0, err
+ }
+ if err := setCount(len(bv.Values)); err != nil {
+ return 0, err
+ }
+ }
+ }
+
+ if count == -1 {
+ // If there were no lists inside, it was a single row.
+ // Note that count can never be 0 because there is enough
+ // protection at the top level: list bind vars must have
+ // at least one value (enforced by vtgate), and AST lists
+ // must have at least one value (enforced by the parser).
+ // Also lists created internally after vtgate validation
+ // ensure at least one value.
+ // TODO(sougou): verify and change API to enforce this.
+ return 1, nil
+ }
+ return count, nil
+}
+
+// ResolveRows resolves a []PlanValue as rows based on the supplied bindvars.
+func ResolveRows(pvs []PlanValue, bindVars map[string]*querypb.BindVariable) ([][]Value, error) {
+ count, err := rowCount(pvs, bindVars)
+ if err != nil {
+ return nil, err
+ }
+
+ // Allocate the rows.
+ rows := make([][]Value, count)
+ for i := range rows {
+ rows[i] = make([]Value, len(pvs))
+ }
+
+ // Using j because we're resolving by columns.
+ for j, pv := range pvs {
+ switch {
+ case pv.Key != "":
+ bv, err := pv.lookupValue(bindVars)
+ if err != nil {
+ return nil, err
+ }
+ for i := range rows {
+ rows[i][j] = MakeTrusted(bv.Type, bv.Value)
+ }
+ case !pv.Value.IsNull():
+ for i := range rows {
+ rows[i][j] = pv.Value
+ }
+ case pv.ListKey != "":
+ bv, err := pv.lookupList(bindVars)
+ if err != nil {
+ // This code is unreachable because rowCount already checks this.
+ return nil, err
+ }
+ for i := range rows {
+ rows[i][j] = MakeTrusted(bv.Values[i].Type, bv.Values[i].Value)
+ }
+ case pv.Values != nil:
+ for i := range rows {
+ rows[i][j], err = pv.Values[i].ResolveValue(bindVars)
+ if err != nil {
+ return nil, err
+ }
+ }
+ // default case is a NULL value, which the row values are already initialized to.
+ }
+ }
+ return rows, nil
+}
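+
+// exampleResolveRows is an illustrative sketch added for documentation only;
+// it is not part of the upstream package. It mirrors the multi-value case
+// described in the PlanValue comment above: the first column is a single
+// value replicated for every row, and the second column is resolved from a
+// TUPLE bind var (here named "vals", an arbitrary name). The columns are
+// pivoted into the rows {1, 2} and {1, 3}.
+func exampleResolveRows() ([][]Value, error) {
+ bindVars := map[string]*querypb.BindVariable{
+ "vals": {
+ Type: querypb.Type_TUPLE,
+ Values: []*querypb.Value{
+ {Type: querypb.Type_INT64, Value: []byte("2")},
+ {Type: querypb.Type_INT64, Value: []byte("3")},
+ },
+ },
+ }
+ pvs := []PlanValue{
+ {Value: NewInt64(1)}, // single value: replicated across all rows
+ {ListKey: "vals"}, // list: one entry per row
+ }
+ return ResolveRows(pvs, bindVars)
+}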
diff --git a/vendor/github.com/CovenantSQL/sqlparser/dependency/sqltypes/testing.go b/vendor/github.com/CovenantSQL/sqlparser/dependency/sqltypes/testing.go
new file mode 100644
index 000000000..78160183c
--- /dev/null
+++ b/vendor/github.com/CovenantSQL/sqlparser/dependency/sqltypes/testing.go
@@ -0,0 +1,154 @@
+/*
+Copyright 2017 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sqltypes
+
+import (
+ querypb "github.com/CovenantSQL/sqlparser/dependency/querypb"
+)
+
+// Functions in this file should only be used for testing.
+// This is an experiment to see if test code bloat can be
+// reduced and readability improved.
+
+/*
+// MakeTestFields builds a []*querypb.Field for testing.
+// fields := sqltypes.MakeTestFields(
+// "a|b",
+// "int64|varchar",
+// )
+// The field types are as defined in querypb and are case
+// insensitive. Column delimiters must be used only to separate
+// strings and not at the beginning or the end.
+func MakeTestFields(names, types string) []*querypb.Field {
+ n := split(names)
+ t := split(types)
+ var fields []*querypb.Field
+ for i := range n {
+ fields = append(fields, &querypb.Field{
+ Name: n[i],
+ Type: querypb.Type(querypb.Type_value[strings.ToUpper(t[i])]),
+ })
+ }
+ return fields
+}
+
+// MakeTestResult builds a *sqltypes.Result object for testing.
+// result := sqltypes.MakeTestResult(
+// fields,
+// " 1|a",
+// "10|abcd",
+// )
+// The field type values are set as the types for the rows built.
+// Spaces are trimmed from row values. "null" is treated as NULL.
+func MakeTestResult(fields []*querypb.Field, rows ...string) *Result {
+ result := &Result{
+ Fields: fields,
+ }
+ if len(rows) > 0 {
+ result.Rows = make([][]Value, len(rows))
+ }
+ for i, row := range rows {
+ result.Rows[i] = make([]Value, len(fields))
+ for j, col := range split(row) {
+ if col == "null" {
+ continue
+ }
+ result.Rows[i][j] = MakeTrusted(fields[j].Type, []byte(col))
+ }
+ }
+ result.RowsAffected = uint64(len(result.Rows))
+ return result
+}
+
+// MakeTestStreamingResults builds a list of results for streaming.
+// results := sqltypes.MakeStreamingResults(
+// fields,
+// "1|a",
+// "2|b",
+// "---",
+// "c|c",
+// )
+// The first result contains only the fields. Subsequent results
+// are built using the field types. Every input that starts with a "-"
+// is treated as a streaming delimiter for one result. A final
+// delimiter must not be supplied.
+func MakeTestStreamingResults(fields []*querypb.Field, rows ...string) []*Result {
+ var results []*Result
+ results = append(results, &Result{Fields: fields})
+ start := 0
+ cur := 0
+ // Add a final streaming delimiter to simplify the loop below.
+ rows = append(rows, "-")
+ for cur < len(rows) {
+ if rows[cur][0] != '-' {
+ cur++
+ continue
+ }
+ result := MakeTestResult(fields, rows[start:cur]...)
+ result.Fields = nil
+ result.RowsAffected = 0
+ results = append(results, result)
+ start = cur + 1
+ cur = start
+ }
+ return results
+}
+*/
+
+// TestBindVariable makes a *querypb.BindVariable from
+// an interface{}. It panics on invalid input.
+// This function should only be used for testing.
+func TestBindVariable(v interface{}) *querypb.BindVariable {
+ if v == nil {
+ return NullBindVariable
+ }
+ bv, err := BuildBindVariable(v)
+ if err != nil {
+ panic(err)
+ }
+ return bv
+}
+
+// TestValue builds a Value from typ and val.
+// This function should only be used for testing.
+func TestValue(typ querypb.Type, val string) Value {
+ return MakeTrusted(typ, []byte(val))
+}
+
+/*
+// PrintResults prints []*Results into a string.
+// This function should only be used for testing.
+func PrintResults(results []*Result) string {
+ b := new(bytes.Buffer)
+ for i, r := range results {
+ if i == 0 {
+ fmt.Fprintf(b, "%v", r)
+ continue
+ }
+ fmt.Fprintf(b, ", %v", r)
+ }
+ return b.String()
+}
+
+func split(str string) []string {
+ splits := strings.Split(str, "|")
+ for i, v := range splits {
+ splits[i] = strings.TrimSpace(v)
+ }
+ return splits
+}
+*/
diff --git a/vendor/github.com/CovenantSQL/sqlparser/dependency/sqltypes/type.go b/vendor/github.com/CovenantSQL/sqlparser/dependency/sqltypes/type.go
new file mode 100644
index 000000000..38c582685
--- /dev/null
+++ b/vendor/github.com/CovenantSQL/sqlparser/dependency/sqltypes/type.go
@@ -0,0 +1,288 @@
+/*
+Copyright 2017 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sqltypes
+
+import (
+ "fmt"
+
+ "github.com/CovenantSQL/sqlparser/dependency/querypb"
+)
+
+// This file provides wrappers and support
+// functions for querypb.Type.
+
+// These bit flags can be used to query on the
+// common properties of types.
+const (
+ flagIsIntegral = int(querypb.Flag_ISINTEGRAL)
+ flagIsUnsigned = int(querypb.Flag_ISUNSIGNED)
+ flagIsFloat = int(querypb.Flag_ISFLOAT)
+ flagIsQuoted = int(querypb.Flag_ISQUOTED)
+ flagIsText = int(querypb.Flag_ISTEXT)
+ flagIsBinary = int(querypb.Flag_ISBINARY)
+)
+
+// IsIntegral returns true if querypb.Type is an integral
+// (signed/unsigned) that can be represented using
+// up to 64 binary bits.
+// If you have a Value object, use its member function.
+func IsIntegral(t querypb.Type) bool {
+ return int(t)&flagIsIntegral == flagIsIntegral
+}
+
+// IsSigned returns true if querypb.Type is a signed integral.
+// If you have a Value object, use its member function.
+func IsSigned(t querypb.Type) bool {
+ return int(t)&(flagIsIntegral|flagIsUnsigned) == flagIsIntegral
+}
+
+// IsUnsigned returns true if querypb.Type is an unsigned integral.
+// Caution: this is not the same as !IsSigned.
+// If you have a Value object, use its member function.
+func IsUnsigned(t querypb.Type) bool {
+ return int(t)&(flagIsIntegral|flagIsUnsigned) == flagIsIntegral|flagIsUnsigned
+}
+
+// IsFloat returns true if querypb.Type is a floating point.
+// If you have a Value object, use its member function.
+func IsFloat(t querypb.Type) bool {
+ return int(t)&flagIsFloat == flagIsFloat
+}
+
+// IsQuoted returns true if querypb.Type is a quoted text or binary.
+// If you have a Value object, use its member function.
+func IsQuoted(t querypb.Type) bool {
+ return int(t)&flagIsQuoted == flagIsQuoted
+}
+
+// IsText returns true if querypb.Type is a text.
+// If you have a Value object, use its member function.
+func IsText(t querypb.Type) bool {
+ return int(t)&flagIsText == flagIsText
+}
+
+// IsBinary returns true if querypb.Type is a binary.
+// If you have a Value object, use its member function.
+func IsBinary(t querypb.Type) bool {
+ return int(t)&flagIsBinary == flagIsBinary
+}
+
+// isNumber returns true if the type is any type of number.
+func isNumber(t querypb.Type) bool {
+ return IsIntegral(t) || IsFloat(t) || t == Decimal
+}
+
+// Vitess data types. These are idiomatically
+// named synonyms for the querypb.Type values.
+// Although these constants are interchangeable,
+// they should be treated as different from querypb.Type.
+// Use the synonyms only to refer to the type in Value.
+// For proto variables, use the querypb.Type constants
+// instead.
+// The following conditions are non-overlapping
+// and cover all types: IsSigned(), IsUnsigned(),
+// IsFloat(), IsQuoted(), Null, Decimal, Expression.
+// Also, IsIntegral() == (IsSigned()||IsUnsigned()).
+// TestCategory needs to be updated accordingly if
+// you add a new type.
+// If IsBinary or IsText is true, then IsQuoted is
+// also true. But there are IsQuoted types that are
+// neither binary nor text.
+// querypb.Type_TUPLE is not included in this list
+// because it's not a valid Value type.
+// TODO(sougou): provide a categorization function
+// that returns enums, which will allow for cleaner
+// switch statements for those who want to cover types
+// by their category.
+const (
+ Null = querypb.Type_NULL_TYPE
+ Int8 = querypb.Type_INT8
+ Uint8 = querypb.Type_UINT8
+ Int16 = querypb.Type_INT16
+ Uint16 = querypb.Type_UINT16
+ Int24 = querypb.Type_INT24
+ Uint24 = querypb.Type_UINT24
+ Int32 = querypb.Type_INT32
+ Uint32 = querypb.Type_UINT32
+ Int64 = querypb.Type_INT64
+ Uint64 = querypb.Type_UINT64
+ Float32 = querypb.Type_FLOAT32
+ Float64 = querypb.Type_FLOAT64
+ Timestamp = querypb.Type_TIMESTAMP
+ Date = querypb.Type_DATE
+ Time = querypb.Type_TIME
+ Datetime = querypb.Type_DATETIME
+ Year = querypb.Type_YEAR
+ Decimal = querypb.Type_DECIMAL
+ Text = querypb.Type_TEXT
+ Blob = querypb.Type_BLOB
+ VarChar = querypb.Type_VARCHAR
+ VarBinary = querypb.Type_VARBINARY
+ Char = querypb.Type_CHAR
+ Binary = querypb.Type_BINARY
+ Bit = querypb.Type_BIT
+ Enum = querypb.Type_ENUM
+ Set = querypb.Type_SET
+ Geometry = querypb.Type_GEOMETRY
+ TypeJSON = querypb.Type_JSON
+ Expression = querypb.Type_EXPRESSION
+)
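+
+// exampleSignedness is an illustrative sketch added for documentation only;
+// it is not part of the upstream package. It illustrates the caution in the
+// IsUnsigned comment above: IsUnsigned is not simply the negation of
+// IsSigned, because a non-integral type such as VarChar is neither signed
+// nor unsigned.
+func exampleSignedness() (signed, unsigned, neither bool) {
+ return IsSigned(Int64), // true: integral without the unsigned flag
+ IsUnsigned(Uint64), // true: integral with the unsigned flag
+ IsSigned(VarChar) || IsUnsigned(VarChar) // false: not integral at all
+}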
+
+// bit-shift the mysql flags by two bytes so we
+// can merge them with the mysql or vitess types.
+const (
+ mysqlUnsigned = 32
+ mysqlBinary = 128
+ mysqlEnum = 256
+ mysqlSet = 2048
+)
+
+// If you add to this map, make sure you add a test case
+// in tabletserver/endtoend.
+var mysqlToType = map[int64]querypb.Type{
+ 1: Int8,
+ 2: Int16,
+ 3: Int32,
+ 4: Float32,
+ 5: Float64,
+ 6: Null,
+ 7: Timestamp,
+ 8: Int64,
+ 9: Int24,
+ 10: Date,
+ 11: Time,
+ 12: Datetime,
+ 13: Year,
+ 16: Bit,
+ 245: TypeJSON,
+ 246: Decimal,
+ 249: Text,
+ 250: Text,
+ 251: Text,
+ 252: Text,
+ 253: VarChar,
+ 254: Char,
+ 255: Geometry,
+}
+
+// modifyType modifies the vitess type based on the
+// mysql flag. The function checks specific flags based
+// on the type. This allows us to ignore stray flags
+// that MySQL occasionally sets.
+func modifyType(typ querypb.Type, flags int64) querypb.Type {
+ switch typ {
+ case Int8:
+ if flags&mysqlUnsigned != 0 {
+ return Uint8
+ }
+ return Int8
+ case Int16:
+ if flags&mysqlUnsigned != 0 {
+ return Uint16
+ }
+ return Int16
+ case Int32:
+ if flags&mysqlUnsigned != 0 {
+ return Uint32
+ }
+ return Int32
+ case Int64:
+ if flags&mysqlUnsigned != 0 {
+ return Uint64
+ }
+ return Int64
+ case Int24:
+ if flags&mysqlUnsigned != 0 {
+ return Uint24
+ }
+ return Int24
+ case Text:
+ if flags&mysqlBinary != 0 {
+ return Blob
+ }
+ return Text
+ case VarChar:
+ if flags&mysqlBinary != 0 {
+ return VarBinary
+ }
+ return VarChar
+ case Char:
+ if flags&mysqlBinary != 0 {
+ return Binary
+ }
+ if flags&mysqlEnum != 0 {
+ return Enum
+ }
+ if flags&mysqlSet != 0 {
+ return Set
+ }
+ return Char
+ }
+ return typ
+}
+
+// MySQLToType computes the vitess type from mysql type and flags.
+func MySQLToType(mysqlType, flags int64) (typ querypb.Type, err error) {
+ result, ok := mysqlToType[mysqlType]
+ if !ok {
+ return 0, fmt.Errorf("unsupported type: %d", mysqlType)
+ }
+ return modifyType(result, flags), nil
+}
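+
+// exampleMySQLToType is an illustrative sketch added for documentation only;
+// it is not part of the upstream package. MySQL reports the column type and
+// flags separately: protocol type 3 (MYSQL_TYPE_LONG) maps to Int32 on its
+// own, but with the unsigned flag (32) set, modifyType promotes it to Uint32.
+func exampleMySQLToType() (querypb.Type, error) {
+ return MySQLToType(3, mysqlUnsigned) // Uint32, nil
+}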
+
+// typeToMySQL is the reverse of mysqlToType.
+var typeToMySQL = map[querypb.Type]struct {
+ typ int64
+ flags int64
+}{
+ Int8: {typ: 1},
+ Uint8: {typ: 1, flags: mysqlUnsigned},
+ Int16: {typ: 2},
+ Uint16: {typ: 2, flags: mysqlUnsigned},
+ Int32: {typ: 3},
+ Uint32: {typ: 3, flags: mysqlUnsigned},
+ Float32: {typ: 4},
+ Float64: {typ: 5},
+ Null: {typ: 6, flags: mysqlBinary},
+ Timestamp: {typ: 7},
+ Int64: {typ: 8},
+ Uint64: {typ: 8, flags: mysqlUnsigned},
+ Int24: {typ: 9},
+ Uint24: {typ: 9, flags: mysqlUnsigned},
+ Date: {typ: 10, flags: mysqlBinary},
+ Time: {typ: 11, flags: mysqlBinary},
+ Datetime: {typ: 12, flags: mysqlBinary},
+ Year: {typ: 13, flags: mysqlUnsigned},
+ Bit: {typ: 16, flags: mysqlUnsigned},
+ TypeJSON: {typ: 245},
+ Decimal: {typ: 246},
+ Text: {typ: 252},
+ Blob: {typ: 252, flags: mysqlBinary},
+ VarChar: {typ: 253},
+ VarBinary: {typ: 253, flags: mysqlBinary},
+ Char: {typ: 254},
+ Binary: {typ: 254, flags: mysqlBinary},
+ Enum: {typ: 254, flags: mysqlEnum},
+ Set: {typ: 254, flags: mysqlSet},
+ Geometry: {typ: 255},
+}
+
+// TypeToMySQL returns the equivalent mysql type and flag for a vitess type.
+func TypeToMySQL(typ querypb.Type) (mysqlType, flags int64) {
+ val := typeToMySQL[typ]
+ return val.typ, val.flags
+}
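+
+// Illustrative sketch (not part of the vendored code): a round trip through
+// MySQLToType and TypeToMySQL. MySQL type 3 with the unsigned flag maps to
+// Uint32, and Uint32 maps back to {typ: 3, flags: mysqlUnsigned}.
+//
+//   typ, err := MySQLToType(3, mysqlUnsigned) // typ == Uint32, err == nil
+//   mt, fl := TypeToMySQL(typ)                // mt == 3, fl == mysqlUnsigned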
diff --git a/vendor/github.com/CovenantSQL/sqlparser/dependency/sqltypes/value.go b/vendor/github.com/CovenantSQL/sqlparser/dependency/sqltypes/value.go
new file mode 100644
index 000000000..126555d04
--- /dev/null
+++ b/vendor/github.com/CovenantSQL/sqlparser/dependency/sqltypes/value.go
@@ -0,0 +1,375 @@
+/*
+Copyright 2017 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package sqltypes implements interfaces and types that represent SQL values.
+package sqltypes
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "strconv"
+
+ "github.com/CovenantSQL/sqlparser/dependency/bytes2"
+ "github.com/CovenantSQL/sqlparser/dependency/hack"
+ "github.com/CovenantSQL/sqlparser/dependency/querypb"
+)
+
+var (
+ // NULL represents the NULL value.
+ NULL = Value{}
+
+ // DontEscape tells you if a character should not be escaped.
+ DontEscape = byte(255)
+
+ nullstr = []byte("null")
+)
+
+// BinWriter interface is used for encoding values.
+// Types like bytes.Buffer conform to this interface.
+// We expect the writer objects to be in-memory buffers.
+// So, we don't expect the write operations to fail.
+type BinWriter interface {
+ Write([]byte) (int, error)
+}
+
+// Value can store any SQL value. If the value represents
+// an integral type, the bytes are always stored as a canonical
+// representation that matches how MySQL returns such values.
+type Value struct {
+ typ querypb.Type
+ val []byte
+}
+
+// NewValue builds a Value using typ and val. If the value and typ
+// don't match, it returns an error.
+func NewValue(typ querypb.Type, val []byte) (v Value, err error) {
+ switch {
+ case IsSigned(typ):
+ if _, err := strconv.ParseInt(string(val), 0, 64); err != nil {
+ return NULL, err
+ }
+ return MakeTrusted(typ, val), nil
+ case IsUnsigned(typ):
+ if _, err := strconv.ParseUint(string(val), 0, 64); err != nil {
+ return NULL, err
+ }
+ return MakeTrusted(typ, val), nil
+ case IsFloat(typ) || typ == Decimal:
+ if _, err := strconv.ParseFloat(string(val), 64); err != nil {
+ return NULL, err
+ }
+ return MakeTrusted(typ, val), nil
+ case IsQuoted(typ) || typ == Null:
+ return MakeTrusted(typ, val), nil
+ }
+ // All other types are unsafe or invalid.
+ return NULL, fmt.Errorf("invalid type specified for MakeValue: %v", typ)
+}
+
+// MakeTrusted makes a new Value based on the type.
+// This function should only be used if you know the value
+// and type conform to the rules. Every place this function is
+// called, a comment is needed that explains why it's justified.
+// Exceptions: The current package and mysql package do not need
+// comments. Other packages can also use the function to create
+// VarBinary or VarChar values.
+func MakeTrusted(typ querypb.Type, val []byte) Value {
+ if typ == Null {
+ return NULL
+ }
+ return Value{typ: typ, val: val}
+}
+
+// NewInt64 builds an Int64 Value.
+func NewInt64(v int64) Value {
+ return MakeTrusted(Int64, strconv.AppendInt(nil, v, 10))
+}
+
+// NewInt32 builds an Int32 Value.
+func NewInt32(v int32) Value {
+ return MakeTrusted(Int32, strconv.AppendInt(nil, int64(v), 10))
+}
+
+// NewUint64 builds a Uint64 Value.
+func NewUint64(v uint64) Value {
+ return MakeTrusted(Uint64, strconv.AppendUint(nil, v, 10))
+}
+
+// NewFloat64 builds a Float64 Value.
+func NewFloat64(v float64) Value {
+ return MakeTrusted(Float64, strconv.AppendFloat(nil, v, 'g', -1, 64))
+}
+
+// NewVarChar builds a VarChar Value.
+func NewVarChar(v string) Value {
+ return MakeTrusted(VarChar, []byte(v))
+}
+
+// NewVarBinary builds a VarBinary Value.
+// The input is a string because it's the most common use case.
+func NewVarBinary(v string) Value {
+ return MakeTrusted(VarBinary, []byte(v))
+}
+
+// NewIntegral builds an integral type from a string representation.
+// The type will be Int64 or Uint64. Int64 will be preferred where possible.
+func NewIntegral(val string) (n Value, err error) {
+ signed, err := strconv.ParseInt(val, 0, 64)
+ if err == nil {
+ return MakeTrusted(Int64, strconv.AppendInt(nil, signed, 10)), nil
+ }
+ unsigned, err := strconv.ParseUint(val, 0, 64)
+ if err != nil {
+ return Value{}, err
+ }
+ return MakeTrusted(Uint64, strconv.AppendUint(nil, unsigned, 10)), nil
+}
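+
+// Illustrative sketch (not part of the vendored code): NewIntegral prefers
+// Int64 and falls back to Uint64 only when the value overflows a signed
+// 64-bit integer.
+//
+//   v1, _ := NewIntegral("12345")                // v1.Type() == Int64
+//   v2, _ := NewIntegral("18446744073709551615") // v2.Type() == Uint64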
+
+// InterfaceToValue builds a value from a go type.
+// Supported types are nil, int64, uint64, float64,
+// string and []byte.
+// This function is deprecated. Use the type-specific
+// functions instead.
+func InterfaceToValue(goval interface{}) (Value, error) {
+ switch goval := goval.(type) {
+ case nil:
+ return NULL, nil
+ case []byte:
+ return MakeTrusted(VarBinary, goval), nil
+ case int64:
+ return NewInt64(goval), nil
+ case uint64:
+ return NewUint64(goval), nil
+ case float64:
+ return NewFloat64(goval), nil
+ case string:
+ return NewVarChar(goval), nil
+ default:
+ return NULL, fmt.Errorf("unexpected type %T: %v", goval, goval)
+ }
+}
+
+// Type returns the type of Value.
+func (v Value) Type() querypb.Type {
+ return v.typ
+}
+
+// Raw returns the internal representation of the value. For newer types,
+// this may not match MySQL's representation.
+func (v Value) Raw() []byte {
+ return v.val
+}
+
+// ToBytes returns the value as MySQL would return it as []byte.
+// In contrast, Raw returns the internal representation of the Value, which may not
+// match MySQL's representation for newer types.
+// If the value is not convertible, as in the case of Expression, it returns nil.
+func (v Value) ToBytes() []byte {
+ if v.typ == Expression {
+ return nil
+ }
+ return v.val
+}
+
+// Len returns the length.
+func (v Value) Len() int {
+ return len(v.val)
+}
+
+// ToString returns the value as MySQL would return it as string.
+// If the value is not convertible, as in the case of Expression, it returns an empty string.
+func (v Value) ToString() string {
+ if v.typ == Expression {
+ return ""
+ }
+ return hack.String(v.val)
+}
+
+// String returns a printable version of the value.
+func (v Value) String() string {
+ if v.typ == Null {
+ return "NULL"
+ }
+ if v.IsQuoted() {
+ return fmt.Sprintf("%v(%q)", v.typ, v.val)
+ }
+ return fmt.Sprintf("%v(%s)", v.typ, v.val)
+}
+
+// EncodeSQL encodes the value into an SQL statement. Can be binary.
+func (v Value) EncodeSQL(b BinWriter) {
+ switch {
+ case v.typ == Null:
+ b.Write(nullstr)
+ case v.IsQuoted():
+ encodeBytesSQL(v.val, b)
+ default:
+ b.Write(v.val)
+ }
+}
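+
+// Illustrative sketch (not part of the vendored code): quoted types are
+// escaped and wrapped in single quotes, numeric types are written raw, and
+// NULL is written as the literal "null".
+//
+//   var buf bytes.Buffer
+//   NewVarChar("it's").EncodeSQL(&buf)
+//   // buf.String() == `'it\'s'`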
+
+// EncodeASCII encodes the value using 7-bit clean ascii bytes.
+func (v Value) EncodeASCII(b BinWriter) {
+ switch {
+ case v.typ == Null:
+ b.Write(nullstr)
+ case v.IsQuoted():
+ encodeBytesASCII(v.val, b)
+ default:
+ b.Write(v.val)
+ }
+}
+
+// IsNull returns true if Value is null.
+func (v Value) IsNull() bool {
+ return v.typ == Null
+}
+
+// IsIntegral returns true if Value is an integral.
+func (v Value) IsIntegral() bool {
+ return IsIntegral(v.typ)
+}
+
+// IsSigned returns true if Value is a signed integral.
+func (v Value) IsSigned() bool {
+ return IsSigned(v.typ)
+}
+
+// IsUnsigned returns true if Value is an unsigned integral.
+func (v Value) IsUnsigned() bool {
+ return IsUnsigned(v.typ)
+}
+
+// IsFloat returns true if Value is a float.
+func (v Value) IsFloat() bool {
+ return IsFloat(v.typ)
+}
+
+// IsQuoted returns true if Value must be SQL-quoted.
+func (v Value) IsQuoted() bool {
+ return IsQuoted(v.typ)
+}
+
+// IsText returns true if Value is a collatable text.
+func (v Value) IsText() bool {
+ return IsText(v.typ)
+}
+
+// IsBinary returns true if Value is binary.
+func (v Value) IsBinary() bool {
+ return IsBinary(v.typ)
+}
+
+// MarshalJSON should only be used for testing.
+// It's not a complete implementation.
+func (v Value) MarshalJSON() ([]byte, error) {
+ switch {
+ case v.IsQuoted():
+ return json.Marshal(v.ToString())
+ case v.typ == Null:
+ return nullstr, nil
+ }
+ return v.val, nil
+}
+
+// UnmarshalJSON should only be used for testing.
+// It's not a complete implementation.
+func (v *Value) UnmarshalJSON(b []byte) error {
+ if len(b) == 0 {
+ return fmt.Errorf("error unmarshaling empty bytes")
+ }
+ var val interface{}
+ var err error
+ switch b[0] {
+ case '-':
+ var ival int64
+ err = json.Unmarshal(b, &ival)
+ val = ival
+ case '"':
+ var bval []byte
+ err = json.Unmarshal(b, &bval)
+ val = bval
+ case 'n': // null
+ err = json.Unmarshal(b, &val)
+ default:
+ var uval uint64
+ err = json.Unmarshal(b, &uval)
+ val = uval
+ }
+ if err != nil {
+ return err
+ }
+ *v, err = InterfaceToValue(val)
+ return err
+}
+
+func encodeBytesSQL(val []byte, b BinWriter) {
+ buf := &bytes2.Buffer{}
+ buf.WriteByte('\'')
+ for _, ch := range val {
+ if encodedChar := SQLEncodeMap[ch]; encodedChar == DontEscape {
+ buf.WriteByte(ch)
+ } else {
+ buf.WriteByte('\\')
+ buf.WriteByte(encodedChar)
+ }
+ }
+ buf.WriteByte('\'')
+ b.Write(buf.Bytes())
+}
+
+func encodeBytesASCII(val []byte, b BinWriter) {
+ buf := &bytes2.Buffer{}
+ buf.WriteByte('\'')
+ encoder := base64.NewEncoder(base64.StdEncoding, buf)
+ encoder.Write(val)
+ encoder.Close()
+ buf.WriteByte('\'')
+ b.Write(buf.Bytes())
+}
+
+// SQLEncodeMap specifies how to escape binary data with '\'.
+// Complies with http://dev.mysql.com/doc/refman/5.1/en/string-syntax.html
+var SQLEncodeMap [256]byte
+
+// SQLDecodeMap is the reverse of SQLEncodeMap
+var SQLDecodeMap [256]byte
+
+var encodeRef = map[byte]byte{
+ '\x00': '0',
+ '\'': '\'',
+ '"': '"',
+ '\b': 'b',
+ '\n': 'n',
+ '\r': 'r',
+ '\t': 't',
+ 26: 'Z', // ctl-Z
+ '\\': '\\',
+}
+
+func init() {
+ for i := range SQLEncodeMap {
+ SQLEncodeMap[i] = DontEscape
+ SQLDecodeMap[i] = DontEscape
+ }
+ for i := range SQLEncodeMap {
+ if to, ok := encodeRef[byte(i)]; ok {
+ SQLEncodeMap[byte(i)] = to
+ SQLDecodeMap[to] = byte(i)
+ }
+ }
+}
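+
+// Illustrative sketch (not part of the vendored code): after init runs, the
+// characters listed in encodeRef map to their escape letter and every other
+// byte maps to DontEscape.
+//
+//   SQLEncodeMap['\n'] == 'n'        // newline is written as \n
+//   SQLDecodeMap['n'] == '\n'        // and decodes back
+//   SQLEncodeMap['a'] == DontEscape  // ordinary bytes pass through unescaped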
diff --git a/vendor/github.com/CovenantSQL/sqlparser/encodable.go b/vendor/github.com/CovenantSQL/sqlparser/encodable.go
new file mode 100644
index 000000000..427553a9a
--- /dev/null
+++ b/vendor/github.com/CovenantSQL/sqlparser/encodable.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2017 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sqlparser
+
+import (
+ "bytes"
+
+ "github.com/CovenantSQL/sqlparser/dependency/sqltypes"
+)
+
+// This file contains types that are 'Encodable'.
+
+// Encodable defines the interface for types that can
+// be custom-encoded into SQL.
+type Encodable interface {
+ EncodeSQL(buf *bytes.Buffer)
+}
+
+// InsertValues is a custom SQL encoder for the values of
+// an insert statement.
+type InsertValues [][]sqltypes.Value
+
+// EncodeSQL performs the SQL encoding for InsertValues.
+func (iv InsertValues) EncodeSQL(buf *bytes.Buffer) {
+ for i, rows := range iv {
+ if i != 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteByte('(')
+ for j, bv := range rows {
+ if j != 0 {
+ buf.WriteString(", ")
+ }
+ bv.EncodeSQL(buf)
+ }
+ buf.WriteByte(')')
+ }
+}
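+
+// Illustrative sketch (not part of the vendored code): two rows encode as
+// comma-separated parenthesized tuples.
+//
+//   iv := InsertValues{
+//       {sqltypes.NewInt64(1), sqltypes.NewVarChar("a")},
+//       {sqltypes.NewInt64(2), sqltypes.NewVarChar("b")},
+//   }
+//   var buf bytes.Buffer
+//   iv.EncodeSQL(&buf)
+//   // buf.String() == "(1, 'a'), (2, 'b')"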
+
+// TupleEqualityList is for generating equality constraints
+// for tables that have composite primary keys.
+type TupleEqualityList struct {
+ Columns []ColIdent
+ Rows [][]sqltypes.Value
+}
+
+// EncodeSQL generates the where clause constraints for the tuple
+// equality.
+func (tpl *TupleEqualityList) EncodeSQL(buf *bytes.Buffer) {
+ if len(tpl.Columns) == 1 {
+ tpl.encodeAsIn(buf)
+ return
+ }
+ tpl.encodeAsEquality(buf)
+}
+
+func (tpl *TupleEqualityList) encodeAsIn(buf *bytes.Buffer) {
+ Append(buf, tpl.Columns[0])
+ buf.WriteString(" in (")
+ for i, r := range tpl.Rows {
+ if i != 0 {
+ buf.WriteString(", ")
+ }
+ r[0].EncodeSQL(buf)
+ }
+ buf.WriteByte(')')
+}
+
+func (tpl *TupleEqualityList) encodeAsEquality(buf *bytes.Buffer) {
+ for i, r := range tpl.Rows {
+ if i != 0 {
+ buf.WriteString(" or ")
+ }
+ buf.WriteString("(")
+ for j, c := range tpl.Columns {
+ if j != 0 {
+ buf.WriteString(" and ")
+ }
+ Append(buf, c)
+ buf.WriteString(" = ")
+ r[j].EncodeSQL(buf)
+ }
+ buf.WriteByte(')')
+ }
+}
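+
+// Illustrative sketch (not part of the vendored code): a single-column key is
+// rendered as an IN list, a composite key as a disjunction of per-row
+// conjunctions:
+//
+//   one column:  pk in (1, 2)
+//   two columns: (k1 = 1 and k2 = 'a') or (k1 = 2 and k2 = 'b')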
diff --git a/vendor/github.com/CovenantSQL/sqlparser/impossible_query.go b/vendor/github.com/CovenantSQL/sqlparser/impossible_query.go
new file mode 100644
index 000000000..1179b6112
--- /dev/null
+++ b/vendor/github.com/CovenantSQL/sqlparser/impossible_query.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2017 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sqlparser
+
+// FormatImpossibleQuery creates an impossible query in a TrackedBuffer.
+// An impossible query is a modified version of a query where all selects have where clauses that are
+// impossible for mysql to resolve. This is used in the vtgate and vttablet:
+//
+// - In the vtgate it's used for joins: if the first query returns no result, then vtgate uses the impossible
+// query just to fetch field info from vttablet
+// - In the vttablet, it's just an optimization: the field info is fetched once from MySQL, cached and reused
+// for subsequent queries
+func FormatImpossibleQuery(buf *TrackedBuffer, node SQLNode) {
+ switch node := node.(type) {
+ case *Select:
+ buf.Myprintf("select %v from %v where 1 != 1", node.SelectExprs, node.From)
+ if node.GroupBy != nil {
+ node.GroupBy.Format(buf)
+ }
+ case *Union:
+ buf.Myprintf("%v %s %v", node.Left, node.Type, node.Right)
+ default:
+ node.Format(buf)
+ }
+}
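+
+// Illustrative sketch (not part of the vendored code), assuming Parse and
+// NewTrackedBuffer from this package:
+//
+//   stmt, _ := Parse("select a, b from t where a = 1")
+//   buf := NewTrackedBuffer(nil)
+//   FormatImpossibleQuery(buf, stmt)
+//   // buf.String() == "select a, b from t where 1 != 1"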
diff --git a/vendor/github.com/CovenantSQL/sqlparser/normalizer.go b/vendor/github.com/CovenantSQL/sqlparser/normalizer.go
new file mode 100644
index 000000000..d3edb56cd
--- /dev/null
+++ b/vendor/github.com/CovenantSQL/sqlparser/normalizer.go
@@ -0,0 +1,223 @@
+/*
+Copyright 2017 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sqlparser
+
+import (
+ "fmt"
+
+ "github.com/CovenantSQL/sqlparser/dependency/querypb"
+ "github.com/CovenantSQL/sqlparser/dependency/sqltypes"
+)
+
+// Normalize changes the statement to use bind values, and
+// updates the bind vars to those values. The supplied prefix
+// is used to generate the bind var names. The function ensures
+// that there are no collisions with existing bind vars.
+// Within Select constructs, bind vars are deduped. This allows
+// us to identify vindex equality. Otherwise, every value is
+// treated as distinct.
+func Normalize(stmt Statement, bindVars map[string]*querypb.BindVariable, prefix string) {
+ nz := newNormalizer(stmt, bindVars, prefix)
+ _ = Walk(nz.WalkStatement, stmt)
+}
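+
+// Illustrative sketch (not part of the vendored code), assuming Parse and
+// String from this package:
+//
+//   stmt, _ := Parse("select * from t where a = 1 and b = 'x'")
+//   bv := map[string]*querypb.BindVariable{}
+//   Normalize(stmt, bv, "bv")
+//   // String(stmt) == "select * from t where a = :bv1 and b = :bv2"
+//   // bv["bv1"] holds the int64 1, bv["bv2"] holds the bytes "x"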
+
+type normalizer struct {
+ stmt Statement
+ bindVars map[string]*querypb.BindVariable
+ prefix string
+ reserved map[string]struct{}
+ counter int
+ vals map[string]string
+}
+
+func newNormalizer(stmt Statement, bindVars map[string]*querypb.BindVariable, prefix string) *normalizer {
+ return &normalizer{
+ stmt: stmt,
+ bindVars: bindVars,
+ prefix: prefix,
+ reserved: GetBindvars(stmt),
+ counter: 1,
+ vals: make(map[string]string),
+ }
+}
+
+// WalkStatement is the top level walk function.
+// If it encounters a Select, it switches to a mode
+// where variables are deduped.
+func (nz *normalizer) WalkStatement(node SQLNode) (bool, error) {
+ switch node := node.(type) {
+ case *Select:
+ _ = Walk(nz.WalkSelect, node)
+ // Don't continue
+ return false, nil
+ case *SQLVal:
+ nz.convertSQLVal(node)
+ case *ComparisonExpr:
+ nz.convertComparison(node)
+ }
+ return true, nil
+}
+
+// WalkSelect normalizes the AST in Select mode.
+func (nz *normalizer) WalkSelect(node SQLNode) (bool, error) {
+ switch node := node.(type) {
+ case *SQLVal:
+ nz.convertSQLValDedup(node)
+ case *ComparisonExpr:
+ nz.convertComparison(node)
+ }
+ return true, nil
+}
+
+func (nz *normalizer) convertSQLValDedup(node *SQLVal) {
+ // If value is too long, don't dedup.
+ // Such values are most likely not for vindexes.
+ // We save a lot of CPU because we avoid building
+ // the key for them.
+ if len(node.Val) > 256 {
+ nz.convertSQLVal(node)
+ return
+ }
+
+ // Make the bindvar
+ bval := nz.sqlToBindvar(node)
+ if bval == nil {
+ return
+ }
+
+ // Check if there's a bindvar for that value already.
+ var key string
+ if bval.Type == sqltypes.VarBinary {
+ // Prefixing strings with "'" ensures that a string
+ // and number that have the same representation don't
+ // collide.
+ key = "'" + string(node.Val)
+ } else {
+ key = string(node.Val)
+ }
+ bvname, ok := nz.vals[key]
+ if !ok {
+ // If there's no such bindvar, make a new one.
+ bvname = nz.newName()
+ nz.vals[key] = bvname
+ nz.bindVars[bvname] = bval
+ }
+
+ // Modify the AST node to a bindvar.
+ node.Type = ValArg
+ node.Val = append([]byte(":"), bvname...)
+}
+
+// convertSQLVal converts an SQLVal without the dedup.
+func (nz *normalizer) convertSQLVal(node *SQLVal) {
+ bval := nz.sqlToBindvar(node)
+ if bval == nil {
+ return
+ }
+
+ bvname := nz.newName()
+ nz.bindVars[bvname] = bval
+
+ node.Type = ValArg
+ node.Val = append([]byte(":"), bvname...)
+}
+
+// convertComparison attempts to convert IN clauses to
+// use the list bind var construct. If it fails, it returns
+// with no change made. The walk function will then continue
+// and iterate on converting each individual value into separate
+// bind vars.
+func (nz *normalizer) convertComparison(node *ComparisonExpr) {
+ if node.Operator != InStr && node.Operator != NotInStr {
+ return
+ }
+ tupleVals, ok := node.Right.(ValTuple)
+ if !ok {
+ return
+ }
+ // The RHS is a tuple of values.
+ // Make a list bindvar.
+ bvals := &querypb.BindVariable{
+ Type: querypb.Type_TUPLE,
+ }
+ for _, val := range tupleVals {
+ bval := nz.sqlToBindvar(val)
+ if bval == nil {
+ return
+ }
+ bvals.Values = append(bvals.Values, &querypb.Value{
+ Type: bval.Type,
+ Value: bval.Value,
+ })
+ }
+ bvname := nz.newName()
+ nz.bindVars[bvname] = bvals
+ // Modify RHS to be a list bindvar.
+ node.Right = ListArg(append([]byte("::"), bvname...))
+}
+
+func (nz *normalizer) sqlToBindvar(node SQLNode) *querypb.BindVariable {
+ if node, ok := node.(*SQLVal); ok {
+ var v sqltypes.Value
+ var err error
+ switch node.Type {
+ case StrVal:
+ v, err = sqltypes.NewValue(sqltypes.VarBinary, node.Val)
+ case IntVal:
+ v, err = sqltypes.NewValue(sqltypes.Int64, node.Val)
+ case FloatVal:
+ v, err = sqltypes.NewValue(sqltypes.Float64, node.Val)
+ default:
+ return nil
+ }
+ if err != nil {
+ return nil
+ }
+ return sqltypes.ValueBindVariable(v)
+ }
+ return nil
+}
+
+func (nz *normalizer) newName() string {
+ for {
+ newName := fmt.Sprintf("%s%d", nz.prefix, nz.counter)
+ if _, ok := nz.reserved[newName]; !ok {
+ nz.reserved[newName] = struct{}{}
+ return newName
+ }
+ nz.counter++
+ }
+}
+
+// GetBindvars returns a map of the bind vars referenced in the statement.
+// TODO(sougou): This function gets called again from vtgate/planbuilder.
+// Ideally, this should be done only once.
+func GetBindvars(stmt Statement) map[string]struct{} {
+ bindvars := make(map[string]struct{})
+ _ = Walk(func(node SQLNode) (kontinue bool, err error) {
+ switch node := node.(type) {
+ case *SQLVal:
+ if node.Type == ValArg {
+ bindvars[string(node.Val[1:])] = struct{}{}
+ }
+ case ListArg:
+ bindvars[string(node[2:])] = struct{}{}
+ }
+ return true, nil
+ }, stmt)
+ return bindvars
+}
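+
+// Illustrative sketch (not part of the vendored code): existing bind vars are
+// collected by name, without their ':' or '::' prefix.
+//
+//   stmt, _ := Parse("select * from t where a = :v1 and b in ::list1")
+//   GetBindvars(stmt) // map[string]struct{}{"v1": {}, "list1": {}}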
diff --git a/vendor/github.com/CovenantSQL/sqlparser/parsed_query.go b/vendor/github.com/CovenantSQL/sqlparser/parsed_query.go
new file mode 100644
index 000000000..6b13f2b8c
--- /dev/null
+++ b/vendor/github.com/CovenantSQL/sqlparser/parsed_query.go
@@ -0,0 +1,119 @@
+/*
+Copyright 2017 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sqlparser
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/CovenantSQL/sqlparser/dependency/querypb"
+ "github.com/CovenantSQL/sqlparser/dependency/sqltypes"
+)
+
+// ParsedQuery represents a parsed query where
+// bind locations are precomputed for fast substitutions.
+type ParsedQuery struct {
+ Query string
+ bindLocations []bindLocation
+}
+
+type bindLocation struct {
+ offset, length int
+}
+
+// NewParsedQuery returns a ParsedQuery of the ast.
+func NewParsedQuery(node SQLNode) *ParsedQuery {
+ buf := NewTrackedBuffer(nil)
+ buf.Myprintf("%v", node)
+ return buf.ParsedQuery()
+}
+
+// GenerateQuery generates a query by substituting the specified
+// bindVariables. The extras parameter specifies special parameters
+// that can perform custom encoding.
+func (pq *ParsedQuery) GenerateQuery(bindVariables map[string]*querypb.BindVariable, extras map[string]Encodable) ([]byte, error) {
+ if len(pq.bindLocations) == 0 {
+ return []byte(pq.Query), nil
+ }
+ buf := bytes.NewBuffer(make([]byte, 0, len(pq.Query)))
+ current := 0
+ for _, loc := range pq.bindLocations {
+ buf.WriteString(pq.Query[current:loc.offset])
+ name := pq.Query[loc.offset : loc.offset+loc.length]
+ if encodable, ok := extras[name[1:]]; ok {
+ encodable.EncodeSQL(buf)
+ } else {
+ supplied, _, err := FetchBindVar(name, bindVariables)
+ if err != nil {
+ return nil, err
+ }
+ EncodeValue(buf, supplied)
+ }
+ current = loc.offset + loc.length
+ }
+ buf.WriteString(pq.Query[current:])
+ return buf.Bytes(), nil
+}
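+
+// Illustrative sketch (not part of the vendored code): GenerateQuery splices
+// the encoded bind values into the precomputed bind locations.
+//
+//   pq := NewParsedQuery(stmt) // stmt parsed from "select * from t where a = :id"
+//   q, _ := pq.GenerateQuery(map[string]*querypb.BindVariable{
+//       "id": {Type: querypb.Type_INT64, Value: []byte("5")},
+//   }, nil)
+//   // string(q) == "select * from t where a = 5"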
+
+// EncodeValue encodes one bind variable value into the query.
+func EncodeValue(buf *bytes.Buffer, value *querypb.BindVariable) {
+ if value.Type != querypb.Type_TUPLE {
+ // Since we already check for TUPLE, we don't expect an error.
+ v, _ := sqltypes.BindVariableToValue(value)
+ v.EncodeSQL(buf)
+ return
+ }
+
+ // It's a TUPLE.
+ buf.WriteByte('(')
+ for i, bv := range value.Values {
+ if i != 0 {
+ buf.WriteString(", ")
+ }
+ sqltypes.ProtoToValue(bv).EncodeSQL(buf)
+ }
+ buf.WriteByte(')')
+}
+
+// FetchBindVar resolves the bind variable by fetching it from bindVariables.
+func FetchBindVar(name string, bindVariables map[string]*querypb.BindVariable) (val *querypb.BindVariable, isList bool, err error) {
+ name = name[1:]
+ if name[0] == ':' {
+ name = name[1:]
+ isList = true
+ }
+ supplied, ok := bindVariables[name]
+ if !ok {
+ return nil, false, fmt.Errorf("missing bind var %s", name)
+ }
+
+ if isList {
+ if supplied.Type != querypb.Type_TUPLE {
+ return nil, false, fmt.Errorf("unexpected list arg type (%v) for key %s", supplied.Type, name)
+ }
+ if len(supplied.Values) == 0 {
+ return nil, false, fmt.Errorf("empty list supplied for %s", name)
+ }
+ return supplied, true, nil
+ }
+
+ if supplied.Type == querypb.Type_TUPLE {
+ return nil, false, fmt.Errorf("unexpected arg type (TUPLE) for non-list key %s", name)
+ }
+
+ return supplied, false, nil
+}
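+
+// Illustrative sketch (not part of the vendored code): FetchBindVar strips the
+// leading ':' (or '::' for list args) before looking up the variable.
+//
+//   bvs := map[string]*querypb.BindVariable{
+//       "id": {Type: querypb.Type_INT64, Value: []byte("5")},
+//   }
+//   bv, isList, err := FetchBindVar(":id", bvs)
+//   // bv == bvs["id"], isList == false, err == nil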
diff --git a/vendor/github.com/CovenantSQL/sqlparser/redact_query.go b/vendor/github.com/CovenantSQL/sqlparser/redact_query.go
new file mode 100644
index 000000000..6f5d43135
--- /dev/null
+++ b/vendor/github.com/CovenantSQL/sqlparser/redact_query.go
@@ -0,0 +1,19 @@
+package sqlparser
+
+import querypb "github.com/CovenantSQL/sqlparser/dependency/querypb"
+
+// RedactSQLQuery returns a sql string with the params stripped out for display
+func RedactSQLQuery(sql string) (string, error) {
+ bv := map[string]*querypb.BindVariable{}
+ sqlStripped, comments := SplitMarginComments(sql)
+
+ stmt, err := Parse(sqlStripped)
+ if err != nil {
+ return "", err
+ }
+
+ prefix := "redacted"
+ Normalize(stmt, bv, prefix)
+
+ return comments.Leading + String(stmt) + comments.Trailing, nil
+}
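+
+// Illustrative sketch (not part of the vendored code): literals are replaced
+// with :redacted1, :redacted2, ... in the returned string.
+//
+//   out, _ := RedactSQLQuery("select * from t where a = 1 and b = 'x'")
+//   // out == "select * from t where a = :redacted1 and b = :redacted2"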
diff --git a/vendor/github.com/CovenantSQL/sqlparser/sql.go b/vendor/github.com/CovenantSQL/sqlparser/sql.go
new file mode 100644
index 000000000..553226726
--- /dev/null
+++ b/vendor/github.com/CovenantSQL/sqlparser/sql.go
@@ -0,0 +1,3686 @@
+//line sql.y:19
+package sqlparser
+
+import __yyfmt__ "fmt"
+
+//line sql.y:19
+func setParseTree(yylex interface{}, stmt Statement) {
+ yylex.(*Tokenizer).ParseTree = stmt
+}
+
+func setAllowComments(yylex interface{}, allow bool) {
+ yylex.(*Tokenizer).AllowComments = allow
+}
+
+func setDDL(yylex interface{}, ddl *DDL) {
+ yylex.(*Tokenizer).partialDDL = ddl
+}
+
+func incNesting(yylex interface{}) bool {
+ yylex.(*Tokenizer).nesting++
+ if yylex.(*Tokenizer).nesting == 200 {
+ return true
+ }
+ return false
+}
+
+func decNesting(yylex interface{}) {
+ yylex.(*Tokenizer).nesting--
+}
+
+// forceEOF forces the lexer to end prematurely. Not all SQL statements
+// are supported by the Parser, thus calling forceEOF will make the lexer
+// return EOF early.
+func forceEOF(yylex interface{}) {
+ yylex.(*Tokenizer).ForceEOF = true
+}
+
+//line sql.y:54
+type yySymType struct {
+ yys int
+ empty struct{}
+ statement Statement
+ selStmt SelectStatement
+ ddl *DDL
+ ins *Insert
+ byt byte
+ bytes []byte
+ bytes2 [][]byte
+ str string
+ strs []string
+ selectExprs SelectExprs
+ selectExpr SelectExpr
+ columns Columns
+ colName *ColName
+ tableExprs TableExprs
+ tableExpr TableExpr
+ joinCondition JoinCondition
+ tableName TableName
+ expr Expr
+ exprs Exprs
+ boolVal BoolVal
+ colTuple ColTuple
+ values Values
+ valTuple ValTuple
+ subquery *Subquery
+ whens []*When
+ when *When
+ orderBy OrderBy
+ order *Order
+ limit *Limit
+ updateExprs UpdateExprs
+ setExprs SetExprs
+ updateExpr *UpdateExpr
+ setExpr *SetExpr
+ colIdent ColIdent
+ tableIdent TableIdent
+ convertType *ConvertType
+ aliasedTableName *AliasedTableExpr
+ TableSpec *TableSpec
+ columnType ColumnType
+ colKeyOpt ColumnKeyOption
+ optVal *SQLVal
+ LengthScaleOption LengthScaleOption
+ columnDefinition *ColumnDefinition
+ indexDefinition *IndexDefinition
+ indexInfo *IndexInfo
+ indexColumn *IndexColumn
+ indexColumns []*IndexColumn
+}
+
+const LEX_ERROR = 57346
+const UNION = 57347
+const SELECT = 57348
+const INSERT = 57349
+const UPDATE = 57350
+const DELETE = 57351
+const FROM = 57352
+const WHERE = 57353
+const GROUP = 57354
+const HAVING = 57355
+const ORDER = 57356
+const BY = 57357
+const LIMIT = 57358
+const OFFSET = 57359
+const ALL = 57360
+const DISTINCT = 57361
+const AS = 57362
+const EXISTS = 57363
+const ASC = 57364
+const DESC = 57365
+const INTO = 57366
+const KEY = 57367
+const DEFAULT = 57368
+const SET = 57369
+const VALUES = 57370
+const LAST_INSERT_ID = 57371
+const JOIN = 57372
+const LEFT = 57373
+const RIGHT = 57374
+const INNER = 57375
+const OUTER = 57376
+const CROSS = 57377
+const NATURAL = 57378
+const ON = 57379
+const USING = 57380
+const ID = 57381
+const HEX = 57382
+const STRING = 57383
+const INTEGRAL = 57384
+const FLOAT = 57385
+const HEXNUM = 57386
+const VALUE_ARG = 57387
+const LIST_ARG = 57388
+const COMMENT = 57389
+const NULL = 57390
+const TRUE = 57391
+const FALSE = 57392
+const OR = 57393
+const AND = 57394
+const NOT = 57395
+const BETWEEN = 57396
+const CASE = 57397
+const WHEN = 57398
+const THEN = 57399
+const ELSE = 57400
+const END = 57401
+const LE = 57402
+const GE = 57403
+const NE = 57404
+const IS = 57405
+const LIKE = 57406
+const REGEXP = 57407
+const IN = 57408
+const NULL_SAFE_NOTEQUAL = 57409
+const SHIFT_LEFT = 57410
+const SHIFT_RIGHT = 57411
+const DIV = 57412
+const MOD = 57413
+const UNARY = 57414
+const INTERVAL = 57415
+const CREATE = 57416
+const ALTER = 57417
+const DROP = 57418
+const RENAME = 57419
+const ADD = 57420
+const TABLE = 57421
+const INDEX = 57422
+const TO = 57423
+const IGNORE = 57424
+const IF = 57425
+const UNIQUE = 57426
+const PRIMARY = 57427
+const COLUMN = 57428
+const CONSTRAINT = 57429
+const FOREIGN = 57430
+const SHOW = 57431
+const DESCRIBE = 57432
+const DATE = 57433
+const ESCAPE = 57434
+const TINYINT = 57435
+const SMALLINT = 57436
+const MEDIUMINT = 57437
+const INT = 57438
+const INTEGER = 57439
+const BIGINT = 57440
+const INTNUM = 57441
+const REAL = 57442
+const DOUBLE = 57443
+const FLOAT_TYPE = 57444
+const DECIMAL = 57445
+const NUMERIC = 57446
+const TIME = 57447
+const TIMESTAMP = 57448
+const DATETIME = 57449
+const YEAR = 57450
+const CHAR = 57451
+const VARCHAR = 57452
+const BOOL = 57453
+const NCHAR = 57454
+const TEXT = 57455
+const TINYTEXT = 57456
+const MEDIUMTEXT = 57457
+const LONGTEXT = 57458
+const BLOB = 57459
+const TINYBLOB = 57460
+const MEDIUMBLOB = 57461
+const LONGBLOB = 57462
+const AUTO_INCREMENT = 57463
+const SIGNED = 57464
+const UNSIGNED = 57465
+const ZEROFILL = 57466
+const TABLES = 57467
+const CURRENT_TIMESTAMP = 57468
+const CURRENT_DATE = 57469
+const CURRENT_TIME = 57470
+const REPLACE = 57471
+const CAST = 57472
+const SUBSTR = 57473
+const GROUP_CONCAT = 57474
+const SEPARATOR = 57475
+const UNUSED = 57476
+
+var yyToknames = [...]string{
+ "$end",
+ "error",
+ "$unk",
+ "LEX_ERROR",
+ "UNION",
+ "SELECT",
+ "INSERT",
+ "UPDATE",
+ "DELETE",
+ "FROM",
+ "WHERE",
+ "GROUP",
+ "HAVING",
+ "ORDER",
+ "BY",
+ "LIMIT",
+ "OFFSET",
+ "ALL",
+ "DISTINCT",
+ "AS",
+ "EXISTS",
+ "ASC",
+ "DESC",
+ "INTO",
+ "KEY",
+ "DEFAULT",
+ "SET",
+ "VALUES",
+ "LAST_INSERT_ID",
+ "JOIN",
+ "LEFT",
+ "RIGHT",
+ "INNER",
+ "OUTER",
+ "CROSS",
+ "NATURAL",
+ "ON",
+ "USING",
+ "'('",
+ "','",
+ "')'",
+ "ID",
+ "HEX",
+ "STRING",
+ "INTEGRAL",
+ "FLOAT",
+ "HEXNUM",
+ "VALUE_ARG",
+ "LIST_ARG",
+ "COMMENT",
+ "NULL",
+ "TRUE",
+ "FALSE",
+ "OR",
+ "AND",
+ "NOT",
+ "'!'",
+ "BETWEEN",
+ "CASE",
+ "WHEN",
+ "THEN",
+ "ELSE",
+ "END",
+ "'='",
+ "'<'",
+ "'>'",
+ "LE",
+ "GE",
+ "NE",
+ "IS",
+ "LIKE",
+ "REGEXP",
+ "IN",
+ "NULL_SAFE_NOTEQUAL",
+ "'|'",
+ "'&'",
+ "SHIFT_LEFT",
+ "SHIFT_RIGHT",
+ "'+'",
+ "'-'",
+ "'*'",
+ "'/'",
+ "DIV",
+ "'%'",
+ "MOD",
+ "'^'",
+ "'~'",
+ "UNARY",
+ "INTERVAL",
+ "'.'",
+ "CREATE",
+ "ALTER",
+ "DROP",
+ "RENAME",
+ "ADD",
+ "TABLE",
+ "INDEX",
+ "TO",
+ "IGNORE",
+ "IF",
+ "UNIQUE",
+ "PRIMARY",
+ "COLUMN",
+ "CONSTRAINT",
+ "FOREIGN",
+ "SHOW",
+ "DESCRIBE",
+ "DATE",
+ "ESCAPE",
+ "TINYINT",
+ "SMALLINT",
+ "MEDIUMINT",
+ "INT",
+ "INTEGER",
+ "BIGINT",
+ "INTNUM",
+ "REAL",
+ "DOUBLE",
+ "FLOAT_TYPE",
+ "DECIMAL",
+ "NUMERIC",
+ "TIME",
+ "TIMESTAMP",
+ "DATETIME",
+ "YEAR",
+ "CHAR",
+ "VARCHAR",
+ "BOOL",
+ "NCHAR",
+ "TEXT",
+ "TINYTEXT",
+ "MEDIUMTEXT",
+ "LONGTEXT",
+ "BLOB",
+ "TINYBLOB",
+ "MEDIUMBLOB",
+ "LONGBLOB",
+ "AUTO_INCREMENT",
+ "SIGNED",
+ "UNSIGNED",
+ "ZEROFILL",
+ "TABLES",
+ "CURRENT_TIMESTAMP",
+ "CURRENT_DATE",
+ "CURRENT_TIME",
+ "REPLACE",
+ "CAST",
+ "SUBSTR",
+ "GROUP_CONCAT",
+ "SEPARATOR",
+ "UNUSED",
+ "';'",
+}
+var yyStatenames = [...]string{}
+
+const yyEofCode = 1
+const yyErrCode = 2
+const yyInitialStackSize = 16
+
+//line yacctab:1
+var yyExca = [...]int{
+ -1, 1,
+ 1, -1,
+ -2, 0,
+ -1, 3,
+ 5, 17,
+ -2, 4,
+ -1, 113,
+ 1, 153,
+ 5, 153,
+ 11, 153,
+ 12, 153,
+ 13, 153,
+ 14, 153,
+ 16, 153,
+ 27, 153,
+ 30, 153,
+ 31, 153,
+ 33, 153,
+ 35, 153,
+ 36, 153,
+ 37, 153,
+ 38, 153,
+ 40, 153,
+ 41, 153,
+ 152, 153,
+ -2, 166,
+ -1, 180,
+ 90, 353,
+ -2, 349,
+ -1, 181,
+ 90, 354,
+ -2, 350,
+ -1, 398,
+ 5, 17,
+ -2, 322,
+ -1, 514,
+ 90, 356,
+ -2, 352,
+ -1, 546,
+ 5, 18,
+ -2, 225,
+ -1, 604,
+ 5, 18,
+ -2, 323,
+ -1, 669,
+ 5, 17,
+ -2, 325,
+ -1, 726,
+ 5, 18,
+ -2, 326,
+}
+
+const yyPrivate = 57344
+
+const yyLast = 3998
+
+var yyAct = [...]int{
+
+ 181, 377, 618, 133, 529, 425, 455, 509, 333, 530,
+ 139, 98, 31, 582, 426, 424, 484, 402, 521, 46,
+ 165, 469, 203, 93, 93, 118, 331, 541, 436, 204,
+ 3, 421, 513, 460, 167, 430, 131, 183, 93, 44,
+ 144, 207, 194, 451, 401, 30, 110, 709, 135, 561,
+ 443, 511, 109, 622, 699, 96, 623, 624, 625, 697,
+ 108, 681, 461, 626, 24, 26, 15, 16, 462, 125,
+ 86, 122, 106, 461, 438, 88, 87, 255, 635, 84,
+ 85, 22, 120, 127, 93, 250, 249, 713, 83, 93,
+ 638, 555, 607, 589, 539, 473, 376, 28, 350, 351,
+ 352, 353, 354, 355, 356, 349, 93, 130, 136, 184,
+ 349, 335, 124, 337, 185, 93, 93, 93, 538, 168,
+ 25, 89, 93, 664, 522, 93, 337, 93, 93, 491,
+ 522, 93, 596, 186, 146, 440, 248, 682, 680, 438,
+ 441, 97, 489, 490, 488, 25, 437, 340, 339, 18,
+ 19, 20, 115, 103, 559, 627, 119, 114, 370, 371,
+ 372, 373, 374, 256, 21, 23, 258, 684, 477, 479,
+ 480, 338, 113, 478, 28, 90, 94, 378, 575, 576,
+ 577, 384, 444, 330, 487, 336, 335, 336, 335, 93,
+ 394, 190, 683, 93, 93, 93, 93, 393, 126, 564,
+ 563, 337, 93, 337, 27, 562, 93, 417, 418, 93,
+ 725, 437, 404, 46, 620, 435, 434, 398, 714, 715,
+ 591, 677, 676, 25, 352, 353, 354, 355, 356, 349,
+ 391, 392, 403, 429, 205, 419, 123, 336, 335, 431,
+ 407, 129, 409, 415, 666, 406, 119, 408, 556, 113,
+ 420, 507, 457, 337, 336, 335, 93, 246, 189, 134,
+ 367, 369, 336, 335, 466, 134, 631, 630, 720, 211,
+ 337, 606, 134, 719, 247, 628, 368, 251, 337, 253,
+ 254, 515, 134, 24, 463, 375, 453, 454, 379, 380,
+ 381, 382, 32, 385, 192, 134, 467, 386, 387, 388,
+ 24, 213, 212, 196, 199, 668, 197, 399, 198, 200,
+ 542, 543, 114, 114, 114, 114, 28, 422, 466, 191,
+ 515, 396, 397, 602, 192, 205, 466, 113, 113, 113,
+ 113, 629, 192, 28, 46, 537, 592, 24, 466, 549,
+ 113, 400, 389, 28, 474, 475, 537, 481, 482, 445,
+ 456, 613, 483, 552, 471, 492, 493, 494, 495, 496,
+ 497, 498, 499, 500, 501, 502, 503, 504, 505, 506,
+ 28, 542, 543, 196, 199, 46, 197, 485, 198, 200,
+ 452, 516, 458, 40, 12, 545, 412, 411, 378, 517,
+ 413, 512, 414, 199, 410, 508, 558, 429, 184, 718,
+ 46, 717, 404, 446, 447, 553, 404, 459, 464, 419,
+ 252, 202, 514, 132, 448, 449, 450, 404, 102, 519,
+ 535, 104, 518, 546, 99, 46, 523, 527, 688, 533,
+ 550, 548, 531, 536, 547, 100, 544, 403, 524, 525,
+ 526, 528, 32, 687, 35, 429, 348, 347, 357, 358,
+ 350, 351, 352, 353, 354, 355, 356, 349, 637, 93,
+ 37, 38, 422, 93, 554, 128, 116, 34, 93, 95,
+ 36, 46, 29, 1, 617, 433, 423, 486, 117, 39,
+ 583, 432, 679, 621, 404, 439, 566, 560, 512, 25,
+ 571, 471, 572, 570, 429, 442, 557, 569, 216, 379,
+ 579, 580, 581, 217, 568, 578, 574, 215, 219, 514,
+ 218, 585, 214, 586, 646, 632, 201, 532, 206, 25,
+ 468, 41, 121, 485, 590, 366, 105, 598, 599, 600,
+ 601, 597, 674, 534, 390, 182, 603, 604, 605, 595,
+ 686, 378, 636, 594, 383, 520, 145, 610, 476, 608,
+ 46, 612, 46, 46, 611, 609, 155, 616, 404, 614,
+ 429, 152, 587, 154, 153, 615, 46, 142, 395, 341,
+ 429, 593, 429, 619, 143, 514, 137, 112, 403, 187,
+ 634, 195, 193, 540, 111, 465, 429, 114, 708, 14,
+ 46, 643, 33, 639, 641, 642, 644, 107, 11, 10,
+ 17, 9, 113, 662, 645, 665, 663, 46, 46, 8,
+ 429, 565, 667, 7, 46, 567, 46, 6, 5, 4,
+ 673, 550, 166, 486, 101, 13, 2, 671, 672, 0,
+ 0, 0, 588, 0, 675, 669, 429, 531, 0, 46,
+ 0, 0, 0, 0, 0, 91, 91, 0, 691, 0,
+ 692, 690, 0, 645, 0, 0, 404, 695, 0, 429,
+ 91, 0, 0, 0, 0, 0, 706, 0, 0, 0,
+ 0, 0, 0, 711, 0, 710, 685, 0, 46, 0,
+ 716, 0, 0, 0, 0, 633, 0, 0, 0, 0,
+ 722, 0, 0, 0, 0, 0, 721, 378, 619, 0,
+ 712, 0, 0, 531, 0, 0, 91, 0, 0, 0,
+ 0, 91, 724, 0, 46, 46, 726, 0, 723, 0,
+ 0, 0, 532, 0, 0, 670, 0, 729, 91, 0,
+ 0, 0, 0, 0, 727, 728, 0, 91, 209, 91,
+ 0, 0, 0, 0, 91, 0, 0, 91, 0, 91,
+ 91, 0, 0, 257, 334, 230, 0, 235, 236, 237,
+ 238, 239, 240, 0, 241, 242, 243, 244, 245, 231,
+ 232, 233, 234, 220, 221, 0, 0, 222, 223, 224,
+ 225, 226, 227, 228, 229, 0, 0, 0, 532, 0,
+ 25, 347, 357, 358, 350, 351, 352, 353, 354, 355,
+ 356, 349, 678, 0, 0, 0, 0, 0, 0, 0,
+ 0, 91, 0, 0, 405, 91, 91, 91, 91, 0,
+ 0, 0, 0, 0, 416, 0, 0, 0, 91, 0,
+ 693, 209, 694, 0, 696, 0, 698, 0, 700, 701,
+ 702, 703, 704, 705, 323, 314, 293, 325, 274, 285,
+ 329, 286, 287, 308, 265, 301, 67, 0, 277, 261,
+ 283, 262, 275, 295, 298, 273, 316, 327, 59, 297,
+ 299, 313, 292, 309, 268, 303, 306, 326, 91, 0,
+ 0, 45, 0, 427, 428, 0, 0, 0, 0, 0,
+ 305, 322, 284, 307, 260, 304, 0, 264, 266, 328,
+ 320, 280, 281, 551, 0, 0, 0, 0, 0, 296,
+ 300, 310, 290, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 278, 0, 302, 0, 0, 0, 294, 0,
+ 267, 0, 279, 311, 259, 318, 291, 321, 289, 288,
+ 324, 68, 0, 0, 56, 317, 276, 51, 282, 76,
+ 71, 63, 57, 58, 47, 0, 69, 54, 55, 53,
+ 66, 73, 74, 52, 81, 50, 80, 49, 65, 72,
+ 77, 64, 61, 48, 75, 62, 60, 263, 70, 78,
+ 82, 319, 271, 269, 270, 312, 0, 272, 0, 315,
+ 79, 0, 0, 0, 0, 0, 0, 0, 334, 257,
+ 0, 0, 0, 0, 405, 0, 67, 0, 405, 334,
+ 334, 334, 0, 0, 0, 0, 0, 0, 59, 405,
+ 0, 0, 0, 0, 343, 0, 346, 0, 0, 0,
+ 0, 45, 359, 360, 361, 362, 363, 364, 0, 344,
+ 345, 342, 365, 348, 347, 357, 358, 350, 351, 352,
+ 353, 354, 355, 356, 349, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 348, 347, 357, 358, 350, 351,
+ 352, 353, 354, 355, 356, 349, 0, 0, 0, 0,
+ 0, 91, 0, 0, 0, 91, 405, 0, 0, 334,
+ 91, 68, 0, 0, 56, 0, 257, 51, 0, 76,
+ 71, 63, 57, 58, 47, 0, 69, 54, 55, 53,
+ 66, 73, 74, 52, 81, 50, 80, 49, 65, 72,
+ 77, 64, 61, 48, 75, 62, 60, 640, 70, 78,
+ 82, 0, 0, 334, 0, 0, 0, 0, 0, 0,
+ 79, 0, 334, 0, 0, 0, 0, 348, 347, 357,
+ 358, 350, 351, 352, 353, 354, 355, 356, 349, 0,
+ 405, 0, 257, 357, 358, 350, 351, 352, 353, 354,
+ 355, 356, 349, 323, 314, 293, 325, 274, 285, 329,
+ 286, 287, 308, 265, 301, 67, 0, 277, 261, 283,
+ 262, 275, 295, 298, 273, 316, 327, 59, 297, 299,
+ 313, 292, 309, 268, 303, 306, 326, 0, 0, 0,
+ 45, 334, 427, 428, 0, 0, 0, 0, 0, 305,
+ 322, 284, 307, 260, 304, 0, 264, 266, 328, 320,
+ 280, 281, 0, 0, 0, 0, 0, 0, 296, 300,
+ 310, 290, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 278, 0, 302, 0, 0, 0, 294, 405, 267,
+ 0, 279, 311, 259, 318, 291, 321, 289, 288, 324,
+ 68, 0, 0, 56, 317, 276, 51, 282, 76, 71,
+ 63, 57, 58, 47, 0, 69, 54, 55, 53, 66,
+ 73, 74, 52, 81, 50, 80, 49, 65, 72, 77,
+ 64, 61, 48, 75, 62, 60, 263, 70, 78, 82,
+ 319, 271, 269, 270, 312, 0, 272, 0, 315, 79,
+ 323, 314, 293, 325, 274, 285, 329, 286, 287, 308,
+ 265, 301, 67, 0, 277, 261, 283, 262, 275, 295,
+ 298, 273, 316, 327, 59, 297, 299, 313, 292, 309,
+ 268, 303, 306, 326, 0, 0, 0, 45, 0, 0,
+ 0, 0, 0, 0, 0, 0, 305, 322, 284, 307,
+ 260, 304, 0, 264, 266, 328, 320, 280, 281, 0,
+ 0, 0, 0, 0, 0, 296, 300, 310, 290, 0,
+ 0, 0, 0, 0, 0, 0, 689, 0, 278, 0,
+ 302, 0, 0, 0, 294, 0, 267, 0, 279, 311,
+ 259, 318, 291, 321, 289, 288, 324, 68, 0, 0,
+ 56, 317, 276, 51, 282, 76, 71, 63, 57, 58,
+ 47, 0, 69, 54, 55, 53, 66, 73, 74, 52,
+ 81, 50, 80, 49, 65, 72, 77, 64, 61, 48,
+ 75, 62, 60, 263, 70, 78, 82, 319, 271, 269,
+ 270, 312, 0, 272, 0, 315, 79, 323, 314, 293,
+ 325, 274, 285, 329, 286, 287, 308, 265, 301, 67,
+ 0, 277, 261, 283, 262, 275, 295, 298, 273, 316,
+ 327, 59, 297, 299, 313, 292, 309, 268, 303, 306,
+ 326, 28, 0, 0, 45, 0, 0, 0, 0, 0,
+ 0, 0, 0, 305, 322, 284, 307, 260, 304, 0,
+ 264, 266, 328, 320, 280, 281, 0, 0, 0, 0,
+ 0, 0, 296, 300, 310, 290, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 278, 0, 302, 0, 0,
+ 0, 294, 0, 267, 0, 279, 311, 259, 318, 291,
+ 321, 289, 288, 324, 68, 0, 0, 56, 317, 276,
+ 51, 282, 76, 71, 63, 57, 58, 47, 0, 69,
+ 54, 55, 53, 66, 73, 74, 52, 81, 50, 80,
+ 49, 65, 72, 77, 64, 61, 48, 75, 62, 60,
+ 263, 70, 78, 82, 319, 271, 269, 270, 312, 0,
+ 272, 0, 315, 79, 323, 314, 293, 325, 274, 285,
+ 329, 286, 287, 308, 265, 301, 67, 0, 277, 261,
+ 283, 262, 275, 295, 298, 273, 316, 327, 59, 297,
+ 299, 313, 292, 309, 268, 303, 306, 326, 0, 0,
+ 0, 180, 0, 0, 0, 0, 0, 0, 0, 0,
+ 305, 322, 284, 307, 260, 304, 0, 264, 266, 328,
+ 320, 280, 281, 0, 0, 0, 0, 0, 0, 296,
+ 300, 310, 290, 0, 0, 0, 0, 0, 0, 0,
+ 573, 0, 278, 0, 302, 0, 0, 0, 294, 0,
+ 267, 0, 279, 311, 259, 318, 291, 321, 289, 288,
+ 324, 68, 0, 0, 56, 317, 276, 51, 282, 76,
+ 71, 63, 57, 58, 47, 0, 69, 54, 55, 53,
+ 66, 73, 74, 52, 81, 50, 80, 49, 65, 72,
+ 77, 64, 61, 48, 75, 62, 60, 263, 70, 78,
+ 82, 319, 271, 269, 270, 312, 0, 272, 0, 315,
+ 79, 323, 314, 293, 325, 274, 285, 329, 286, 287,
+ 308, 265, 301, 67, 0, 277, 261, 283, 262, 275,
+ 295, 298, 273, 316, 327, 59, 297, 299, 313, 292,
+ 309, 268, 303, 306, 326, 0, 0, 0, 45, 0,
+ 0, 0, 0, 0, 0, 0, 0, 305, 322, 284,
+ 307, 260, 304, 0, 264, 266, 328, 320, 280, 281,
+ 0, 0, 0, 0, 0, 0, 296, 300, 310, 290,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 278,
+ 0, 302, 0, 0, 0, 294, 0, 267, 0, 279,
+ 311, 259, 318, 291, 321, 289, 288, 324, 68, 0,
+ 0, 56, 317, 276, 51, 282, 76, 71, 63, 57,
+ 58, 47, 0, 69, 54, 55, 53, 66, 73, 74,
+ 52, 81, 50, 80, 49, 65, 72, 77, 64, 61,
+ 48, 75, 62, 60, 263, 70, 78, 82, 319, 271,
+ 269, 270, 312, 0, 272, 0, 315, 79, 323, 314,
+ 293, 325, 274, 285, 329, 286, 287, 308, 265, 301,
+ 67, 0, 277, 261, 283, 262, 275, 295, 298, 273,
+ 316, 327, 59, 297, 299, 313, 292, 309, 268, 303,
+ 306, 326, 0, 0, 0, 180, 0, 0, 0, 0,
+ 0, 0, 0, 0, 305, 322, 284, 307, 260, 304,
+ 0, 264, 266, 328, 320, 280, 281, 0, 0, 0,
+ 0, 0, 0, 296, 300, 310, 290, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 278, 0, 302, 0,
+ 0, 0, 294, 0, 267, 0, 279, 311, 259, 318,
+ 291, 321, 289, 288, 324, 68, 0, 0, 56, 317,
+ 276, 51, 282, 76, 71, 63, 57, 58, 47, 0,
+ 69, 54, 55, 53, 66, 73, 74, 52, 81, 50,
+ 80, 49, 65, 72, 77, 64, 61, 48, 75, 62,
+ 60, 263, 70, 78, 82, 319, 271, 269, 270, 312,
+ 0, 272, 0, 315, 79, 323, 314, 293, 325, 274,
+ 285, 329, 286, 287, 308, 265, 301, 67, 0, 277,
+ 261, 283, 262, 275, 295, 298, 273, 316, 327, 59,
+ 297, 299, 313, 292, 309, 268, 303, 306, 326, 0,
+ 0, 0, 92, 0, 0, 0, 0, 0, 0, 0,
+ 0, 305, 322, 284, 307, 260, 304, 0, 264, 266,
+ 328, 320, 280, 281, 0, 0, 0, 0, 0, 0,
+ 296, 300, 310, 290, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 278, 0, 302, 0, 0, 0, 294,
+ 0, 267, 0, 279, 311, 259, 318, 291, 321, 289,
+ 288, 324, 68, 0, 0, 56, 317, 276, 51, 282,
+ 76, 71, 63, 57, 58, 47, 0, 69, 54, 55,
+ 53, 66, 73, 74, 52, 81, 50, 80, 49, 65,
+ 72, 77, 64, 61, 48, 75, 62, 60, 263, 70,
+ 78, 82, 319, 271, 269, 270, 312, 0, 272, 0,
+ 315, 79, 67, 0, 510, 0, 141, 0, 0, 0,
+ 0, 140, 0, 173, 59, 707, 134, 0, 0, 0,
+ 0, 0, 0, 0, 28, 0, 0, 180, 157, 156,
+ 158, 159, 160, 161, 0, 0, 162, 163, 164, 0,
+ 0, 138, 150, 0, 172, 0, 0, 0, 0, 0,
+ 348, 347, 357, 358, 350, 351, 352, 353, 354, 355,
+ 356, 349, 0, 0, 147, 148, 332, 0, 0, 0,
+ 178, 0, 149, 0, 151, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 177, 0, 68, 0, 0,
+ 56, 0, 0, 51, 0, 76, 71, 63, 57, 58,
+ 47, 0, 69, 54, 55, 53, 66, 73, 74, 52,
+ 81, 50, 80, 49, 65, 72, 77, 64, 61, 48,
+ 75, 62, 60, 0, 70, 78, 82, 0, 174, 175,
+ 176, 179, 169, 170, 171, 67, 79, 0, 0, 141,
+ 0, 0, 0, 0, 140, 134, 173, 59, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 28, 0, 0,
+ 180, 157, 156, 158, 159, 160, 161, 0, 0, 162,
+ 163, 164, 0, 0, 138, 150, 0, 172, 0, 348,
+ 347, 357, 358, 350, 351, 352, 353, 354, 355, 356,
+ 349, 0, 0, 0, 0, 0, 0, 147, 148, 332,
+ 0, 0, 0, 178, 0, 149, 0, 151, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 177, 0,
+ 68, 0, 0, 56, 0, 0, 51, 0, 76, 71,
+ 63, 57, 58, 47, 0, 69, 54, 55, 53, 66,
+ 73, 74, 52, 81, 50, 80, 49, 65, 72, 77,
+ 64, 61, 48, 75, 62, 60, 0, 70, 78, 82,
+ 0, 174, 175, 176, 179, 169, 170, 171, 67, 79,
+ 0, 0, 141, 0, 0, 0, 0, 140, 0, 173,
+ 59, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 28, 584, 134, 180, 157, 156, 158, 159, 160, 161,
+ 0, 0, 162, 163, 164, 0, 0, 138, 150, 0,
+ 172, 348, 347, 357, 358, 350, 351, 352, 353, 354,
+ 355, 356, 349, 0, 0, 0, 0, 0, 0, 0,
+ 147, 148, 0, 0, 0, 0, 178, 0, 149, 0,
+ 151, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 177, 0, 68, 0, 0, 56, 0, 0, 51,
+ 0, 76, 71, 63, 57, 58, 47, 0, 69, 54,
+ 55, 53, 66, 73, 74, 52, 81, 50, 80, 49,
+ 65, 72, 77, 64, 61, 48, 75, 62, 60, 0,
+ 70, 78, 82, 24, 174, 175, 176, 179, 169, 170,
+ 171, 0, 79, 0, 67, 0, 0, 0, 141, 0,
+ 0, 0, 0, 140, 0, 173, 59, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 28, 0, 0, 180,
+ 157, 156, 158, 159, 160, 161, 0, 0, 162, 163,
+ 164, 0, 0, 138, 150, 0, 172, 348, 347, 357,
+ 358, 350, 351, 352, 353, 354, 355, 356, 349, 0,
+ 0, 0, 0, 0, 0, 0, 147, 148, 0, 0,
+ 0, 0, 178, 0, 149, 0, 151, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 177, 0, 68,
+ 0, 0, 56, 0, 0, 51, 0, 76, 71, 63,
+ 57, 58, 47, 0, 69, 54, 55, 53, 66, 73,
+ 74, 52, 81, 50, 80, 49, 65, 72, 77, 64,
+ 61, 48, 75, 62, 60, 0, 70, 78, 82, 0,
+ 174, 175, 176, 179, 169, 170, 171, 67, 79, 0,
+ 0, 141, 0, 0, 0, 0, 140, 0, 173, 59,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 28,
+ 0, 0, 180, 157, 156, 158, 159, 160, 161, 0,
+ 0, 162, 163, 164, 0, 0, 138, 150, 0, 172,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 147,
+ 148, 0, 0, 0, 0, 178, 0, 149, 0, 151,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 177, 0, 68, 0, 0, 56, 0, 0, 51, 0,
+ 76, 71, 63, 57, 58, 47, 0, 69, 54, 55,
+ 53, 66, 73, 74, 52, 81, 50, 80, 49, 65,
+ 72, 77, 64, 61, 48, 75, 62, 60, 0, 70,
+ 78, 82, 67, 174, 175, 176, 179, 169, 170, 171,
+ 0, 79, 0, 173, 59, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 28, 0, 0, 180, 157, 156,
+ 158, 159, 160, 161, 0, 0, 162, 163, 164, 0,
+ 0, 0, 150, 0, 172, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 147, 148, 0, 0, 0, 0,
+ 178, 0, 149, 661, 151, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 177, 0, 68, 0, 0,
+ 56, 0, 0, 51, 0, 76, 71, 63, 57, 58,
+ 47, 0, 69, 54, 55, 53, 66, 73, 74, 52,
+ 81, 50, 80, 49, 65, 72, 77, 64, 61, 48,
+ 75, 62, 60, 0, 70, 78, 82, 0, 174, 175,
+ 176, 179, 169, 170, 171, 67, 79, 648, 470, 657,
+ 658, 659, 656, 0, 660, 0, 0, 59, 0, 650,
+ 0, 653, 655, 649, 0, 647, 0, 0, 651, 0,
+ 45, 0, 472, 0, 0, 0, 24, 0, 652, 654,
+ 0, 0, 336, 335, 0, 0, 0, 67, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 337, 59,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 28,
+ 0, 0, 45, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 68, 0, 0, 56, 0, 0, 51, 0, 76, 71,
+ 63, 57, 58, 47, 0, 69, 54, 55, 53, 66,
+ 73, 74, 52, 81, 50, 80, 49, 65, 72, 77,
+ 64, 61, 48, 75, 62, 60, 0, 70, 78, 82,
+ 0, 0, 68, 0, 0, 56, 0, 0, 51, 79,
+ 76, 71, 63, 57, 58, 47, 0, 69, 54, 55,
+ 53, 66, 73, 74, 52, 81, 50, 80, 49, 65,
+ 72, 77, 64, 61, 48, 75, 62, 60, 24, 70,
+ 78, 82, 0, 0, 0, 0, 0, 0, 0, 67,
+ 0, 79, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 59, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 28, 0, 0, 92, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 67, 0, 0, 208, 0, 0, 0, 0, 0,
+ 0, 0, 0, 59, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 92, 0, 210, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 68, 0, 0, 56, 0, 0,
+ 51, 0, 76, 71, 63, 57, 58, 47, 0, 69,
+ 54, 55, 53, 66, 73, 74, 52, 81, 50, 80,
+ 49, 65, 72, 77, 64, 61, 48, 75, 62, 60,
+ 0, 70, 78, 82, 0, 0, 68, 0, 0, 56,
+ 0, 0, 51, 79, 76, 71, 63, 57, 58, 47,
+ 0, 69, 54, 55, 53, 66, 73, 74, 52, 81,
+ 50, 80, 49, 65, 72, 77, 64, 61, 48, 75,
+ 62, 60, 67, 70, 78, 82, 0, 0, 0, 0,
+ 0, 0, 0, 0, 59, 79, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 45, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 67, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 59, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 45,
+ 0, 472, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 42, 0, 0, 0, 0, 43, 68, 0, 0,
+ 56, 0, 0, 51, 0, 76, 71, 63, 57, 58,
+ 47, 0, 69, 54, 55, 53, 66, 73, 74, 52,
+ 81, 50, 80, 49, 65, 72, 77, 64, 61, 48,
+ 75, 62, 60, 0, 70, 78, 82, 0, 0, 68,
+ 0, 0, 56, 0, 0, 51, 79, 76, 71, 63,
+ 57, 58, 47, 0, 69, 54, 55, 53, 66, 73,
+ 74, 52, 81, 50, 80, 49, 65, 72, 77, 64,
+ 61, 48, 75, 62, 60, 67, 70, 78, 82, 0,
+ 0, 0, 0, 0, 0, 0, 0, 59, 79, 0,
+ 0, 0, 0, 0, 0, 0, 0, 28, 0, 0,
+ 92, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 67, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 59,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 92, 0, 210, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 68, 0, 0, 56, 0, 0, 51, 0, 76, 71,
+ 63, 57, 58, 47, 0, 69, 54, 55, 53, 66,
+ 73, 74, 52, 81, 50, 80, 49, 65, 72, 77,
+ 64, 61, 48, 75, 62, 60, 0, 70, 78, 82,
+ 0, 0, 68, 0, 0, 56, 0, 0, 51, 79,
+ 76, 71, 63, 57, 58, 47, 0, 69, 54, 55,
+ 53, 66, 73, 74, 52, 81, 50, 80, 49, 65,
+ 72, 77, 64, 61, 48, 75, 62, 60, 67, 70,
+ 78, 82, 0, 0, 0, 188, 0, 0, 0, 0,
+ 59, 79, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 92, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 67, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 59, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 45, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 68, 0, 0, 56, 0, 0, 51,
+ 0, 76, 71, 63, 57, 58, 47, 0, 69, 54,
+ 55, 53, 66, 73, 74, 52, 81, 50, 80, 49,
+ 65, 72, 77, 64, 61, 48, 75, 62, 60, 0,
+ 70, 78, 82, 0, 0, 68, 0, 0, 56, 0,
+ 0, 51, 79, 76, 71, 63, 57, 58, 47, 0,
+ 69, 54, 55, 53, 66, 73, 74, 52, 81, 50,
+ 80, 49, 65, 72, 77, 64, 61, 48, 75, 62,
+ 60, 67, 70, 78, 82, 0, 0, 0, 0, 0,
+ 0, 0, 0, 59, 79, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 180, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 67, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 59, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 92, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 68, 0, 0, 56,
+ 0, 0, 51, 0, 76, 71, 63, 57, 58, 47,
+ 0, 69, 54, 55, 53, 66, 73, 74, 52, 81,
+ 50, 80, 49, 65, 72, 77, 64, 61, 48, 75,
+ 62, 60, 0, 70, 78, 82, 0, 0, 68, 0,
+ 0, 56, 0, 0, 51, 79, 76, 71, 63, 57,
+ 58, 47, 0, 69, 54, 55, 53, 66, 73, 74,
+ 52, 81, 50, 80, 49, 65, 72, 77, 64, 61,
+ 48, 75, 62, 60, 0, 70, 78, 82, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 79,
+}
+var yyPact = [...]int{
+
+ 58, -1000, -107, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, 428, 462, -1000, -1000, -1000, 344, 3315, -8,
+ -17, -21, 3846, 3846, -1000, 331, 87, -1000, -1000, -1000,
+ -1000, 408, 420, 331, 403, -27, -1000, 3478, 456, -1000,
+ 204, -15, -29, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, 3846, -31, -31, -13, 455, 3846, -1000,
+ -1000, 17, -1000, -1000, -1000, 394, 218, -98, -1000, 2720,
+ 2720, 428, -1000, 331, -1000, 3641, -1000, 141, 292, 343,
+ -1000, -1000, -1000, 391, 3152, 3194, 3846, 261, -1000, 647,
+ 215, 3846, 80, -9, 3846, 389, 3846, 3846, -19, -1000,
+ 2050, 2318, -1000, -1000, -1000, -1000, 131, -1000, 2720, 968,
+ 304, 304, -1000, -1000, -1000, -1000, -1000, 2845, 2845, 2845,
+ 2845, 2845, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, 304, 6, -1000, 2587, 304,
+ 304, 304, 2720, 304, -1000, -1000, -1000, 304, 304, 304,
+ -1000, -1000, 302, -1000, 208, 408, 218, 294, 3846, -1000,
+ -1000, 3804, 3478, 3478, 3478, 3478, -1000, 364, 357, 356,
+ 362, 3846, -1000, 254, 218, 3152, -1000, -1000, 3520, -1000,
+ -1000, 451, 1168, 114, 84, -90, -1000, -1000, 310, -1000,
+ 310, 310, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, 310, 310, 310, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, 341, 341, 341, 311, 311, 345, -1000, 386, -41,
+ -30, -1000, -1000, -1000, -1000, 3846, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ 286, -1000, -1000, 2978, 5, 2720, 2720, 117, 2720, 2720,
+ 43, 2845, 135, 71, 2845, 2845, 2845, 2845, 2845, 2845,
+ 2845, 2845, 2845, 2845, 2845, 2845, 2845, 2845, 2845, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, 209, -1000, 331,
+ -1000, -1000, -1000, -1000, 989, 2185, 1903, 241, 133, 2587,
+ 2720, 3804, 394, 64, 133, 3804, 2318, 2318, 2318, 2720,
+ -1000, -1000, -1000, -1000, -1000, -1000, 3804, 304, -1000, 3020,
+ -1000, 306, -1000, 54, -1000, 4, 343, 334, 273, -1000,
+ -1000, -1000, -1000, 355, -1000, -1000, -1000, -1000, -1000, 218,
+ -1000, 428, 2720, 299, 839, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, 314, 380, 49, 206, -1000, -1000, 370,
+ -1000, 103, -92, -1000, -1000, 160, -1000, -1000, -1000, -1000,
+ -1000, -1000, 155, -1000, -1000, -1000, 154, -1000, 3846, -1000,
+ 204, -1000, 3846, 3804, -1000, 451, 2318, 3478, -1000, -1000,
+ 3357, -1000, -1000, 1609, 43, 56, -1000, -1000, 127, -1000,
+ -1000, 133, 133, 2572, -1000, -1000, -1000, -1000, 135, 2845,
+ 2845, 2845, 371, 2572, 2436, 1086, 715, -1000, 143, 143,
+ 24, 24, 24, 24, 24, 19, 19, 218, -1000, 218,
+ 2318, 298, 304, 3, -1000, 2720, -1000, 200, 296, 2318,
+ 70, -1000, 2720, 218, 224, 224, 224, -1000, 295, 283,
+ -1000, -1000, 2451, 218, 231, 2, 428, 3804, 2720, 1903,
+ -1000, -1000, 2720, 312, -1000, -1000, -1000, 408, 133, 1168,
+ -1000, 1168, 3683, -1000, 172, -1000, -1000, -85, 12, -1000,
+ -1000, -1000, 234, 291, 226, 1462, -1000, -1000, -20, 446,
+ -1000, 284, -1000, -1000, 0, -1000, -1000, -1000, -1000, 371,
+ 2572, 1072, -1000, 2845, 2845, -1000, -1000, 224, 2318, 1756,
+ 133, 2889, 2845, 278, 60, -1000, 2720, 183, -1000, -1000,
+ -1000, -1000, 304, -1000, -1000, 277, 3683, 3683, 408, -1000,
+ 133, -1000, 133, 3683, -1000, 839, -1000, 181, -1000, 310,
+ -1000, 36, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 147,
+ -1000, 122, -1000, -1000, -1000, 3804, 430, 413, 1315, -1000,
+ 2845, 2572, 2572, -1000, 218, -1000, 218, 310, -1000, 310,
+ 311, 310, -55, 310, -60, 310, 310, 310, 310, 310,
+ 310, -1000, 2175, -103, -1000, 133, 2720, -1000, 304, -1000,
+ 331, -3, -1000, -1000, 178, -1000, -1000, 3683, -1000, -1000,
+ 376, -1000, 374, 232, 227, -1000, -1000, 2720, 2720, -1000,
+ 2572, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
+ -1000, -1000, -1000, -1000, -1000, -1000, -1000, 2845, 218, 166,
+ 133, 283, 218, 3683, 3683, -1000, -1000, -1000, -1000, -1000,
+ -1000, 133, 280, 2304, -1000, -1000, -1000, -1000, -1000, -1000,
+}
+var yyPgo = [...]int{
+
+ 0, 626, 29, 384, 625, 624, 619, 618, 617, 613,
+ 609, 601, 600, 599, 598, 444, 597, 592, 589, 36,
+ 588, 13, 51, 7, 26, 8, 585, 22, 52, 46,
+ 584, 27, 583, 582, 42, 581, 152, 579, 577, 31,
+ 576, 574, 569, 568, 567, 10, 564, 563, 561, 556,
+ 548, 16, 1, 4, 34, 9, 546, 134, 40, 545,
+ 18, 544, 543, 542, 540, 12, 535, 37, 534, 11,
+ 533, 532, 44, 17, 526, 525, 112, 522, 521, 35,
+ 0, 20, 14, 21, 520, 622, 32, 41, 518, 516,
+ 515, 514, 512, 510, 508, 507, 503, 498, 182, 496,
+ 495, 487, 43, 6, 485, 483, 482, 25, 481, 28,
+ 479, 478, 476, 15, 5, 475, 2, 474, 33, 473,
+ 472, 119, 3, 470,
+}
+var yyR1 = [...]int{
+
+ 0, 119, 120, 120, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 2, 2, 3, 3, 4, 4, 5,
+ 5, 6, 6, 18, 18, 18, 7, 8, 9, 9,
+ 12, 110, 111, 111, 111, 107, 92, 92, 92, 95,
+ 95, 93, 93, 93, 93, 93, 93, 94, 94, 94,
+ 94, 94, 96, 96, 96, 96, 96, 97, 97, 97,
+ 97, 97, 97, 97, 97, 97, 97, 98, 98, 102,
+ 102, 103, 103, 103, 100, 100, 101, 101, 104, 104,
+ 104, 99, 99, 99, 99, 99, 99, 105, 105, 106,
+ 106, 106, 106, 106, 108, 115, 115, 115, 115, 109,
+ 109, 117, 117, 116, 112, 112, 112, 113, 113, 113,
+ 114, 114, 114, 10, 10, 10, 118, 118, 11, 11,
+ 13, 13, 13, 13, 14, 14, 123, 15, 16, 16,
+ 17, 17, 19, 19, 23, 23, 22, 22, 24, 24,
+ 24, 24, 84, 84, 84, 83, 83, 26, 27, 27,
+ 28, 28, 29, 29, 29, 29, 38, 71, 71, 30,
+ 30, 30, 31, 31, 32, 32, 89, 89, 88, 88,
+ 88, 87, 87, 33, 33, 33, 34, 34, 35, 35,
+ 37, 37, 36, 36, 39, 39, 25, 25, 25, 25,
+ 25, 25, 25, 75, 75, 41, 41, 40, 40, 40,
+ 40, 40, 40, 40, 40, 40, 40, 50, 50, 50,
+ 50, 50, 50, 42, 42, 42, 42, 42, 42, 42,
+ 21, 21, 51, 51, 51, 57, 52, 52, 45, 45,
+ 45, 45, 45, 45, 45, 45, 45, 45, 45, 45,
+ 45, 45, 45, 45, 45, 45, 45, 45, 45, 45,
+ 45, 45, 45, 45, 48, 48, 48, 46, 46, 46,
+ 46, 46, 46, 47, 47, 47, 49, 49, 49, 91,
+ 91, 91, 91, 91, 91, 91, 91, 91, 91, 91,
+ 91, 91, 91, 91, 91, 91, 61, 61, 20, 20,
+ 59, 59, 60, 62, 62, 58, 58, 58, 44, 44,
+ 44, 44, 44, 44, 44, 63, 63, 64, 64, 65,
+ 65, 66, 66, 67, 68, 68, 68, 69, 69, 69,
+ 69, 43, 43, 43, 43, 43, 43, 70, 70, 70,
+ 70, 53, 53, 55, 55, 54, 56, 72, 72, 73,
+ 76, 76, 77, 77, 74, 74, 78, 78, 78, 81,
+ 81, 82, 82, 85, 85, 86, 86, 79, 79, 79,
+ 79, 79, 79, 79, 79, 79, 79, 79, 79, 79,
+ 79, 79, 79, 79, 79, 79, 79, 79, 79, 79,
+ 79, 79, 79, 79, 79, 79, 79, 79, 79, 79,
+ 79, 79, 79, 79, 79, 79, 79, 79, 79, 79,
+ 79, 79, 79, 79, 79, 79, 79, 79, 79, 79,
+ 79, 79, 79, 79, 79, 79, 79, 79, 79, 79,
+ 79, 79, 79, 79, 79, 79, 79, 79, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 121, 122, 90, 90, 90,
+}
+var yyR2 = [...]int{
+
+ 0, 2, 0, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 3, 5, 8, 4, 1, 3, 1,
+ 3, 5, 6, 1, 1, 3, 8, 7, 2, 7,
+ 4, 4, 1, 3, 3, 6, 3, 1, 1, 2,
+ 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,
+ 2, 2, 1, 2, 2, 2, 1, 2, 2, 1,
+ 1, 1, 1, 1, 1, 1, 1, 0, 3, 0,
+ 5, 0, 3, 5, 0, 1, 0, 1, 0, 1,
+ 2, 0, 2, 2, 2, 2, 2, 0, 1, 0,
+ 2, 1, 2, 1, 4, 2, 3, 2, 2, 1,
+ 1, 1, 3, 2, 0, 1, 3, 1, 2, 3,
+ 1, 1, 1, 6, 6, 8, 0, 1, 4, 4,
+ 4, 5, 3, 2, 2, 2, 0, 2, 0, 2,
+ 1, 2, 0, 1, 0, 1, 1, 3, 1, 2,
+ 3, 5, 0, 1, 2, 1, 1, 2, 1, 3,
+ 1, 1, 1, 1, 3, 3, 2, 1, 3, 4,
+ 4, 3, 2, 4, 0, 1, 0, 1, 0, 1,
+ 2, 1, 1, 1, 2, 2, 2, 3, 2, 2,
+ 2, 1, 1, 3, 0, 2, 1, 3, 3, 2,
+ 3, 1, 2, 0, 3, 1, 1, 3, 3, 4,
+ 4, 5, 3, 4, 5, 6, 2, 1, 2, 1,
+ 2, 1, 2, 1, 1, 1, 1, 1, 1, 1,
+ 0, 2, 1, 1, 1, 3, 1, 3, 1, 1,
+ 1, 1, 1, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 2, 2, 2, 2, 3,
+ 1, 1, 1, 1, 4, 5, 6, 6, 6, 8,
+ 7, 5, 4, 1, 1, 1, 4, 4, 4, 2,
+ 1, 2, 2, 2, 1, 2, 2, 1, 2, 2,
+ 2, 2, 2, 2, 2, 1, 0, 1, 0, 2,
+ 1, 2, 4, 0, 2, 1, 3, 5, 1, 1,
+ 1, 1, 1, 1, 1, 0, 3, 0, 2, 0,
+ 3, 1, 3, 2, 0, 1, 1, 0, 2, 4,
+ 4, 2, 1, 3, 5, 4, 6, 1, 3, 3,
+ 5, 1, 3, 1, 2, 3, 1, 1, 3, 3,
+ 0, 2, 0, 3, 0, 1, 0, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 0, 1, 1,
+}
+var yyChk = [...]int{
+
+ -1000, -119, -1, -2, -6, -7, -8, -9, -10, -11,
+ -13, -14, -3, -4, -18, 8, 9, -12, 91, 92,
+ 93, 106, 23, 107, 6, -121, 7, 146, 39, -120,
+ 152, -65, 14, -17, 5, -15, -123, -15, -15, -110,
+ 39, -78, 96, 101, -81, 42, -80, 115, 134, 128,
+ 126, 108, 124, 120, 118, 119, 105, 113, 114, 29,
+ 137, 133, 136, 112, 132, 129, 121, 17, 102, 117,
+ 139, 111, 130, 122, 123, 135, 110, 131, 140, 151,
+ 127, 125, 141, 96, 96, 97, 91, 97, 96, 142,
+ -36, -85, 42, -80, -36, -15, -2, 54, -69, 16,
+ 15, -5, -3, -121, 18, -74, 99, -16, -27, -28,
+ -29, -30, -38, -57, -121, -36, 10, -111, -107, 42,
+ 97, -77, 100, -36, -76, 100, -76, 96, 10, -36,
+ 90, -19, 19, -122, 41, 146, -25, -40, 56, -45,
+ 26, 21, -44, -41, -58, -56, -57, 79, 80, 87,
+ 57, 89, -48, -46, -47, -49, 44, 43, 45, 46,
+ 47, 48, 51, 52, 53, -81, -85, -54, -121, 147,
+ 148, 149, 59, 28, 143, 144, 145, 100, 85, 146,
+ 42, -80, -66, -67, -25, -65, -2, -37, 24, -36,
+ 50, 27, 40, -33, -34, -35, 30, 33, 35, 31,
+ 36, -89, 20, -27, -2, -121, -88, -87, 20, -85,
+ 44, -36, 41, 40, -92, -95, -97, -96, -93, -94,
+ 126, 127, 130, 131, 132, 133, 134, 135, 136, 137,
+ 108, 122, 123, 124, 125, 110, 111, 112, 113, 114,
+ 115, 117, 118, 119, 120, 121, 42, -36, 56, 95,
+ 94, -36, 21, -36, -36, 96, -86, -85, -79, 95,
+ 55, 20, 22, 138, 58, 15, 59, 91, 35, 144,
+ 145, 143, 148, 26, 9, 23, 107, 19, 83, 93,
+ 62, 63, 109, 21, 53, 10, 12, 13, 100, 99,
+ 73, 97, 33, 7, 89, 24, 70, 30, 25, 31,
+ 71, 16, 85, 36, 56, 51, 37, 54, 14, 34,
+ 72, 94, 146, 32, 6, 150, 27, 106, 96, 142,
+ 61, 98, 52, 5, 101, 8, 38, 28, 60, 11,
+ -22, -24, 81, -25, -85, 55, 54, 70, 40, 17,
+ -25, -42, 73, 56, 71, 72, 58, 76, 75, 86,
+ 79, 80, 81, 82, 83, 84, 85, 77, 78, 64,
+ 65, 66, 67, 68, 69, 74, -75, -121, -57, -121,
+ -45, -45, -45, -45, -45, -121, 90, -52, -25, -121,
+ -121, -121, -121, -61, -25, -121, -121, -121, -121, 40,
+ -68, 22, 23, -69, -122, -43, 27, 28, -2, -121,
+ -36, -72, -73, -58, -81, -85, -28, -29, -28, -29,
+ 30, 30, 30, 34, 30, -34, -85, -122, -122, -2,
+ -87, -39, 11, -112, -113, -114, -82, 44, 45, -81,
+ -79, -107, -108, -115, 102, 101, -109, 97, 25, -104,
+ 51, 56, -100, 140, -98, 39, -98, -98, -98, -98,
+ -98, -102, 39, -102, -102, -103, 39, -103, 37, 21,
+ -118, 103, 98, -118, -36, -26, 40, 10, -84, -83,
+ 20, -81, 44, 90, -25, -25, -50, 51, 56, 52,
+ 53, -25, -25, -45, -51, -54, -57, 49, 73, 71,
+ 72, 58, -45, -45, -45, -45, -45, -45, -45, -45,
+ -45, -45, -45, -45, -45, -45, -45, 42, -81, -23,
+ 19, -22, -82, -86, -79, 40, -122, -25, -58, -19,
+ -59, -60, 60, -58, -22, -22, -22, -67, -72, -53,
+ -55, -54, -121, -2, -70, -81, -39, 40, 64, 90,
+ -32, -31, 37, 38, -31, 30, -122, -65, -25, 40,
+ -114, 64, 39, 25, -109, 42, 42, -99, 26, 51,
+ -101, 141, 45, 45, 45, -36, -107, -36, -58, -39,
+ -24, -27, -83, 81, -86, 51, 52, 53, -51, -45,
+ -45, -45, -21, 109, 55, -122, -122, -22, -121, 90,
+ -25, 20, 40, -22, -62, -60, 62, -25, -122, -122,
+ -122, -122, 40, -122, -122, -122, 40, 90, -65, -73,
+ -25, -82, -25, 39, -69, -113, -114, -117, -116, -81,
+ 42, -105, 138, 44, 45, 46, 51, 143, 41, 40,
+ 41, 40, -90, -121, -82, 98, -63, 12, 90, -21,
+ 55, -45, -45, -122, -23, -82, -91, 126, 108, 124,
+ 120, 129, 139, 122, 140, 123, 113, 110, 111, 112,
+ 115, 44, -45, -65, 63, -25, 61, -55, 28, -2,
+ -121, -81, -81, -69, -71, -81, 41, 40, -98, -106,
+ 102, 25, 101, 45, 45, -58, -64, 13, 15, 81,
+ -45, -122, -122, -98, -98, -103, -98, 114, -98, 114,
+ -98, -98, -98, -98, -98, -98, -122, 40, -20, 150,
+ -25, -53, -2, 90, 40, 41, -116, 25, 25, 41,
+ 41, -25, -52, -45, -122, 44, -122, -81, -81, -122,
+}
+var yyDef = [...]int{
+
+ 0, -2, 2, -2, 5, 6, 7, 8, 9, 10,
+ 11, 12, 309, 0, 126, 126, 126, 0, 346, 0,
+ 0, 0, 0, 0, 126, 0, 23, 24, 464, 1,
+ 3, 317, 0, 0, 130, 344, 128, 0, 0, 28,
+ 0, 0, 342, 347, 348, 349, 350, 428, 429, 430,
+ 431, 432, 433, 434, 435, 436, 437, 438, 439, 440,
+ 441, 442, 443, 444, 445, 446, 447, 448, 449, 450,
+ 451, 452, 453, 454, 455, 456, 457, 458, 459, 460,
+ 461, 462, 463, 0, 340, 340, 0, 0, 0, 123,
+ 124, 182, 353, 354, 125, 132, 17, 0, 13, 0,
+ 0, 309, 19, 0, 131, 0, 345, 127, 0, 148,
+ 150, 151, 152, -2, 0, 168, 0, 0, 32, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 122,
+ 0, 0, 133, 18, 465, 25, 318, 186, 0, 191,
+ 193, 0, 228, 229, 230, 231, 232, 0, 0, 0,
+ 0, 0, 250, 251, 252, 253, 298, 299, 300, 301,
+ 302, 303, 304, 195, 196, 295, 0, 336, 0, 0,
+ 0, 0, 286, 0, 263, 264, 265, 0, 0, 0,
+ -2, -2, 310, 311, 314, 317, 17, 0, 0, 181,
+ 129, 0, 0, 0, 0, 0, 173, 0, 0, 0,
+ 0, 0, 167, 0, 17, 0, 156, 169, 0, 171,
+ 172, 184, 104, 0, 78, 74, 37, 38, 67, 40,
+ 67, 67, 59, 60, 61, 62, 63, 64, 65, 66,
+ 52, 67, 67, 67, 56, 41, 42, 43, 44, 45,
+ 46, 69, 69, 69, 71, 71, 0, 30, 0, 116,
+ 116, 118, 341, 119, 120, 0, 183, 355, 356, 357,
+ 358, 359, 360, 361, 362, 363, 364, 365, 366, 367,
+ 368, 369, 370, 371, 372, 373, 374, 375, 376, 377,
+ 378, 379, 380, 381, 382, 383, 384, 385, 386, 387,
+ 388, 389, 390, 391, 392, 393, 394, 395, 396, 397,
+ 398, 399, 400, 401, 402, 403, 404, 405, 406, 407,
+ 408, 409, 410, 411, 412, 413, 414, 415, 416, 417,
+ 418, 419, 420, 421, 422, 423, 424, 425, 426, 427,
+ 16, 136, 138, 142, 0, 0, 0, 0, 0, 0,
+ 189, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 213,
+ 214, 215, 216, 217, 218, 219, 192, 0, 206, 0,
+ 245, 246, 247, 248, 0, 134, 0, 0, 226, 0,
+ 0, 0, 132, 0, 287, 0, 0, 0, 0, 0,
+ 313, 315, 316, 14, 20, 21, 0, 0, -2, 0,
+ 180, 184, 337, 0, 295, 0, 149, 164, 0, 161,
+ 174, 175, 176, 0, 178, 179, 154, 155, 225, 17,
+ 170, 309, 0, 31, 105, 107, 110, 111, 112, 351,
+ 352, 33, 34, 0, 0, 0, 0, 99, 100, 81,
+ 79, 0, 76, 75, 39, 0, 57, 58, 53, 54,
+ 55, 47, 0, 48, 49, 50, 0, 51, 0, 343,
+ 0, 117, 0, 0, 121, 184, 0, 0, 139, 143,
+ 0, 145, 146, 0, 187, 188, 190, 207, 0, 209,
+ 211, 319, 320, 197, 198, 222, 223, 224, 0, 0,
+ 0, 0, 220, 202, 0, 233, 234, 235, 236, 237,
+ 238, 239, 240, 241, 242, 243, 244, 0, 249, 0,
+ 0, 135, 296, 0, -2, 0, 335, 0, 0, 0,
+ 293, 290, 0, 0, 0, 0, 0, 312, 22, 321,
+ 331, 333, 0, 17, 0, 327, 309, 0, 0, 0,
+ 159, 165, 0, 0, 160, 177, -2, 317, 185, 0,
+ 108, 0, 0, 95, 0, 97, 98, 87, 0, 80,
+ 36, 77, 0, 0, 0, 466, 113, 114, 0, 305,
+ 137, 147, 144, 140, 0, 208, 210, 212, 199, 220,
+ 203, 0, 200, 0, 0, 194, 254, 0, 134, 0,
+ 227, 0, 0, 309, 0, 291, 0, 0, 262, 266,
+ 267, 268, 0, 334, -2, 0, 0, 0, 317, 338,
+ 339, 296, 162, 0, 27, 106, 109, 0, 101, 67,
+ 96, 89, 88, 82, 83, 84, 85, 86, 68, 0,
+ 72, 0, 29, 467, 468, 0, 307, 0, 0, 201,
+ 0, 221, 204, 255, 0, 297, 0, 67, 270, 67,
+ 71, 67, 274, 67, 277, 67, 67, 67, 67, 67,
+ 67, 285, 0, 288, 261, 294, 0, 332, 0, -2,
+ 0, 329, 328, 26, 0, 157, 94, 0, 103, 35,
+ 0, 91, 93, 0, 0, 115, 15, 0, 0, 141,
+ 205, 256, 257, 269, 271, 272, 273, 275, 276, 278,
+ 279, 280, 281, 282, 283, 284, 258, 0, 0, 0,
+ 292, 324, 17, 0, 0, 163, 102, 90, 92, 70,
+ 73, 308, 306, 0, 260, 289, -2, 330, 158, 259,
+}
+var yyTok1 = [...]int{
+
+ 1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 57, 3, 3, 3, 84, 76, 3,
+ 39, 41, 81, 79, 40, 80, 90, 82, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 152,
+ 65, 64, 66, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 86, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ 3, 3, 3, 3, 75, 3, 87,
+}
+var yyTok2 = [...]int{
+
+ 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
+ 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
+ 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
+ 32, 33, 34, 35, 36, 37, 38, 42, 43, 44,
+ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
+ 55, 56, 58, 59, 60, 61, 62, 63, 67, 68,
+ 69, 70, 71, 72, 73, 74, 77, 78, 83, 85,
+ 88, 89, 91, 92, 93, 94, 95, 96, 97, 98,
+ 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
+ 109, 110, 111, 112, 113, 114, 115, 116, 117, 118,
+ 119, 120, 121, 122, 123, 124, 125, 126, 127, 128,
+ 129, 130, 131, 132, 133, 134, 135, 136, 137, 138,
+ 139, 140, 141, 142, 143, 144, 145, 146, 147, 148,
+ 149, 150, 151,
+}
+var yyTok3 = [...]int{
+ 0,
+}
+
+var yyErrorMessages = [...]struct {
+ state int
+ token int
+ msg string
+}{}
+
+//line yaccpar:1
+
+/* parser for yacc output */
+
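+// yyDebug selects how much tracing the parser writes through __yyfmt__:
+// >= 1 reports each syntax error, >= 2 traces reductions and error
+// recovery, >= 3 logs every token returned by the lexer, and >= 4 logs
+// every state pushed onto the stack. yyErrorVerbose switches the message
+// built by yyErrorMessage from a bare "syntax error" to one that names
+// the unexpected token and the tokens that were expected.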
+var (
+ yyDebug = 0
+ yyErrorVerbose = false
+)
+
+type yyLexer interface {
+ Lex(lval *yySymType) int
+ Error(s string)
+}
+
+type yyParser interface {
+ Parse(yyLexer) int
+ Lookahead() int
+}
+
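+// yyParserImpl holds the parser state between calls: the current
+// lookahead character (char), its semantic value (lval), and a
+// fixed-size initial stack that Parse copies into a slice and doubles
+// whenever a shift or reduction would overflow it.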
+type yyParserImpl struct {
+ lval yySymType
+ stack [yyInitialStackSize]yySymType
+ char int
+}
+
+func (p *yyParserImpl) Lookahead() int {
+ return p.char
+}
+
+func yyNewParser() yyParser {
+ return &yyParserImpl{}
+}
+
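+// yyFlag is the sentinel stored in yyPact for states that never shift on
+// a lookahead: when yyPact[yystate] <= yyFlag, Parse takes that state's
+// default action without reading another token.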
+const yyFlag = -1000
+
+func yyTokname(c int) string {
+ if c >= 1 && c-1 < len(yyToknames) {
+ if yyToknames[c-1] != "" {
+ return yyToknames[c-1]
+ }
+ }
+ return __yyfmt__.Sprintf("tok-%v", c)
+}
+
+func yyStatname(s int) string {
+ if s >= 0 && s < len(yyStatenames) {
+ if yyStatenames[s] != "" {
+ return yyStatenames[s]
+ }
+ }
+ return __yyfmt__.Sprintf("state-%v", s)
+}
+
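+// yyErrorMessage builds the string passed to yyLexer.Error. With
+// yyErrorVerbose set it first consults the (empty here) yyErrorMessages
+// table, then scans yyPact/yyAct/yyChk, and yyExca for states whose
+// default action is -2, to list up to four tokens that could legally
+// follow, mirroring Bison's verbose errors.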
+func yyErrorMessage(state, lookAhead int) string {
+ const TOKSTART = 4
+
+ if !yyErrorVerbose {
+ return "syntax error"
+ }
+
+ for _, e := range yyErrorMessages {
+ if e.state == state && e.token == lookAhead {
+ return "syntax error: " + e.msg
+ }
+ }
+
+ res := "syntax error: unexpected " + yyTokname(lookAhead)
+
+ // To match Bison, suggest at most four expected tokens.
+ expected := make([]int, 0, 4)
+
+ // Look for shiftable tokens.
+ base := yyPact[state]
+ for tok := TOKSTART; tok-1 < len(yyToknames); tok++ {
+ if n := base + tok; n >= 0 && n < yyLast && yyChk[yyAct[n]] == tok {
+ if len(expected) == cap(expected) {
+ return res
+ }
+ expected = append(expected, tok)
+ }
+ }
+
+ if yyDef[state] == -2 {
+ i := 0
+ for yyExca[i] != -1 || yyExca[i+1] != state {
+ i += 2
+ }
+
+ // Look for tokens that we accept or reduce.
+ for i += 2; yyExca[i] >= 0; i += 2 {
+ tok := yyExca[i]
+ if tok < TOKSTART || yyExca[i+1] == 0 {
+ continue
+ }
+ if len(expected) == cap(expected) {
+ return res
+ }
+ expected = append(expected, tok)
+ }
+
+ // If the default action is to accept or reduce, give up.
+ if yyExca[i+1] != 0 {
+ return res
+ }
+ }
+
+ for i, tok := range expected {
+ if i == 0 {
+ res += ", expecting "
+ } else {
+ res += " or "
+ }
+ res += yyTokname(tok)
+ }
+ return res
+}
+
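+// yylex1 fetches the next token from the caller's lexer and translates
+// the returned character code into the grammar's internal token number:
+// yyTok1 covers small single-character codes, yyTok2 covers codes from
+// yyPrivate upward, yyTok3 lists explicit pairs for the rest, and any
+// unknown code falls back to yyTok2[1].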
+func yylex1(lex yyLexer, lval *yySymType) (char, token int) {
+ token = 0
+ char = lex.Lex(lval)
+ if char <= 0 {
+ token = yyTok1[0]
+ goto out
+ }
+ if char < len(yyTok1) {
+ token = yyTok1[char]
+ goto out
+ }
+ if char >= yyPrivate {
+ if char < yyPrivate+len(yyTok2) {
+ token = yyTok2[char-yyPrivate]
+ goto out
+ }
+ }
+ for i := 0; i < len(yyTok3); i += 2 {
+ token = yyTok3[i+0]
+ if token == char {
+ token = yyTok3[i+1]
+ goto out
+ }
+ }
+
+out:
+ if token == 0 {
+ token = yyTok2[1] /* unknown char */
+ }
+ if yyDebug >= 3 {
+ __yyfmt__.Printf("lex %s(%d)\n", yyTokname(token), uint(char))
+ }
+ return char, token
+}
+
+func yyParse(yylex yyLexer) int {
+ return yyNewParser().Parse(yylex)
+}
+
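+// Parse runs the table-driven LALR loop: it shifts while yyAct/yyChk
+// permit it, otherwise applies the default action from yyDef (consulting
+// yyExca when that entry is -2), reduces using yyR1/yyR2/yyPgo to find
+// the goto state, and on a syntax error performs the usual yacc recovery
+// of popping states until "error" can be shifted, governed by Errflag.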
+func (yyrcvr *yyParserImpl) Parse(yylex yyLexer) int {
+ var yyn int
+ var yyVAL yySymType
+ var yyDollar []yySymType
+ _ = yyDollar // silence set and not used
+ yyS := yyrcvr.stack[:]
+
+ Nerrs := 0 /* number of errors */
+ Errflag := 0 /* error recovery flag */
+ yystate := 0
+ yyrcvr.char = -1
+ yytoken := -1 // yyrcvr.char translated into internal numbering
+ defer func() {
+ // Make sure we report no lookahead when not parsing.
+ yystate = -1
+ yyrcvr.char = -1
+ yytoken = -1
+ }()
+ yyp := -1
+ goto yystack
+
+ret0:
+ return 0
+
+ret1:
+ return 1
+
+yystack:
+ /* put a state and value onto the stack */
+ if yyDebug >= 4 {
+ __yyfmt__.Printf("char %v in %v\n", yyTokname(yytoken), yyStatname(yystate))
+ }
+
+ yyp++
+ if yyp >= len(yyS) {
+ nyys := make([]yySymType, len(yyS)*2)
+ copy(nyys, yyS)
+ yyS = nyys
+ }
+ yyS[yyp] = yyVAL
+ yyS[yyp].yys = yystate
+
+yynewstate:
+ yyn = yyPact[yystate]
+ if yyn <= yyFlag {
+ goto yydefault /* simple state */
+ }
+ if yyrcvr.char < 0 {
+ yyrcvr.char, yytoken = yylex1(yylex, &yyrcvr.lval)
+ }
+ yyn += yytoken
+ if yyn < 0 || yyn >= yyLast {
+ goto yydefault
+ }
+ yyn = yyAct[yyn]
+ if yyChk[yyn] == yytoken { /* valid shift */
+ yyrcvr.char = -1
+ yytoken = -1
+ yyVAL = yyrcvr.lval
+ yystate = yyn
+ if Errflag > 0 {
+ Errflag--
+ }
+ goto yystack
+ }
+
+yydefault:
+ /* default state action */
+ yyn = yyDef[yystate]
+ if yyn == -2 {
+ if yyrcvr.char < 0 {
+ yyrcvr.char, yytoken = yylex1(yylex, &yyrcvr.lval)
+ }
+
+ /* look through exception table */
+ xi := 0
+ for {
+ if yyExca[xi+0] == -1 && yyExca[xi+1] == yystate {
+ break
+ }
+ xi += 2
+ }
+ for xi += 2; ; xi += 2 {
+ yyn = yyExca[xi+0]
+ if yyn < 0 || yyn == yytoken {
+ break
+ }
+ }
+ yyn = yyExca[xi+1]
+ if yyn < 0 {
+ goto ret0
+ }
+ }
+ if yyn == 0 {
+ /* error ... attempt to resume parsing */
+ switch Errflag {
+ case 0: /* brand new error */
+ yylex.Error(yyErrorMessage(yystate, yytoken))
+ Nerrs++
+ if yyDebug >= 1 {
+ __yyfmt__.Printf("%s", yyStatname(yystate))
+ __yyfmt__.Printf(" saw %s\n", yyTokname(yytoken))
+ }
+ fallthrough
+
+ case 1, 2: /* incompletely recovered error ... try again */
+ Errflag = 3
+
+ /* find a state where "error" is a legal shift action */
+ for yyp >= 0 {
+ yyn = yyPact[yyS[yyp].yys] + yyErrCode
+ if yyn >= 0 && yyn < yyLast {
+ yystate = yyAct[yyn] /* simulate a shift of "error" */
+ if yyChk[yystate] == yyErrCode {
+ goto yystack
+ }
+ }
+
+ /* the current p has no shift on "error", pop stack */
+ if yyDebug >= 2 {
+ __yyfmt__.Printf("error recovery pops state %d\n", yyS[yyp].yys)
+ }
+ yyp--
+ }
+ /* there is no state on the stack with an error shift ... abort */
+ goto ret1
+
+ case 3: /* no shift yet; clobber input char */
+ if yyDebug >= 2 {
+ __yyfmt__.Printf("error recovery discards %s\n", yyTokname(yytoken))
+ }
+ if yytoken == yyEofCode {
+ goto ret1
+ }
+ yyrcvr.char = -1
+ yytoken = -1
+ goto yynewstate /* try again in the same state */
+ }
+ }
+
+ /* reduction by production yyn */
+ if yyDebug >= 2 {
+ __yyfmt__.Printf("reduce %v in:\n\t%v\n", yyn, yyStatname(yystate))
+ }
+
+ yynt := yyn
+ yypt := yyp
+ _ = yypt // guard against "declared and not used"
+
+ yyp -= yyR2[yyn]
+ // yyp is now the index of $0. Perform the default action. Iff the
+ // reduced production is ε, $1 is possibly out of range.
+ if yyp+1 >= len(yyS) {
+ nyys := make([]yySymType, len(yyS)*2)
+ copy(nyys, yyS)
+ yyS = nyys
+ }
+ yyVAL = yyS[yyp+1]
+
+ /* consult goto table to find next state */
+ yyn = yyR1[yyn]
+ yyg := yyPgo[yyn]
+ yyj := yyg + yyS[yyp].yys + 1
+
+ if yyj >= yyLast {
+ yystate = yyAct[yyg]
+ } else {
+ yystate = yyAct[yyj]
+ if yyChk[yystate] != -yyn {
+ yystate = yyAct[yyg]
+ }
+ }
+ // The switch below is the literal action code for each production; goyacc substitutes it for a dummy call in the yaccpar template.
+ switch yynt {
+
+ case 1:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:242
+ {
+ setParseTree(yylex, yyDollar[1].statement)
+ }
+ case 2:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:247
+ {
+ }
+ case 3:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:248
+ {
+ }
+ case 4:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:252
+ {
+ yyVAL.statement = yyDollar[1].selStmt
+ }
+ case 13:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:266
+ {
+ sel := yyDollar[1].selStmt.(*Select)
+ sel.OrderBy = yyDollar[2].orderBy
+ sel.Limit = yyDollar[3].limit
+ yyVAL.selStmt = sel
+ }
+ case 14:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line sql.y:273
+ {
+ yyVAL.selStmt = &Union{Type: yyDollar[2].str, Left: yyDollar[1].selStmt, Right: yyDollar[3].selStmt, OrderBy: yyDollar[4].orderBy, Limit: yyDollar[5].limit}
+ }
+ case 15:
+ yyDollar = yyS[yypt-8 : yypt+1]
+ //line sql.y:280
+ {
+ yyVAL.selStmt = &Select{Comments: Comments(yyDollar[2].bytes2), Distinct: yyDollar[3].str, SelectExprs: yyDollar[4].selectExprs, From: yyDollar[5].tableExprs, Where: NewWhere(WhereStr, yyDollar[6].expr), GroupBy: GroupBy(yyDollar[7].exprs), Having: NewWhere(HavingStr, yyDollar[8].expr)}
+ }
+ case 16:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line sql.y:284
+ {
+ yyVAL.selStmt = &Select{Comments: Comments(yyDollar[2].bytes2), Distinct: yyDollar[3].str, SelectExprs: yyDollar[4].selectExprs}
+ }
+ case 17:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:290
+ {
+ yyVAL.selStmt = yyDollar[1].selStmt
+ }
+ case 18:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:294
+ {
+ yyVAL.selStmt = &ParenSelect{Select: yyDollar[2].selStmt}
+ }
+ case 19:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:300
+ {
+ yyVAL.selStmt = yyDollar[1].selStmt
+ }
+ case 20:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:304
+ {
+ yyVAL.selStmt = &ParenSelect{Select: yyDollar[2].selStmt}
+ }
+ case 21:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line sql.y:311
+ {
+ // insert_data returns a *Insert pre-filled with Columns & Values
+ ins := yyDollar[5].ins
+ ins.Action = yyDollar[1].str
+ ins.Comments = yyDollar[2].bytes2
+ ins.Ignore = yyDollar[3].str
+ ins.Table = yyDollar[4].tableName
+ yyVAL.statement = ins
+ }
+ case 22:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line sql.y:321
+ {
+ cols := make(Columns, 0, len(yyDollar[6].updateExprs))
+ vals := make(ValTuple, 0, len(yyDollar[6].updateExprs))
+ for _, updateList := range yyDollar[6].updateExprs {
+ cols = append(cols, updateList.Name.Name)
+ vals = append(vals, updateList.Expr)
+ }
+ yyVAL.statement = &Insert{Action: yyDollar[1].str, Comments: Comments(yyDollar[2].bytes2), Ignore: yyDollar[3].str, Table: yyDollar[4].tableName, Columns: cols, Rows: Values{vals}}
+ }
+ case 23:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:333
+ {
+ yyVAL.str = InsertStr
+ }
+ case 24:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:337
+ {
+ yyVAL.str = ReplaceStr
+ }
+ case 25:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:341
+ {
+ yyVAL.str = ReplaceStr
+ }
+ case 26:
+ yyDollar = yyS[yypt-8 : yypt+1]
+ //line sql.y:347
+ {
+ yyVAL.statement = &Update{Comments: Comments(yyDollar[2].bytes2), TableExprs: yyDollar[3].tableExprs, Exprs: yyDollar[5].updateExprs, Where: NewWhere(WhereStr, yyDollar[6].expr), OrderBy: yyDollar[7].orderBy, Limit: yyDollar[8].limit}
+ }
+ case 27:
+ yyDollar = yyS[yypt-7 : yypt+1]
+ //line sql.y:353
+ {
+ yyVAL.statement = &Delete{Comments: Comments(yyDollar[2].bytes2), TableExprs: TableExprs{&AliasedTableExpr{Expr: yyDollar[4].tableName}}, Where: NewWhere(WhereStr, yyDollar[5].expr), OrderBy: yyDollar[6].orderBy, Limit: yyDollar[7].limit}
+ }
+ case 28:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:359
+ {
+ yyDollar[1].ddl.TableSpec = yyDollar[2].TableSpec
+ yyVAL.statement = yyDollar[1].ddl
+ }
+ case 29:
+ yyDollar = yyS[yypt-7 : yypt+1]
+ //line sql.y:364
+ {
+ // Change this to an alter statement
+ yyVAL.statement = &DDL{Action: CreateIndexStr, Table: yyDollar[6].tableName, NewName: yyDollar[6].tableName}
+ }
+ case 30:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line sql.y:371
+ {
+ yyVAL.ddl = &DDL{Action: CreateStr, NewName: yyDollar[4].tableName}
+ setDDL(yylex, yyVAL.ddl)
+ }
+ case 31:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line sql.y:378
+ {
+ yyVAL.TableSpec = yyDollar[2].TableSpec
+ yyVAL.TableSpec.Options = yyDollar[4].str
+ }
+ case 32:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:385
+ {
+ yyVAL.TableSpec = &TableSpec{}
+ yyVAL.TableSpec.AddColumn(yyDollar[1].columnDefinition)
+ }
+ case 33:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:390
+ {
+ yyVAL.TableSpec.AddColumn(yyDollar[3].columnDefinition)
+ }
+ case 34:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:394
+ {
+ yyVAL.TableSpec.AddIndex(yyDollar[3].indexDefinition)
+ }
+ case 35:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line sql.y:400
+ {
+ yyDollar[2].columnType.NotNull = yyDollar[3].boolVal
+ yyDollar[2].columnType.Default = yyDollar[4].optVal
+ yyDollar[2].columnType.Autoincrement = yyDollar[5].boolVal
+ yyDollar[2].columnType.KeyOpt = yyDollar[6].colKeyOpt
+ yyVAL.columnDefinition = &ColumnDefinition{Name: NewColIdent(string(yyDollar[1].bytes)), Type: yyDollar[2].columnType}
+ }
+ case 36:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:409
+ {
+ yyVAL.columnType = yyDollar[1].columnType
+ yyVAL.columnType.Unsigned = yyDollar[2].boolVal
+ yyVAL.columnType.Zerofill = yyDollar[3].boolVal
+ }
+ case 39:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:419
+ {
+ yyVAL.columnType = yyDollar[1].columnType
+ yyVAL.columnType.Length = yyDollar[2].optVal
+ }
+ case 40:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:424
+ {
+ yyVAL.columnType = yyDollar[1].columnType
+ }
+ case 41:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:430
+ {
+ yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)}
+ }
+ case 42:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:434
+ {
+ yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)}
+ }
+ case 43:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:438
+ {
+ yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)}
+ }
+ case 44:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:442
+ {
+ yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)}
+ }
+ case 45:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:446
+ {
+ yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)}
+ }
+ case 46:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:450
+ {
+ yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)}
+ }
+ case 47:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:456
+ {
+ yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)}
+ yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length
+ yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale
+ }
+ case 48:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:462
+ {
+ yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)}
+ yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length
+ yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale
+ }
+ case 49:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:468
+ {
+ yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)}
+ yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length
+ yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale
+ }
+ case 50:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:474
+ {
+ yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)}
+ yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length
+ yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale
+ }
+ case 51:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:480
+ {
+ yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)}
+ yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length
+ yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale
+ }
+ case 52:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:488
+ {
+ yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)}
+ }
+ case 53:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:492
+ {
+ yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal}
+ }
+ case 54:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:496
+ {
+ yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal}
+ }
+ case 55:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:500
+ {
+ yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal}
+ }
+ case 56:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:504
+ {
+ yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)}
+ }
+ case 57:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:510
+ {
+ yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal}
+ }
+ case 58:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:514
+ {
+ yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal}
+ }
+ case 59:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:518
+ {
+ yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)}
+ }
+ case 60:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:522
+ {
+ yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)}
+ }
+ case 61:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:526
+ {
+ yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)}
+ }
+ case 62:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:530
+ {
+ yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)}
+ }
+ case 63:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:534
+ {
+ yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)}
+ }
+ case 64:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:538
+ {
+ yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)}
+ }
+ case 65:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:542
+ {
+ yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)}
+ }
+ case 66:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:546
+ {
+ yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)}
+ }
+ case 67:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:551
+ {
+ yyVAL.optVal = nil
+ }
+ case 68:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:555
+ {
+ yyVAL.optVal = NewIntVal(yyDollar[2].bytes)
+ }
+ case 69:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:560
+ {
+ yyVAL.LengthScaleOption = LengthScaleOption{}
+ }
+ case 70:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line sql.y:564
+ {
+ yyVAL.LengthScaleOption = LengthScaleOption{
+ Length: NewIntVal(yyDollar[2].bytes),
+ Scale: NewIntVal(yyDollar[4].bytes),
+ }
+ }
+ case 71:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:572
+ {
+ yyVAL.LengthScaleOption = LengthScaleOption{}
+ }
+ case 72:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:576
+ {
+ yyVAL.LengthScaleOption = LengthScaleOption{
+ Length: NewIntVal(yyDollar[2].bytes),
+ }
+ }
+ case 73:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line sql.y:582
+ {
+ yyVAL.LengthScaleOption = LengthScaleOption{
+ Length: NewIntVal(yyDollar[2].bytes),
+ Scale: NewIntVal(yyDollar[4].bytes),
+ }
+ }
+ case 74:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:590
+ {
+ yyVAL.boolVal = BoolVal(false)
+ }
+ case 75:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:594
+ {
+ yyVAL.boolVal = BoolVal(true)
+ }
+ case 76:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:599
+ {
+ yyVAL.boolVal = BoolVal(false)
+ }
+ case 77:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:603
+ {
+ yyVAL.boolVal = BoolVal(true)
+ }
+ case 78:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:609
+ {
+ yyVAL.boolVal = BoolVal(false)
+ }
+ case 79:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:613
+ {
+ yyVAL.boolVal = BoolVal(false)
+ }
+ case 80:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:617
+ {
+ yyVAL.boolVal = BoolVal(true)
+ }
+ case 81:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:622
+ {
+ yyVAL.optVal = nil
+ }
+ case 82:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:626
+ {
+ yyVAL.optVal = NewStrVal(yyDollar[2].bytes)
+ }
+ case 83:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:630
+ {
+ yyVAL.optVal = NewIntVal(yyDollar[2].bytes)
+ }
+ case 84:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:634
+ {
+ yyVAL.optVal = NewFloatVal(yyDollar[2].bytes)
+ }
+ case 85:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:638
+ {
+ yyVAL.optVal = NewValArg(yyDollar[2].bytes)
+ }
+ case 86:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:642
+ {
+ yyVAL.optVal = NewValArg(yyDollar[2].bytes)
+ }
+ case 87:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:647
+ {
+ yyVAL.boolVal = BoolVal(false)
+ }
+ case 88:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:651
+ {
+ yyVAL.boolVal = BoolVal(true)
+ }
+ case 89:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:656
+ {
+ yyVAL.colKeyOpt = colKeyNone
+ }
+ case 90:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:660
+ {
+ yyVAL.colKeyOpt = colKeyPrimary
+ }
+ case 91:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:664
+ {
+ yyVAL.colKeyOpt = colKey
+ }
+ case 92:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:668
+ {
+ yyVAL.colKeyOpt = colKeyUniqueKey
+ }
+ case 93:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:672
+ {
+ yyVAL.colKeyOpt = colKeyUnique
+ }
+ case 94:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line sql.y:678
+ {
+ yyVAL.indexDefinition = &IndexDefinition{Info: yyDollar[1].indexInfo, Columns: yyDollar[3].indexColumns}
+ }
+ case 95:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:684
+ {
+ yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes) + " " + string(yyDollar[2].bytes), Name: NewColIdent("PRIMARY"), Primary: true, Unique: true}
+ }
+ case 96:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:688
+ {
+ yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes) + " " + string(yyDollar[2].str), Name: NewColIdent(string(yyDollar[3].bytes)), Unique: true}
+ }
+ case 97:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:692
+ {
+ yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes), Name: NewColIdent(string(yyDollar[2].bytes)), Unique: true}
+ }
+ case 98:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:696
+ {
+ yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].str), Name: NewColIdent(string(yyDollar[2].bytes)), Unique: false}
+ }
+ case 99:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:702
+ {
+ yyVAL.str = string(yyDollar[1].bytes)
+ }
+ case 100:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:706
+ {
+ yyVAL.str = string(yyDollar[1].bytes)
+ }
+ case 101:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:712
+ {
+ yyVAL.indexColumns = []*IndexColumn{yyDollar[1].indexColumn}
+ }
+ case 102:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:716
+ {
+ yyVAL.indexColumns = append(yyVAL.indexColumns, yyDollar[3].indexColumn)
+ }
+ case 103:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:722
+ {
+ yyVAL.indexColumn = &IndexColumn{Column: yyDollar[1].colIdent, Length: yyDollar[2].optVal}
+ }
+ case 104:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:727
+ {
+ yyVAL.str = ""
+ }
+ case 105:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:731
+ {
+ yyVAL.str = " " + string(yyDollar[1].str)
+ }
+ case 106:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:735
+ {
+ yyVAL.str = string(yyDollar[1].str) + ", " + string(yyDollar[3].str)
+ }
+ case 107:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:743
+ {
+ yyVAL.str = yyDollar[1].str
+ }
+ case 108:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:747
+ {
+ yyVAL.str = yyDollar[1].str + " " + yyDollar[2].str
+ }
+ case 109:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:751
+ {
+ yyVAL.str = yyDollar[1].str + "=" + yyDollar[3].str
+ }
+ case 110:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:757
+ {
+ yyVAL.str = yyDollar[1].colIdent.String()
+ }
+ case 111:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:761
+ {
+ yyVAL.str = "'" + string(yyDollar[1].bytes) + "'"
+ }
+ case 112:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:765
+ {
+ yyVAL.str = string(yyDollar[1].bytes)
+ }
+ case 113:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line sql.y:771
+ {
+ yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[3].tableName, NewName: yyDollar[3].tableName}
+ }
+ case 114:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line sql.y:775
+ {
+ // Change this to a rename statement
+ yyVAL.statement = &DDL{Action: RenameStr, Table: yyDollar[3].tableName, NewName: yyDollar[6].tableName}
+ }
+ case 115:
+ yyDollar = yyS[yypt-8 : yypt+1]
+ //line sql.y:780
+ {
+ // Renaming an index can just be treated as an alter
+ yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[3].tableName, NewName: yyDollar[3].tableName}
+ }
+ case 116:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:786
+ {
+ }
+ case 118:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line sql.y:790
+ {
+ var exists bool
+ if yyDollar[3].byt != 0 {
+ exists = true
+ }
+ yyVAL.statement = &DDL{Action: DropStr, Table: yyDollar[4].tableName, IfExists: exists}
+ }
+ case 119:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line sql.y:798
+ {
+ var exists bool
+ if yyDollar[3].byt != 0 {
+ exists = true
+ }
+ yyVAL.statement = &DDL{Action: DropIndexStr, Table: yyDollar[4].tableName, IfExists: exists}
+ }
+ case 120:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line sql.y:808
+ {
+ yyVAL.statement = &Show{Type: string(yyDollar[3].bytes), ShowCreate: true, OnTable: yyDollar[4].tableName}
+ }
+ case 121:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line sql.y:812
+ {
+ yyVAL.statement = &Show{Type: string(yyDollar[2].bytes), OnTable: yyDollar[5].tableName}
+ }
+ case 122:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:816
+ {
+ yyVAL.statement = &Show{Type: string(yyDollar[2].bytes), OnTable: yyDollar[3].tableName}
+ }
+ case 123:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:820
+ {
+ yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)}
+ }
+ case 124:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:826
+ {
+ yyVAL.statement = &Show{Type: "table", OnTable: yyDollar[2].tableName}
+ }
+ case 125:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:830
+ {
+ yyVAL.statement = &Show{Type: "table", OnTable: yyDollar[2].tableName}
+ }
+ case 126:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:835
+ {
+ setAllowComments(yylex, true)
+ }
+ case 127:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:839
+ {
+ yyVAL.bytes2 = yyDollar[2].bytes2
+ setAllowComments(yylex, false)
+ }
+ case 128:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:845
+ {
+ yyVAL.bytes2 = nil
+ }
+ case 129:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:849
+ {
+ yyVAL.bytes2 = append(yyDollar[1].bytes2, yyDollar[2].bytes)
+ }
+ case 130:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:855
+ {
+ yyVAL.str = UnionStr
+ }
+ case 131:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:859
+ {
+ yyVAL.str = UnionAllStr
+ }
+ case 132:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:864
+ {
+ yyVAL.str = ""
+ }
+ case 133:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:868
+ {
+ yyVAL.str = DistinctStr
+ }
+ case 134:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:873
+ {
+ yyVAL.selectExprs = nil
+ }
+ case 135:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:877
+ {
+ yyVAL.selectExprs = yyDollar[1].selectExprs
+ }
+ case 136:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:883
+ {
+ yyVAL.selectExprs = SelectExprs{yyDollar[1].selectExpr}
+ }
+ case 137:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:887
+ {
+ yyVAL.selectExprs = append(yyVAL.selectExprs, yyDollar[3].selectExpr)
+ }
+ case 138:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:893
+ {
+ yyVAL.selectExpr = &StarExpr{}
+ }
+ case 139:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:897
+ {
+ yyVAL.selectExpr = &AliasedExpr{Expr: yyDollar[1].expr, As: yyDollar[2].colIdent}
+ }
+ case 140:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:901
+ {
+ yyVAL.selectExpr = &StarExpr{TableName: TableName{Name: yyDollar[1].tableIdent}}
+ }
+ case 141:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line sql.y:905
+ {
+ yyVAL.selectExpr = &StarExpr{TableName: TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent}}
+ }
+ case 142:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:910
+ {
+ yyVAL.colIdent = ColIdent{}
+ }
+ case 143:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:914
+ {
+ yyVAL.colIdent = yyDollar[1].colIdent
+ }
+ case 144:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:918
+ {
+ yyVAL.colIdent = yyDollar[2].colIdent
+ }
+ case 146:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:925
+ {
+ yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes))
+ }
+ case 147:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:931
+ {
+ yyVAL.tableExprs = yyDollar[2].tableExprs
+ }
+ case 148:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:937
+ {
+ yyVAL.tableExprs = TableExprs{yyDollar[1].tableExpr}
+ }
+ case 149:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:941
+ {
+ yyVAL.tableExprs = append(yyVAL.tableExprs, yyDollar[3].tableExpr)
+ }
+ case 152:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:951
+ {
+ yyVAL.tableExpr = yyDollar[1].aliasedTableName
+ }
+ case 153:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:955
+ {
+ yyVAL.tableExpr = &AliasedTableExpr{Expr: yyDollar[1].subquery}
+ }
+ case 154:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:959
+ {
+ yyVAL.tableExpr = &AliasedTableExpr{Expr: yyDollar[1].subquery, As: yyDollar[3].tableIdent}
+ }
+ case 155:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:963
+ {
+ yyVAL.tableExpr = &ParenTableExpr{Exprs: yyDollar[2].tableExprs}
+ }
+ case 156:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:969
+ {
+ yyVAL.aliasedTableName = &AliasedTableExpr{Expr: yyDollar[1].tableName, As: yyDollar[2].tableIdent}
+ }
+ case 157:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:975
+ {
+ yyVAL.columns = Columns{yyDollar[1].colIdent}
+ }
+ case 158:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:979
+ {
+ yyVAL.columns = append(yyVAL.columns, yyDollar[3].colIdent)
+ }
+ case 159:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line sql.y:985
+ {
+ yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr, Condition: yyDollar[4].joinCondition}
+ }
+ case 160:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line sql.y:989
+ {
+ yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr, Condition: yyDollar[4].joinCondition}
+ }
+ case 161:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:993
+ {
+ yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr}
+ }
+ case 162:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:999
+ {
+ yyVAL.joinCondition = JoinCondition{On: yyDollar[2].expr}
+ }
+ case 163:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line sql.y:1001
+ {
+ yyVAL.joinCondition = JoinCondition{Using: yyDollar[3].columns}
+ }
+ case 164:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:1005
+ {
+ yyVAL.joinCondition = JoinCondition{}
+ }
+ case 165:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1007
+ {
+ yyVAL.joinCondition = yyDollar[1].joinCondition
+ }
+ case 166:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:1010
+ {
+ yyVAL.empty = struct{}{}
+ }
+ case 167:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1012
+ {
+ yyVAL.empty = struct{}{}
+ }
+ case 168:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:1015
+ {
+ yyVAL.tableIdent = NewTableIdent("")
+ }
+ case 169:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1019
+ {
+ yyVAL.tableIdent = yyDollar[1].tableIdent
+ }
+ case 170:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1023
+ {
+ yyVAL.tableIdent = yyDollar[2].tableIdent
+ }
+ case 172:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1030
+ {
+ yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes))
+ }
+ case 173:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1036
+ {
+ yyVAL.str = JoinStr
+ }
+ case 174:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1040
+ {
+ yyVAL.str = InnerJoinStr
+ }
+ case 175:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1044
+ {
+ yyVAL.str = CrossJoinStr
+ }
+ case 176:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1050
+ {
+ yyVAL.str = LeftJoinStr
+ }
+ case 177:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1054
+ {
+ yyVAL.str = LeftJoinStr
+ }
+ case 178:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1060
+ {
+ yyVAL.str = NaturalJoinStr
+ }
+ case 179:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1064
+ {
+ yyVAL.str = NaturalLeftJoinStr
+ }
+ case 180:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1070
+ {
+ yyVAL.tableName = yyDollar[2].tableName
+ }
+ case 181:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1074
+ {
+ yyVAL.tableName = yyDollar[1].tableName
+ }
+ case 182:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1080
+ {
+ yyVAL.tableName = TableName{Name: yyDollar[1].tableIdent}
+ }
+ case 183:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1084
+ {
+ yyVAL.tableName = TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent}
+ }
+ case 184:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:1089
+ {
+ yyVAL.expr = nil
+ }
+ case 185:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1093
+ {
+ yyVAL.expr = yyDollar[2].expr
+ }
+ case 186:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1099
+ {
+ yyVAL.expr = yyDollar[1].expr
+ }
+ case 187:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1103
+ {
+ yyVAL.expr = &AndExpr{Left: yyDollar[1].expr, Right: yyDollar[3].expr}
+ }
+ case 188:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1107
+ {
+ yyVAL.expr = &OrExpr{Left: yyDollar[1].expr, Right: yyDollar[3].expr}
+ }
+ case 189:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1111
+ {
+ yyVAL.expr = &NotExpr{Expr: yyDollar[2].expr}
+ }
+ case 190:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1115
+ {
+ yyVAL.expr = &IsExpr{Operator: yyDollar[3].str, Expr: yyDollar[1].expr}
+ }
+ case 191:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1119
+ {
+ yyVAL.expr = yyDollar[1].expr
+ }
+ case 192:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1123
+ {
+ yyVAL.expr = &Default{ColName: yyDollar[2].str}
+ }
+ case 193:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:1129
+ {
+ yyVAL.str = ""
+ }
+ case 194:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1133
+ {
+ yyVAL.str = string(yyDollar[2].bytes)
+ }
+ case 195:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1139
+ {
+ yyVAL.boolVal = BoolVal(true)
+ }
+ case 196:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1143
+ {
+ yyVAL.boolVal = BoolVal(false)
+ }
+ case 197:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1149
+ {
+ yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: yyDollar[2].str, Right: yyDollar[3].expr}
+ }
+ case 198:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1153
+ {
+ yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: InStr, Right: yyDollar[3].colTuple}
+ }
+ case 199:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line sql.y:1157
+ {
+ yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: NotInStr, Right: yyDollar[4].colTuple}
+ }
+ case 200:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line sql.y:1161
+ {
+ yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: LikeStr, Right: yyDollar[3].expr, Escape: yyDollar[4].expr}
+ }
+ case 201:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line sql.y:1165
+ {
+ yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: NotLikeStr, Right: yyDollar[4].expr, Escape: yyDollar[5].expr}
+ }
+ case 202:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1169
+ {
+ yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: RegexpStr, Right: yyDollar[3].expr}
+ }
+ case 203:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line sql.y:1173
+ {
+ yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: NotRegexpStr, Right: yyDollar[4].expr}
+ }
+ case 204:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line sql.y:1177
+ {
+ yyVAL.expr = &RangeCond{Left: yyDollar[1].expr, Operator: BetweenStr, From: yyDollar[3].expr, To: yyDollar[5].expr}
+ }
+ case 205:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line sql.y:1181
+ {
+ yyVAL.expr = &RangeCond{Left: yyDollar[1].expr, Operator: NotBetweenStr, From: yyDollar[4].expr, To: yyDollar[6].expr}
+ }
+ case 206:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1185
+ {
+ yyVAL.expr = &ExistsExpr{Subquery: yyDollar[2].subquery}
+ }
+ case 207:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1191
+ {
+ yyVAL.str = IsNullStr
+ }
+ case 208:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1195
+ {
+ yyVAL.str = IsNotNullStr
+ }
+ case 209:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1199
+ {
+ yyVAL.str = IsTrueStr
+ }
+ case 210:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1203
+ {
+ yyVAL.str = IsNotTrueStr
+ }
+ case 211:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1207
+ {
+ yyVAL.str = IsFalseStr
+ }
+ case 212:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1211
+ {
+ yyVAL.str = IsNotFalseStr
+ }
+ case 213:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1217
+ {
+ yyVAL.str = EqualStr
+ }
+ case 214:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1221
+ {
+ yyVAL.str = LessThanStr
+ }
+ case 215:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1225
+ {
+ yyVAL.str = GreaterThanStr
+ }
+ case 216:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1229
+ {
+ yyVAL.str = LessEqualStr
+ }
+ case 217:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1233
+ {
+ yyVAL.str = GreaterEqualStr
+ }
+ case 218:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1237
+ {
+ yyVAL.str = NotEqualStr
+ }
+ case 219:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1241
+ {
+ yyVAL.str = NullSafeNotEqualStr
+ }
+ case 220:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:1246
+ {
+ yyVAL.expr = nil
+ }
+ case 221:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1250
+ {
+ yyVAL.expr = yyDollar[2].expr
+ }
+ case 222:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1256
+ {
+ yyVAL.colTuple = yyDollar[1].valTuple
+ }
+ case 223:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1260
+ {
+ yyVAL.colTuple = yyDollar[1].subquery
+ }
+ case 224:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1264
+ {
+ yyVAL.colTuple = ListArg(yyDollar[1].bytes)
+ }
+ case 225:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1270
+ {
+ yyVAL.subquery = &Subquery{yyDollar[2].selStmt}
+ }
+ case 226:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1276
+ {
+ yyVAL.exprs = Exprs{yyDollar[1].expr}
+ }
+ case 227:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1280
+ {
+ yyVAL.exprs = append(yyDollar[1].exprs, yyDollar[3].expr)
+ }
+ case 228:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1286
+ {
+ yyVAL.expr = yyDollar[1].expr
+ }
+ case 229:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1290
+ {
+ yyVAL.expr = yyDollar[1].boolVal
+ }
+ case 230:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1294
+ {
+ yyVAL.expr = yyDollar[1].colName
+ }
+ case 231:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1298
+ {
+ yyVAL.expr = yyDollar[1].expr
+ }
+ case 232:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1302
+ {
+ yyVAL.expr = yyDollar[1].subquery
+ }
+ case 233:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1306
+ {
+ yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitAndStr, Right: yyDollar[3].expr}
+ }
+ case 234:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1310
+ {
+ yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitOrStr, Right: yyDollar[3].expr}
+ }
+ case 235:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1314
+ {
+ yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitXorStr, Right: yyDollar[3].expr}
+ }
+ case 236:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1318
+ {
+ yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: PlusStr, Right: yyDollar[3].expr}
+ }
+ case 237:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1322
+ {
+ yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: MinusStr, Right: yyDollar[3].expr}
+ }
+ case 238:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1326
+ {
+ yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: MultStr, Right: yyDollar[3].expr}
+ }
+ case 239:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1330
+ {
+ yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: DivStr, Right: yyDollar[3].expr}
+ }
+ case 240:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1334
+ {
+ yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: IntDivStr, Right: yyDollar[3].expr}
+ }
+ case 241:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1338
+ {
+ yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ModStr, Right: yyDollar[3].expr}
+ }
+ case 242:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1342
+ {
+ yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ModStr, Right: yyDollar[3].expr}
+ }
+ case 243:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1346
+ {
+ yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ShiftLeftStr, Right: yyDollar[3].expr}
+ }
+ case 244:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1350
+ {
+ yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ShiftRightStr, Right: yyDollar[3].expr}
+ }
+ case 245:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1354
+ {
+ if num, ok := yyDollar[2].expr.(*SQLVal); ok && num.Type == IntVal {
+ yyVAL.expr = num
+ } else {
+ yyVAL.expr = &UnaryExpr{Operator: UPlusStr, Expr: yyDollar[2].expr}
+ }
+ }
+ case 246:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1362
+ {
+ if num, ok := yyDollar[2].expr.(*SQLVal); ok && num.Type == IntVal {
+ // Handle double negative
+ if num.Val[0] == '-' {
+ num.Val = num.Val[1:]
+ yyVAL.expr = num
+ } else {
+ yyVAL.expr = NewIntVal(append([]byte("-"), num.Val...))
+ }
+ } else {
+ yyVAL.expr = &UnaryExpr{Operator: UMinusStr, Expr: yyDollar[2].expr}
+ }
+ }
+ case 247:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1376
+ {
+ yyVAL.expr = &UnaryExpr{Operator: TildaStr, Expr: yyDollar[2].expr}
+ }
+ case 248:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1380
+ {
+ yyVAL.expr = &UnaryExpr{Operator: BangStr, Expr: yyDollar[2].expr}
+ }
+ case 249:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1384
+ {
+ // This rule prevents the usage of INTERVAL
+ // as a function. If support is needed for that,
+ // we'll need to revisit this. The solution
+ // will be non-trivial because of grammar conflicts.
+ yyVAL.expr = &IntervalExpr{Expr: yyDollar[2].expr, Unit: yyDollar[3].colIdent.String()}
+ }
+ case 254:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line sql.y:1402
+ {
+ yyVAL.expr = &FuncExpr{Name: yyDollar[1].colIdent, Exprs: yyDollar[3].selectExprs}
+ }
+ case 255:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line sql.y:1406
+ {
+ yyVAL.expr = &FuncExpr{Name: yyDollar[1].colIdent, Distinct: true, Exprs: yyDollar[4].selectExprs}
+ }
+ case 256:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line sql.y:1410
+ {
+ yyVAL.expr = &FuncExpr{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].colIdent, Exprs: yyDollar[5].selectExprs}
+ }
+ case 257:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line sql.y:1420
+ {
+ yyVAL.expr = &ConvertExpr{Expr: yyDollar[3].expr, Type: yyDollar[5].convertType}
+ }
+ case 258:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line sql.y:1424
+ {
+ yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: nil}
+ }
+ case 259:
+ yyDollar = yyS[yypt-8 : yypt+1]
+ //line sql.y:1428
+ {
+ yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: yyDollar[7].expr}
+ }
+ case 260:
+ yyDollar = yyS[yypt-7 : yypt+1]
+ //line sql.y:1432
+ {
+ yyVAL.expr = &GroupConcatExpr{Distinct: yyDollar[3].str, Exprs: yyDollar[4].selectExprs, OrderBy: yyDollar[5].orderBy, Separator: yyDollar[6].str}
+ }
+ case 261:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line sql.y:1436
+ {
+ yyVAL.expr = &CaseExpr{Expr: yyDollar[2].expr, Whens: yyDollar[3].whens, Else: yyDollar[4].expr}
+ }
+ case 262:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line sql.y:1440
+ {
+ yyVAL.expr = &ValuesFuncExpr{Name: yyDollar[3].colName}
+ }
+ case 263:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1450
+ {
+ yyVAL.expr = &TimeExpr{Expr: NewColIdent("current_timestamp")}
+ }
+ case 264:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1455
+ {
+ yyVAL.expr = &TimeExpr{Expr: NewColIdent("current_date")}
+ }
+ case 265:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1460
+ {
+ yyVAL.expr = &TimeExpr{Expr: NewColIdent("current_time")}
+ }
+ case 266:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line sql.y:1470
+ {
+ yyVAL.expr = &FuncExpr{Name: NewColIdent("if"), Exprs: yyDollar[3].selectExprs}
+ }
+ case 267:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line sql.y:1474
+ {
+ yyVAL.expr = &FuncExpr{Name: NewColIdent("mod"), Exprs: yyDollar[3].selectExprs}
+ }
+ case 268:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line sql.y:1478
+ {
+ yyVAL.expr = &FuncExpr{Name: NewColIdent("replace"), Exprs: yyDollar[3].selectExprs}
+ }
+ case 269:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1484
+ {
+ yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal}
+ }
+ case 270:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1488
+ {
+ yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)}
+ }
+ case 271:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1492
+ {
+ yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal}
+ }
+ case 272:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1496
+ {
+ yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)}
+ yyVAL.convertType.Length = yyDollar[2].LengthScaleOption.Length
+ yyVAL.convertType.Scale = yyDollar[2].LengthScaleOption.Scale
+ }
+ case 273:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1502
+ {
+ yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal}
+ }
+ case 274:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1506
+ {
+ yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)}
+ }
+ case 275:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1510
+ {
+ yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)}
+ }
+ case 276:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1514
+ {
+ yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal}
+ }
+ case 277:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1518
+ {
+ yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)}
+ }
+ case 278:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1522
+ {
+ yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)}
+ }
+ case 279:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1526
+ {
+ yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal}
+ }
+ case 280:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1530
+ {
+ yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal}
+ }
+ case 281:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1534
+ {
+ yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal}
+ }
+ case 282:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1538
+ {
+ yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal}
+ }
+ case 283:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1542
+ {
+ yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal}
+ }
+ case 284:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1546
+ {
+ yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal}
+ }
+ case 285:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1550
+ {
+ yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)}
+ }
+ case 286:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:1555
+ {
+ yyVAL.expr = nil
+ }
+ case 287:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1559
+ {
+ yyVAL.expr = yyDollar[1].expr
+ }
+ case 288:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:1564
+ {
+ yyVAL.str = string("")
+ }
+ case 289:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1568
+ {
+ yyVAL.str = " separator '" + string(yyDollar[2].bytes) + "'"
+ }
+ case 290:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1574
+ {
+ yyVAL.whens = []*When{yyDollar[1].when}
+ }
+ case 291:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1578
+ {
+ yyVAL.whens = append(yyDollar[1].whens, yyDollar[2].when)
+ }
+ case 292:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line sql.y:1584
+ {
+ yyVAL.when = &When{Cond: yyDollar[2].expr, Val: yyDollar[4].expr}
+ }
+ case 293:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:1589
+ {
+ yyVAL.expr = nil
+ }
+ case 294:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1593
+ {
+ yyVAL.expr = yyDollar[2].expr
+ }
+ case 295:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1599
+ {
+ yyVAL.colName = &ColName{Name: yyDollar[1].colIdent}
+ }
+ case 296:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1603
+ {
+ yyVAL.colName = &ColName{Qualifier: TableName{Name: yyDollar[1].tableIdent}, Name: yyDollar[3].colIdent}
+ }
+ case 297:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line sql.y:1607
+ {
+ yyVAL.colName = &ColName{Qualifier: TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent}, Name: yyDollar[5].colIdent}
+ }
+ case 298:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1613
+ {
+ yyVAL.expr = NewStrVal(yyDollar[1].bytes)
+ }
+ case 299:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1617
+ {
+ yyVAL.expr = NewHexVal(yyDollar[1].bytes)
+ }
+ case 300:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1621
+ {
+ yyVAL.expr = NewIntVal(yyDollar[1].bytes)
+ }
+ case 301:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1625
+ {
+ yyVAL.expr = NewFloatVal(yyDollar[1].bytes)
+ }
+ case 302:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1629
+ {
+ yyVAL.expr = NewHexNum(yyDollar[1].bytes)
+ }
+ case 303:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1633
+ {
+ yyVAL.expr = NewValArg(yyDollar[1].bytes)
+ }
+ case 304:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1637
+ {
+ yyVAL.expr = &NullVal{}
+ }
+ case 305:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:1642
+ {
+ yyVAL.exprs = nil
+ }
+ case 306:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1646
+ {
+ yyVAL.exprs = yyDollar[3].exprs
+ }
+ case 307:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:1651
+ {
+ yyVAL.expr = nil
+ }
+ case 308:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1655
+ {
+ yyVAL.expr = yyDollar[2].expr
+ }
+ case 309:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:1660
+ {
+ yyVAL.orderBy = nil
+ }
+ case 310:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1664
+ {
+ yyVAL.orderBy = yyDollar[3].orderBy
+ }
+ case 311:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1670
+ {
+ yyVAL.orderBy = OrderBy{yyDollar[1].order}
+ }
+ case 312:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1674
+ {
+ yyVAL.orderBy = append(yyDollar[1].orderBy, yyDollar[3].order)
+ }
+ case 313:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1680
+ {
+ yyVAL.order = &Order{Expr: yyDollar[1].expr, Direction: yyDollar[2].str}
+ }
+ case 314:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:1685
+ {
+ yyVAL.str = AscScr
+ }
+ case 315:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1689
+ {
+ yyVAL.str = AscScr
+ }
+ case 316:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1693
+ {
+ yyVAL.str = DescScr
+ }
+ case 317:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:1698
+ {
+ yyVAL.limit = nil
+ }
+ case 318:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1702
+ {
+ yyVAL.limit = &Limit{Rowcount: yyDollar[2].expr}
+ }
+ case 319:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line sql.y:1706
+ {
+ yyVAL.limit = &Limit{Offset: yyDollar[2].expr, Rowcount: yyDollar[4].expr}
+ }
+ case 320:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line sql.y:1710
+ {
+ yyVAL.limit = &Limit{Offset: yyDollar[4].expr, Rowcount: yyDollar[2].expr}
+ }
+ case 321:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1723
+ {
+ yyVAL.ins = &Insert{Rows: yyDollar[2].values}
+ }
+ case 322:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1727
+ {
+ yyVAL.ins = &Insert{Rows: yyDollar[1].selStmt}
+ }
+ case 323:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1731
+ {
+ // Drop the redundant parenthesis.
+ yyVAL.ins = &Insert{Rows: yyDollar[2].selStmt}
+ }
+ case 324:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line sql.y:1736
+ {
+ yyVAL.ins = &Insert{Columns: yyDollar[2].columns, Rows: yyDollar[5].values}
+ }
+ case 325:
+ yyDollar = yyS[yypt-4 : yypt+1]
+ //line sql.y:1740
+ {
+ yyVAL.ins = &Insert{Columns: yyDollar[2].columns, Rows: yyDollar[4].selStmt}
+ }
+ case 326:
+ yyDollar = yyS[yypt-6 : yypt+1]
+ //line sql.y:1744
+ {
+ // Drop the redundant parenthesis.
+ yyVAL.ins = &Insert{Columns: yyDollar[2].columns, Rows: yyDollar[5].selStmt}
+ }
+ case 327:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1751
+ {
+ yyVAL.columns = Columns{yyDollar[1].colIdent}
+ }
+ case 328:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1755
+ {
+ yyVAL.columns = Columns{yyDollar[3].colIdent}
+ }
+ case 329:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1759
+ {
+ yyVAL.columns = append(yyVAL.columns, yyDollar[3].colIdent)
+ }
+ case 330:
+ yyDollar = yyS[yypt-5 : yypt+1]
+ //line sql.y:1763
+ {
+ yyVAL.columns = append(yyVAL.columns, yyDollar[5].colIdent)
+ }
+ case 331:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1769
+ {
+ yyVAL.values = Values{yyDollar[1].valTuple}
+ }
+ case 332:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1773
+ {
+ yyVAL.values = append(yyDollar[1].values, yyDollar[3].valTuple)
+ }
+ case 333:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1779
+ {
+ yyVAL.valTuple = yyDollar[1].valTuple
+ }
+ case 334:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1783
+ {
+ yyVAL.valTuple = ValTuple{}
+ }
+ case 335:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1789
+ {
+ yyVAL.valTuple = ValTuple(yyDollar[2].exprs)
+ }
+ case 336:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1795
+ {
+ if len(yyDollar[1].valTuple) == 1 {
+ yyVAL.expr = &ParenExpr{yyDollar[1].valTuple[0]}
+ } else {
+ yyVAL.expr = yyDollar[1].valTuple
+ }
+ }
+ case 337:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1805
+ {
+ yyVAL.updateExprs = UpdateExprs{yyDollar[1].updateExpr}
+ }
+ case 338:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1809
+ {
+ yyVAL.updateExprs = append(yyDollar[1].updateExprs, yyDollar[3].updateExpr)
+ }
+ case 339:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1815
+ {
+ yyVAL.updateExpr = &UpdateExpr{Name: yyDollar[1].colName, Expr: yyDollar[3].expr}
+ }
+ case 340:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:1820
+ {
+ yyVAL.byt = 0
+ }
+ case 341:
+ yyDollar = yyS[yypt-2 : yypt+1]
+ //line sql.y:1822
+ {
+ yyVAL.byt = 1
+ }
+ case 342:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:1825
+ {
+ yyVAL.empty = struct{}{}
+ }
+ case 343:
+ yyDollar = yyS[yypt-3 : yypt+1]
+ //line sql.y:1827
+ {
+ yyVAL.empty = struct{}{}
+ }
+ case 344:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:1830
+ {
+ yyVAL.str = ""
+ }
+ case 345:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1832
+ {
+ yyVAL.str = IgnoreStr
+ }
+ case 346:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:1835
+ {
+ yyVAL.empty = struct{}{}
+ }
+ case 347:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1837
+ {
+ yyVAL.empty = struct{}{}
+ }
+ case 348:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1839
+ {
+ yyVAL.empty = struct{}{}
+ }
+ case 349:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1843
+ {
+ yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes))
+ }
+ case 350:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1847
+ {
+ yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes))
+ }
+ case 352:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1854
+ {
+ yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes))
+ }
+ case 353:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1860
+ {
+ yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes))
+ }
+ case 354:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1864
+ {
+ yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes))
+ }
+ case 356:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:1871
+ {
+ yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes))
+ }
+ case 464:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:2004
+ {
+ if incNesting(yylex) {
+ yylex.Error("max nesting level reached")
+ return 1
+ }
+ }
+ case 465:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:2013
+ {
+ decNesting(yylex)
+ }
+ case 466:
+ yyDollar = yyS[yypt-0 : yypt+1]
+ //line sql.y:2018
+ {
+ forceEOF(yylex)
+ }
+ case 467:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:2022
+ {
+ forceEOF(yylex)
+ }
+ case 468:
+ yyDollar = yyS[yypt-1 : yypt+1]
+ //line sql.y:2026
+ {
+ forceEOF(yylex)
+ }
+ }
+ goto yystack /* stack new state and value */
+}
diff --git a/vendor/github.com/CovenantSQL/sqlparser/sql.y b/vendor/github.com/CovenantSQL/sqlparser/sql.y
new file mode 100644
index 000000000..b0199ec56
--- /dev/null
+++ b/vendor/github.com/CovenantSQL/sqlparser/sql.y
@@ -0,0 +1,2028 @@
+/*
+Copyright 2017 Google Inc.
+Copyright 2018 The CovenantSQL Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+%{
+package sqlparser
+
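+// setParseTree stores the finished parse tree on the Tokenizer so it can be retrieved after parsing.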
+func setParseTree(yylex interface{}, stmt Statement) {
+ yylex.(*Tokenizer).ParseTree = stmt
+}
+
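+// setAllowComments controls whether the lexer returns COMMENT tokens to the parser.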
+func setAllowComments(yylex interface{}, allow bool) {
+ yylex.(*Tokenizer).AllowComments = allow
+}
+
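+// setDDL records the partially parsed DDL statement on the Tokenizer.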
+func setDDL(yylex interface{}, ddl *DDL) {
+ yylex.(*Tokenizer).partialDDL = ddl
+}
+
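+// incNesting increments the expression nesting depth and reports true once the depth reaches 200.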
+func incNesting(yylex interface{}) bool {
+ yylex.(*Tokenizer).nesting++
+ if yylex.(*Tokenizer).nesting == 200 {
+ return true
+ }
+ return false
+}
+
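+// decNesting decrements the expression nesting depth.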
+func decNesting(yylex interface{}) {
+ yylex.(*Tokenizer).nesting--
+}
+
+// forceEOF forces the lexer to end prematurely. Not all SQL statements
+// are supported by the Parser, thus calling forceEOF will make the lexer
+// return EOF early.
+func forceEOF(yylex interface{}) {
+ yylex.(*Tokenizer).ForceEOF = true
+}
+
+%}
+
+%union {
+ empty struct{}
+ statement Statement
+ selStmt SelectStatement
+ ddl *DDL
+ ins *Insert
+ byt byte
+ bytes []byte
+ bytes2 [][]byte
+ str string
+ strs []string
+ selectExprs SelectExprs
+ selectExpr SelectExpr
+ columns Columns
+ colName *ColName
+ tableExprs TableExprs
+ tableExpr TableExpr
+ joinCondition JoinCondition
+ tableName TableName
+ expr Expr
+ exprs Exprs
+ boolVal BoolVal
+ colTuple ColTuple
+ values Values
+ valTuple ValTuple
+ subquery *Subquery
+ whens []*When
+ when *When
+ orderBy OrderBy
+ order *Order
+ limit *Limit
+ updateExprs UpdateExprs
+ setExprs SetExprs
+ updateExpr *UpdateExpr
+ setExpr *SetExpr
+ colIdent ColIdent
+ tableIdent TableIdent
+ convertType *ConvertType
+ aliasedTableName *AliasedTableExpr
+ TableSpec *TableSpec
+ columnType ColumnType
+ colKeyOpt ColumnKeyOption
+ optVal *SQLVal
+ LengthScaleOption LengthScaleOption
+ columnDefinition *ColumnDefinition
+ indexDefinition *IndexDefinition
+ indexInfo *IndexInfo
+ indexColumn *IndexColumn
+ indexColumns []*IndexColumn
+}
+
+%token LEX_ERROR
+%left <bytes> UNION
+%token <bytes> SELECT INSERT UPDATE DELETE FROM WHERE GROUP HAVING ORDER BY LIMIT OFFSET
+%token <bytes> ALL DISTINCT AS EXISTS ASC DESC INTO KEY DEFAULT SET
+%token <bytes> VALUES LAST_INSERT_ID
+%left <bytes> JOIN LEFT RIGHT INNER OUTER CROSS NATURAL
+%left <bytes> ON USING
+%token <empty> '(' ',' ')'
+%token <bytes> ID HEX STRING INTEGRAL FLOAT HEXNUM VALUE_ARG LIST_ARG COMMENT
+%token <bytes> NULL TRUE FALSE
+
+// Precedence dictated by mysql. But the vitess grammar is simplified.
+// Some of these operators don't conflict in our situation. Nevertheless,
+// it's better to have these listed in the correct order. Also, we don't
+// support all operators yet.
+%left <bytes> OR
+%left <bytes> AND
+%right <bytes> NOT '!'
+%left <bytes> BETWEEN CASE WHEN THEN ELSE END
+%left <bytes> '=' '<' '>' LE GE NE IS LIKE REGEXP IN NULL_SAFE_NOTEQUAL
+%left <bytes> '|'
+%left <bytes> '&'
+%left <bytes> SHIFT_LEFT SHIFT_RIGHT
+%left <bytes> '+' '-'
+%left <bytes> '*' '/' DIV '%' MOD
+%left <bytes> '^'
+%right <bytes> '~' UNARY
+%right <bytes> INTERVAL
+%nonassoc <bytes> '.'
+
+// DDL Tokens
+%token <bytes> CREATE ALTER DROP RENAME ADD
+%token <bytes> TABLE INDEX TO IGNORE IF UNIQUE PRIMARY COLUMN CONSTRAINT FOREIGN
+%token <bytes> SHOW DESCRIBE DATE ESCAPE
+
+// Type Tokens
+%token <bytes> TINYINT SMALLINT MEDIUMINT INT INTEGER BIGINT INTNUM
+%token <bytes> REAL DOUBLE FLOAT_TYPE DECIMAL NUMERIC
+%token <bytes> TIME TIMESTAMP DATETIME YEAR
+%token <bytes> CHAR VARCHAR BOOL NCHAR
+%token <bytes> TEXT TINYTEXT MEDIUMTEXT LONGTEXT
+%token <bytes> BLOB TINYBLOB MEDIUMBLOB LONGBLOB
+
+// Type Modifiers
+%token <bytes> AUTO_INCREMENT SIGNED UNSIGNED ZEROFILL
+
+// Supported SHOW tokens
+%token <bytes> TABLES
+
+// Functions
+%token <bytes> CURRENT_TIMESTAMP CURRENT_DATE CURRENT_TIME
+%token <bytes> REPLACE
+%token <bytes> CAST
+%token <bytes> SUBSTR
+%token <bytes> GROUP_CONCAT SEPARATOR
+
+// MySQL reserved words that are unused by this grammar will map to this token.
+%token <bytes> UNUSED
+
+%type <statement> command
+%type <selStmt> select_statement base_select union_lhs union_rhs
+%type <statement> insert_statement update_statement delete_statement
+%type <statement> create_statement alter_statement drop_statement
+%type <ddl> create_table_prefix
+%type <statement> show_statement other_statement
+%type <bytes2> comment_opt comment_list
+%type <str> union_op insert_or_replace
+%type <str> distinct_opt separator_opt
+%type <expr> like_escape_opt
+%type <selectExprs> select_expression_list select_expression_list_opt
+%type <selectExpr> select_expression
+%type <expr> expression
+%type <tableExprs> from_opt table_references
+%type <tableExpr> table_reference table_factor join_table
+%type <joinCondition> join_condition join_condition_opt
+%type <str> inner_join outer_join natural_join
+%type <tableName> table_name into_table_name
+%type <aliasedTableName> aliased_table_name
+%type <expr> where_expression_opt
+%type <expr> condition
+%type <boolVal> boolean_value
+%type <str> compare
+%type <ins> insert_data
+%type <expr> value value_expression
+%type <expr> function_call_keyword function_call_nonkeyword function_call_generic function_call_conflict
+%type <str> is_suffix
+%type <colTuple> col_tuple
+%type <exprs> expression_list
+%type <values> tuple_list
+%type <valTuple> row_tuple tuple_or_empty
+%type <expr> tuple_expression
+%type <subquery> subquery
+%type <colName> column_name
+%type <whens> when_expression_list
+%type <when> when_expression
+%type <expr> expression_opt else_expression_opt
+%type <exprs> group_by_opt
+%type <expr> having_opt
+%type <orderBy> order_by_opt order_list
+%type <order> order
+%type <str> asc_desc_opt
+%type <limit> limit_opt
+%type <columns> ins_column_list column_list
+%type <updateExprs> update_list
+%type <updateExpr> update_expression
+%type <str> ignore_opt default_opt
+%type <byt> exists_opt
+%type <empty> not_exists_opt constraint_opt
+%type <bytes> reserved_keyword non_reserved_keyword
+%type <colIdent> sql_id reserved_sql_id col_alias as_ci_opt
+%type <tableIdent> table_id reserved_table_id table_alias as_opt_id
+%type <empty> as_opt
+%type <empty> ddl_force_eof
+%type <convertType> convert_type
+%type <columnType> column_type
+%type <columnType> int_type decimal_type numeric_type time_type char_type
+%type <optVal> length_opt column_default_opt
+%type <boolVal> unsigned_opt zero_fill_opt
+%type <LengthScaleOption> float_length_opt decimal_length_opt
+%type <boolVal> null_opt auto_increment_opt
+%type <colKeyOpt> column_key_opt
+%type <columnDefinition> column_definition
+%type <indexDefinition> index_definition
+%type <str> index_or_key
+%type <TableSpec> table_spec table_column_list
+%type <str> table_option_list table_option table_opt_value
+%type <indexInfo> index_info
+%type <indexColumn> index_column
+%type <indexColumns> index_column_list
+%type <empty> alter_object_type
+
+%start any_command
+
+%%
+
+any_command:
+ command semicolon_opt
+ {
+ setParseTree(yylex, $1)
+ }
+
+semicolon_opt:
+/*empty*/ {}
+| ';' {}
+
+command:
+ select_statement
+ {
+ $$ = $1
+ }
+| insert_statement
+| update_statement
+| delete_statement
+| create_statement
+| alter_statement
+| drop_statement
+| show_statement
+| other_statement
+
+select_statement:
+ base_select order_by_opt limit_opt
+ {
+ sel := $1.(*Select)
+ sel.OrderBy = $2
+ sel.Limit = $3
+ $$ = sel
+ }
+| union_lhs union_op union_rhs order_by_opt limit_opt
+ {
+ $$ = &Union{Type: $2, Left: $1, Right: $3, OrderBy: $4, Limit: $5}
+ }
+
+// base_select is an unparenthesized SELECT with no ORDER BY clause or anything following it.
+base_select:
+ SELECT comment_opt distinct_opt select_expression_list from_opt where_expression_opt group_by_opt having_opt
+ {
+ $$ = &Select{Comments: Comments($2), Distinct: $3, SelectExprs: $4, From: $5, Where: NewWhere(WhereStr, $6), GroupBy: GroupBy($7), Having: NewWhere(HavingStr, $8)}
+ }
+| SELECT comment_opt distinct_opt select_expression_list
+ {
+ $$ = &Select{Comments: Comments($2), Distinct: $3, SelectExprs: $4}
+ }
+
+union_lhs:
+ select_statement
+ {
+ $$ = $1
+ }
+| openb select_statement closeb
+ {
+ $$ = &ParenSelect{Select: $2}
+ }
+
+union_rhs:
+ base_select
+ {
+ $$ = $1
+ }
+| openb select_statement closeb
+ {
+ $$ = &ParenSelect{Select: $2}
+ }
+
+
+insert_statement:
+ insert_or_replace comment_opt ignore_opt into_table_name insert_data
+ {
+ // insert_data returns a *Insert pre-filled with Columns & Values
+ ins := $5
+ ins.Action = $1
+ ins.Comments = $2
+ ins.Ignore = $3
+ ins.Table = $4
+ $$ = ins
+ }
+| insert_or_replace comment_opt ignore_opt into_table_name SET update_list
+ {
+ cols := make(Columns, 0, len($6))
+ vals := make(ValTuple, 0, len($6))
+ for _, updateList := range $6 {
+ cols = append(cols, updateList.Name.Name)
+ vals = append(vals, updateList.Expr)
+ }
+ $$ = &Insert{Action: $1, Comments: Comments($2), Ignore: $3, Table: $4, Columns: cols, Rows: Values{vals}}
+ }
+
+insert_or_replace:
+ INSERT
+ {
+ $$ = InsertStr
+ }
+| REPLACE
+ {
+ $$ = ReplaceStr
+ }
+| INSERT OR REPLACE
+ {
+ $$ = ReplaceStr
+ }
+
+update_statement:
+ UPDATE comment_opt table_references SET update_list where_expression_opt order_by_opt limit_opt
+ {
+ $$ = &Update{Comments: Comments($2), TableExprs: $3, Exprs: $5, Where: NewWhere(WhereStr, $6), OrderBy: $7, Limit: $8}
+ }
+
+delete_statement:
+ DELETE comment_opt FROM table_name where_expression_opt order_by_opt limit_opt
+ {
+ $$ = &Delete{Comments: Comments($2), TableExprs: TableExprs{&AliasedTableExpr{Expr:$4}}, Where: NewWhere(WhereStr, $5), OrderBy: $6, Limit: $7}
+ }
+
+create_statement:
+ create_table_prefix table_spec
+ {
+ $1.TableSpec = $2
+ $$ = $1
+ }
+| CREATE constraint_opt INDEX ID ON table_name ddl_force_eof
+ {
+ // CREATE INDEX is represented as its own DDL action on the indexed table.
+ $$ = &DDL{Action: CreateIndexStr, Table: $6, NewName:$6}
+ }
+
+create_table_prefix:
+ CREATE TABLE not_exists_opt table_name
+ {
+ $$ = &DDL{Action: CreateStr, NewName: $4}
+ setDDL(yylex, $$)
+ }
+
+table_spec:
+ '(' table_column_list ')' table_option_list
+ {
+ $$ = $2
+ $$.Options = $4
+ }
+
+table_column_list:
+ column_definition
+ {
+ $$ = &TableSpec{}
+ $$.AddColumn($1)
+ }
+| table_column_list ',' column_definition
+ {
+ $$.AddColumn($3)
+ }
+| table_column_list ',' index_definition
+ {
+ $$.AddIndex($3)
+ }
+
+column_definition:
+ ID column_type null_opt column_default_opt auto_increment_opt column_key_opt
+ {
+ $2.NotNull = $3
+ $2.Default = $4
+ $2.Autoincrement = $5
+ $2.KeyOpt = $6
+ $$ = &ColumnDefinition{Name: NewColIdent(string($1)), Type: $2}
+ }
+column_type:
+ numeric_type unsigned_opt zero_fill_opt
+ {
+ $$ = $1
+ $$.Unsigned = $2
+ $$.Zerofill = $3
+ }
+| char_type
+| time_type
+
+numeric_type:
+ int_type length_opt
+ {
+ $$ = $1
+ $$.Length = $2
+ }
+| decimal_type
+ {
+ $$ = $1
+ }
+
+int_type:
+ TINYINT
+ {
+ $$ = ColumnType{Type: string($1)}
+ }
+| SMALLINT
+ {
+ $$ = ColumnType{Type: string($1)}
+ }
+| MEDIUMINT
+ {
+ $$ = ColumnType{Type: string($1)}
+ }
+| INT
+ {
+ $$ = ColumnType{Type: string($1)}
+ }
+| INTEGER
+ {
+ $$ = ColumnType{Type: string($1)}
+ }
+| BIGINT
+ {
+ $$ = ColumnType{Type: string($1)}
+ }
+
+decimal_type:
+REAL float_length_opt
+ {
+ $$ = ColumnType{Type: string($1)}
+ $$.Length = $2.Length
+ $$.Scale = $2.Scale
+ }
+| DOUBLE float_length_opt
+ {
+ $$ = ColumnType{Type: string($1)}
+ $$.Length = $2.Length
+ $$.Scale = $2.Scale
+ }
+| FLOAT_TYPE float_length_opt
+ {
+ $$ = ColumnType{Type: string($1)}
+ $$.Length = $2.Length
+ $$.Scale = $2.Scale
+ }
+| DECIMAL decimal_length_opt
+ {
+ $$ = ColumnType{Type: string($1)}
+ $$.Length = $2.Length
+ $$.Scale = $2.Scale
+ }
+| NUMERIC decimal_length_opt
+ {
+ $$ = ColumnType{Type: string($1)}
+ $$.Length = $2.Length
+ $$.Scale = $2.Scale
+ }
+
+time_type:
+ DATE
+ {
+ $$ = ColumnType{Type: string($1)}
+ }
+| TIME length_opt
+ {
+ $$ = ColumnType{Type: string($1), Length: $2}
+ }
+| TIMESTAMP length_opt
+ {
+ $$ = ColumnType{Type: string($1), Length: $2}
+ }
+| DATETIME length_opt
+ {
+ $$ = ColumnType{Type: string($1), Length: $2}
+ }
+| YEAR
+ {
+ $$ = ColumnType{Type: string($1)}
+ }
+
+char_type:
+ CHAR length_opt
+ {
+ $$ = ColumnType{Type: string($1), Length: $2}
+ }
+| VARCHAR length_opt
+ {
+ $$ = ColumnType{Type: string($1), Length: $2}
+ }
+| TEXT
+ {
+ $$ = ColumnType{Type: string($1)}
+ }
+| TINYTEXT
+ {
+ $$ = ColumnType{Type: string($1)}
+ }
+| MEDIUMTEXT
+ {
+ $$ = ColumnType{Type: string($1)}
+ }
+| LONGTEXT
+ {
+ $$ = ColumnType{Type: string($1)}
+ }
+| BLOB
+ {
+ $$ = ColumnType{Type: string($1)}
+ }
+| TINYBLOB
+ {
+ $$ = ColumnType{Type: string($1)}
+ }
+| MEDIUMBLOB
+ {
+ $$ = ColumnType{Type: string($1)}
+ }
+| LONGBLOB
+ {
+ $$ = ColumnType{Type: string($1)}
+ }
+
+length_opt:
+ {
+ $$ = nil
+ }
+| '(' INTEGRAL ')'
+ {
+ $$ = NewIntVal($2)
+ }
+
+float_length_opt:
+ {
+ $$ = LengthScaleOption{}
+ }
+| '(' INTEGRAL ',' INTEGRAL ')'
+ {
+ $$ = LengthScaleOption{
+ Length: NewIntVal($2),
+ Scale: NewIntVal($4),
+ }
+ }
+
+decimal_length_opt:
+ {
+ $$ = LengthScaleOption{}
+ }
+| '(' INTEGRAL ')'
+ {
+ $$ = LengthScaleOption{
+ Length: NewIntVal($2),
+ }
+ }
+| '(' INTEGRAL ',' INTEGRAL ')'
+ {
+ $$ = LengthScaleOption{
+ Length: NewIntVal($2),
+ Scale: NewIntVal($4),
+ }
+ }
+
+unsigned_opt:
+ {
+ $$ = BoolVal(false)
+ }
+| UNSIGNED
+ {
+ $$ = BoolVal(true)
+ }
+
+zero_fill_opt:
+ {
+ $$ = BoolVal(false)
+ }
+| ZEROFILL
+ {
+ $$ = BoolVal(true)
+ }
+
+// Null opt returns false to mean NULL (i.e. the default) and true for NOT NULL
+null_opt:
+ {
+ $$ = BoolVal(false)
+ }
+| NULL
+ {
+ $$ = BoolVal(false)
+ }
+| NOT NULL
+ {
+ $$ = BoolVal(true)
+ }
+
+column_default_opt:
+ {
+ $$ = nil
+ }
+| DEFAULT STRING
+ {
+ $$ = NewStrVal($2)
+ }
+| DEFAULT INTEGRAL
+ {
+ $$ = NewIntVal($2)
+ }
+| DEFAULT FLOAT
+ {
+ $$ = NewFloatVal($2)
+ }
+| DEFAULT NULL
+ {
+ $$ = NewValArg($2)
+ }
+| DEFAULT CURRENT_TIMESTAMP
+ {
+ $$ = NewValArg($2)
+ }
+
+auto_increment_opt:
+ {
+ $$ = BoolVal(false)
+ }
+| AUTO_INCREMENT
+ {
+ $$ = BoolVal(true)
+ }
+
+column_key_opt:
+ {
+ $$ = colKeyNone
+ }
+| PRIMARY KEY
+ {
+ $$ = colKeyPrimary
+ }
+| KEY
+ {
+ $$ = colKey
+ }
+| UNIQUE KEY
+ {
+ $$ = colKeyUniqueKey
+ }
+| UNIQUE
+ {
+ $$ = colKeyUnique
+ }
+
+index_definition:
+ index_info '(' index_column_list ')'
+ {
+ $$ = &IndexDefinition{Info: $1, Columns: $3}
+ }
+
+index_info:
+ PRIMARY KEY
+ {
+ $$ = &IndexInfo{Type: string($1) + " " + string($2), Name: NewColIdent("PRIMARY"), Primary: true, Unique: true}
+ }
+| UNIQUE index_or_key ID
+ {
+ $$ = &IndexInfo{Type: string($1) + " " + string($2), Name: NewColIdent(string($3)), Unique: true}
+ }
+| UNIQUE ID
+ {
+ $$ = &IndexInfo{Type: string($1), Name: NewColIdent(string($2)), Unique: true}
+ }
+| index_or_key ID
+ {
+ $$ = &IndexInfo{Type: string($1), Name: NewColIdent(string($2)), Unique: false}
+ }
+
+index_or_key:
+ INDEX
+ {
+ $$ = string($1)
+ }
+ | KEY
+ {
+ $$ = string($1)
+ }
+
+index_column_list:
+ index_column
+ {
+ $$ = []*IndexColumn{$1}
+ }
+| index_column_list ',' index_column
+ {
+ $$ = append($$, $3)
+ }
+
+index_column:
+ sql_id length_opt
+ {
+ $$ = &IndexColumn{Column: $1, Length: $2}
+ }
+
+table_option_list:
+ {
+ $$ = ""
+ }
+| table_option
+ {
+ $$ = " " + string($1)
+ }
+| table_option_list ',' table_option
+ {
+ $$ = string($1) + ", " + string($3)
+ }
+
+// rather than explicitly parsing the various keywords for table options,
+// just accept any number of keywords, IDs, strings, numbers, and '='
+table_option:
+ table_opt_value
+ {
+ $$ = $1
+ }
+| table_option table_opt_value
+ {
+ $$ = $1 + " " + $2
+ }
+| table_option '=' table_opt_value
+ {
+ $$ = $1 + "=" + $3
+ }
+
+table_opt_value:
+ reserved_sql_id
+ {
+ $$ = $1.String()
+ }
+| STRING
+ {
+ $$ = "'" + string($1) + "'"
+ }
+| INTEGRAL
+ {
+ $$ = string($1)
+ }
+
+alter_statement:
+ ALTER TABLE table_name ADD alter_object_type column_definition
+ {
+ $$ = &DDL{Action: AlterStr, Table: $3, NewName: $3}
+ }
+| ALTER TABLE table_name RENAME TO table_name
+ {
+ // Change this to a rename statement
+ $$ = &DDL{Action: RenameStr, Table: $3, NewName: $6}
+ }
+| ALTER TABLE table_name RENAME alter_object_type column_name TO column_name
+ {
+ // A column rename is handled as a plain ALTER on the table.
+ $$ = &DDL{Action: AlterStr, Table: $3, NewName: $3}
+ }
+
+alter_object_type:
+ {} | COLUMN
+
+drop_statement:
+ DROP TABLE exists_opt table_name
+ {
+ var exists bool
+ if $3 != 0 {
+ exists = true
+ }
+ $$ = &DDL{Action: DropStr, Table: $4, IfExists: exists}
+ }
+| DROP INDEX exists_opt table_name
+ {
+ var exists bool
+ if $3 != 0 {
+ exists = true
+ }
+ $$ = &DDL{Action: DropIndexStr, Table: $4, IfExists: exists}
+ }
+
+show_statement:
+SHOW CREATE TABLE table_name
+ {
+ $$ = &Show{Type: string($3), ShowCreate: true, OnTable: $4}
+ }
+| SHOW INDEX FROM TABLE table_name
+ {
+ $$ = &Show{Type: string($2), OnTable: $5}
+ }
+| SHOW TABLE table_name
+ {
+ $$ = &Show{Type: string($2), OnTable: $3}
+ }
+| SHOW TABLES
+ {
+ $$ = &Show{Type: string($2)}
+ }
+
+other_statement:
+ DESC table_name
+ {
+ $$ = &Show{Type: "table", OnTable: $2}
+ }
+| DESCRIBE table_name
+ {
+ $$ = &Show{Type: "table", OnTable: $2}
+ }
+
+comment_opt:
+ {
+ setAllowComments(yylex, true)
+ }
+ comment_list
+ {
+ $$ = $2
+ setAllowComments(yylex, false)
+ }
+
+comment_list:
+ {
+ $$ = nil
+ }
+| comment_list COMMENT
+ {
+ $$ = append($1, $2)
+ }
+
+union_op:
+ UNION
+ {
+ $$ = UnionStr
+ }
+| UNION ALL
+ {
+ $$ = UnionAllStr
+ }
+
+distinct_opt:
+ {
+ $$ = ""
+ }
+| DISTINCT
+ {
+ $$ = DistinctStr
+ }
+
+select_expression_list_opt:
+ {
+ $$ = nil
+ }
+| select_expression_list
+ {
+ $$ = $1
+ }
+
+select_expression_list:
+ select_expression
+ {
+ $$ = SelectExprs{$1}
+ }
+| select_expression_list ',' select_expression
+ {
+ $$ = append($$, $3)
+ }
+
+select_expression:
+ '*'
+ {
+ $$ = &StarExpr{}
+ }
+| expression as_ci_opt
+ {
+ $$ = &AliasedExpr{Expr: $1, As: $2}
+ }
+| table_id '.' '*'
+ {
+ $$ = &StarExpr{TableName: TableName{Name: $1}}
+ }
+| table_id '.' reserved_table_id '.' '*'
+ {
+ $$ = &StarExpr{TableName: TableName{Qualifier: $1, Name: $3}}
+ }
+
+as_ci_opt:
+ {
+ $$ = ColIdent{}
+ }
+| col_alias
+ {
+ $$ = $1
+ }
+| AS col_alias
+ {
+ $$ = $2
+ }
+
+col_alias:
+ sql_id
+| STRING
+ {
+ $$ = NewColIdent(string($1))
+ }
+
+from_opt:
+ FROM table_references
+ {
+ $$ = $2
+ }
+
+table_references:
+ table_reference
+ {
+ $$ = TableExprs{$1}
+ }
+| table_references ',' table_reference
+ {
+ $$ = append($$, $3)
+ }
+
+table_reference:
+ table_factor
+| join_table
+
+table_factor:
+ aliased_table_name
+ {
+ $$ = $1
+ }
+| subquery
+ {
+ $$ = &AliasedTableExpr{Expr:$1}
+ }
+| subquery as_opt table_id
+ {
+ $$ = &AliasedTableExpr{Expr:$1, As: $3}
+ }
+| openb table_references closeb
+ {
+ $$ = &ParenTableExpr{Exprs: $2}
+ }
+
+aliased_table_name:
+table_name as_opt_id
+ {
+ $$ = &AliasedTableExpr{Expr:$1, As: $2}
+ }
+
+column_list:
+ sql_id
+ {
+ $$ = Columns{$1}
+ }
+| column_list ',' sql_id
+ {
+ $$ = append($$, $3)
+ }
+
+join_table:
+ table_reference inner_join table_factor join_condition_opt
+ {
+ $$ = &JoinTableExpr{LeftExpr: $1, Join: $2, RightExpr: $3, Condition: $4}
+ }
+| table_reference outer_join table_reference join_condition
+ {
+ $$ = &JoinTableExpr{LeftExpr: $1, Join: $2, RightExpr: $3, Condition: $4}
+ }
+| table_reference natural_join table_factor
+ {
+ $$ = &JoinTableExpr{LeftExpr: $1, Join: $2, RightExpr: $3}
+ }
+
+join_condition:
+ ON expression
+ { $$ = JoinCondition{On: $2} }
+| USING '(' column_list ')'
+ { $$ = JoinCondition{Using: $3} }
+
+join_condition_opt:
+%prec JOIN
+ { $$ = JoinCondition{} }
+| join_condition
+ { $$ = $1 }
+
+as_opt:
+ { $$ = struct{}{} }
+| AS
+ { $$ = struct{}{} }
+
+as_opt_id:
+ {
+ $$ = NewTableIdent("")
+ }
+| table_alias
+ {
+ $$ = $1
+ }
+| AS table_alias
+ {
+ $$ = $2
+ }
+
+table_alias:
+ table_id
+| STRING
+ {
+ $$ = NewTableIdent(string($1))
+ }
+
+inner_join:
+ JOIN
+ {
+ $$ = JoinStr
+ }
+| INNER JOIN
+ {
+ $$ = InnerJoinStr
+ }
+| CROSS JOIN
+ {
+ $$ = CrossJoinStr
+ }
+
+outer_join:
+ LEFT JOIN
+ {
+ $$ = LeftJoinStr
+ }
+| LEFT OUTER JOIN
+ {
+ $$ = LeftJoinStr
+ }
+
+natural_join:
+ NATURAL JOIN
+ {
+ $$ = NaturalJoinStr
+ }
+| NATURAL outer_join
+ {
+ $$ = NaturalLeftJoinStr
+ }
+
+into_table_name:
+ INTO table_name
+ {
+ $$ = $2
+ }
+| table_name
+ {
+ $$ = $1
+ }
+
+table_name:
+ table_id
+ {
+ $$ = TableName{Name: $1}
+ }
+| table_id '.' reserved_table_id
+ {
+ $$ = TableName{Qualifier: $1, Name: $3}
+ }
+
+where_expression_opt:
+ {
+ $$ = nil
+ }
+| WHERE expression
+ {
+ $$ = $2
+ }
+
+expression:
+ condition
+ {
+ $$ = $1
+ }
+| expression AND expression
+ {
+ $$ = &AndExpr{Left: $1, Right: $3}
+ }
+| expression OR expression
+ {
+ $$ = &OrExpr{Left: $1, Right: $3}
+ }
+| NOT expression
+ {
+ $$ = &NotExpr{Expr: $2}
+ }
+| expression IS is_suffix
+ {
+ $$ = &IsExpr{Operator: $3, Expr: $1}
+ }
+| value_expression
+ {
+ $$ = $1
+ }
+| DEFAULT default_opt
+ {
+ $$ = &Default{ColName: $2}
+ }
+
+default_opt:
+ /* empty */
+ {
+ $$ = ""
+ }
+| openb ID closeb
+ {
+ $$ = string($2)
+ }
+
+boolean_value:
+ TRUE
+ {
+ $$ = BoolVal(true)
+ }
+| FALSE
+ {
+ $$ = BoolVal(false)
+ }
+
+condition:
+ value_expression compare value_expression
+ {
+ $$ = &ComparisonExpr{Left: $1, Operator: $2, Right: $3}
+ }
+| value_expression IN col_tuple
+ {
+ $$ = &ComparisonExpr{Left: $1, Operator: InStr, Right: $3}
+ }
+| value_expression NOT IN col_tuple
+ {
+ $$ = &ComparisonExpr{Left: $1, Operator: NotInStr, Right: $4}
+ }
+| value_expression LIKE value_expression like_escape_opt
+ {
+ $$ = &ComparisonExpr{Left: $1, Operator: LikeStr, Right: $3, Escape: $4}
+ }
+| value_expression NOT LIKE value_expression like_escape_opt
+ {
+ $$ = &ComparisonExpr{Left: $1, Operator: NotLikeStr, Right: $4, Escape: $5}
+ }
+| value_expression REGEXP value_expression
+ {
+ $$ = &ComparisonExpr{Left: $1, Operator: RegexpStr, Right: $3}
+ }
+| value_expression NOT REGEXP value_expression
+ {
+ $$ = &ComparisonExpr{Left: $1, Operator: NotRegexpStr, Right: $4}
+ }
+| value_expression BETWEEN value_expression AND value_expression
+ {
+ $$ = &RangeCond{Left: $1, Operator: BetweenStr, From: $3, To: $5}
+ }
+| value_expression NOT BETWEEN value_expression AND value_expression
+ {
+ $$ = &RangeCond{Left: $1, Operator: NotBetweenStr, From: $4, To: $6}
+ }
+| EXISTS subquery
+ {
+ $$ = &ExistsExpr{Subquery: $2}
+ }
+
+is_suffix:
+ NULL
+ {
+ $$ = IsNullStr
+ }
+| NOT NULL
+ {
+ $$ = IsNotNullStr
+ }
+| TRUE
+ {
+ $$ = IsTrueStr
+ }
+| NOT TRUE
+ {
+ $$ = IsNotTrueStr
+ }
+| FALSE
+ {
+ $$ = IsFalseStr
+ }
+| NOT FALSE
+ {
+ $$ = IsNotFalseStr
+ }
+
+compare:
+ '='
+ {
+ $$ = EqualStr
+ }
+| '<'
+ {
+ $$ = LessThanStr
+ }
+| '>'
+ {
+ $$ = GreaterThanStr
+ }
+| LE
+ {
+ $$ = LessEqualStr
+ }
+| GE
+ {
+ $$ = GreaterEqualStr
+ }
+| NE
+ {
+ $$ = NotEqualStr
+ }
+| NULL_SAFE_NOTEQUAL
+ {
+ $$ = NullSafeNotEqualStr
+ }
+
+like_escape_opt:
+ {
+ $$ = nil
+ }
+| ESCAPE value_expression
+ {
+ $$ = $2
+ }
+
+col_tuple:
+ row_tuple
+ {
+ $$ = $1
+ }
+| subquery
+ {
+ $$ = $1
+ }
+| LIST_ARG
+ {
+ $$ = ListArg($1)
+ }
+
+subquery:
+ openb select_statement closeb
+ {
+ $$ = &Subquery{$2}
+ }
+
+expression_list:
+ expression
+ {
+ $$ = Exprs{$1}
+ }
+| expression_list ',' expression
+ {
+ $$ = append($1, $3)
+ }
+
+value_expression:
+ value
+ {
+ $$ = $1
+ }
+| boolean_value
+ {
+ $$ = $1
+ }
+| column_name
+ {
+ $$ = $1
+ }
+| tuple_expression
+ {
+ $$ = $1
+ }
+| subquery
+ {
+ $$ = $1
+ }
+| value_expression '&' value_expression
+ {
+ $$ = &BinaryExpr{Left: $1, Operator: BitAndStr, Right: $3}
+ }
+| value_expression '|' value_expression
+ {
+ $$ = &BinaryExpr{Left: $1, Operator: BitOrStr, Right: $3}
+ }
+| value_expression '^' value_expression
+ {
+ $$ = &BinaryExpr{Left: $1, Operator: BitXorStr, Right: $3}
+ }
+| value_expression '+' value_expression
+ {
+ $$ = &BinaryExpr{Left: $1, Operator: PlusStr, Right: $3}
+ }
+| value_expression '-' value_expression
+ {
+ $$ = &BinaryExpr{Left: $1, Operator: MinusStr, Right: $3}
+ }
+| value_expression '*' value_expression
+ {
+ $$ = &BinaryExpr{Left: $1, Operator: MultStr, Right: $3}
+ }
+| value_expression '/' value_expression
+ {
+ $$ = &BinaryExpr{Left: $1, Operator: DivStr, Right: $3}
+ }
+| value_expression DIV value_expression
+ {
+ $$ = &BinaryExpr{Left: $1, Operator: IntDivStr, Right: $3}
+ }
+| value_expression '%' value_expression
+ {
+ $$ = &BinaryExpr{Left: $1, Operator: ModStr, Right: $3}
+ }
+| value_expression MOD value_expression
+ {
+ $$ = &BinaryExpr{Left: $1, Operator: ModStr, Right: $3}
+ }
+| value_expression SHIFT_LEFT value_expression
+ {
+ $$ = &BinaryExpr{Left: $1, Operator: ShiftLeftStr, Right: $3}
+ }
+| value_expression SHIFT_RIGHT value_expression
+ {
+ $$ = &BinaryExpr{Left: $1, Operator: ShiftRightStr, Right: $3}
+ }
+| '+' value_expression %prec UNARY
+ {
+ if num, ok := $2.(*SQLVal); ok && num.Type == IntVal {
+ $$ = num
+ } else {
+ $$ = &UnaryExpr{Operator: UPlusStr, Expr: $2}
+ }
+ }
+| '-' value_expression %prec UNARY
+ {
+ if num, ok := $2.(*SQLVal); ok && num.Type == IntVal {
+ // Handle double negative
+ if num.Val[0] == '-' {
+ num.Val = num.Val[1:]
+ $$ = num
+ } else {
+ $$ = NewIntVal(append([]byte("-"), num.Val...))
+ }
+ } else {
+ $$ = &UnaryExpr{Operator: UMinusStr, Expr: $2}
+ }
+ }
+| '~' value_expression
+ {
+ $$ = &UnaryExpr{Operator: TildaStr, Expr: $2}
+ }
+| '!' value_expression %prec UNARY
+ {
+ $$ = &UnaryExpr{Operator: BangStr, Expr: $2}
+ }
+| INTERVAL value_expression sql_id
+ {
+ // This rule prevents the usage of INTERVAL
+ // as a function. If support is needed for that,
+ // we'll need to revisit this. The solution
+ // will be non-trivial because of grammar conflicts.
+ $$ = &IntervalExpr{Expr: $2, Unit: $3.String()}
+ }
+| function_call_generic
+| function_call_keyword
+| function_call_nonkeyword
+| function_call_conflict
+
+/*
+ Regular function calls without special token or syntax, guaranteed to not
+ introduce side effects due to being a simple identifier
+*/
+function_call_generic:
+ sql_id openb select_expression_list_opt closeb
+ {
+ $$ = &FuncExpr{Name: $1, Exprs: $3}
+ }
+| sql_id openb DISTINCT select_expression_list closeb
+ {
+ $$ = &FuncExpr{Name: $1, Distinct: true, Exprs: $4}
+ }
+| table_id '.' reserved_sql_id openb select_expression_list_opt closeb
+ {
+ $$ = &FuncExpr{Qualifier: $1, Name: $3, Exprs: $5}
+ }
+
+/*
+ Function calls using reserved keywords, with dedicated grammar rules
+ as a result
+*/
+function_call_keyword:
+ CAST openb expression AS convert_type closeb
+ {
+ $$ = &ConvertExpr{Expr: $3, Type: $5}
+ }
+| SUBSTR openb column_name ',' value_expression closeb
+ {
+ $$ = &SubstrExpr{Name: $3, From: $5, To: nil}
+ }
+| SUBSTR openb column_name ',' value_expression ',' value_expression closeb
+ {
+ $$ = &SubstrExpr{Name: $3, From: $5, To: $7}
+ }
+| GROUP_CONCAT openb distinct_opt select_expression_list order_by_opt separator_opt closeb
+ {
+ $$ = &GroupConcatExpr{Distinct: $3, Exprs: $4, OrderBy: $5, Separator: $6}
+ }
+| CASE expression_opt when_expression_list else_expression_opt END
+ {
+ $$ = &CaseExpr{Expr: $2, Whens: $3, Else: $4}
+ }
+| VALUES openb column_name closeb
+ {
+ $$ = &ValuesFuncExpr{Name: $3}
+ }
+
+/*
+ Function calls using non reserved keywords but with special syntax forms.
+ Dedicated grammar rules are needed because of the special syntax
+*/
+function_call_nonkeyword:
+ CURRENT_TIMESTAMP
+ {
+ $$ = &TimeExpr{Expr: NewColIdent("current_timestamp")}
+ }
+ // curdate
+| CURRENT_DATE
+ {
+ $$ = &TimeExpr{Expr: NewColIdent("current_date")}
+ }
+ // curtime
+| CURRENT_TIME
+ {
+ $$ = &TimeExpr{Expr: NewColIdent("current_time")}
+ }
+
+/*
+ Function calls using non reserved keywords with *normal* syntax forms. Because
+ the names are non-reserved, they need a dedicated rule so as not to conflict
+*/
+function_call_conflict:
+ IF openb select_expression_list closeb
+ {
+ $$ = &FuncExpr{Name: NewColIdent("if"), Exprs: $3}
+ }
+| MOD openb select_expression_list closeb
+ {
+ $$ = &FuncExpr{Name: NewColIdent("mod"), Exprs: $3}
+ }
+| REPLACE openb select_expression_list closeb
+ {
+ $$ = &FuncExpr{Name: NewColIdent("replace"), Exprs: $3}
+ }
+
+convert_type:
+ CHAR length_opt
+ {
+ $$ = &ConvertType{Type: string($1), Length: $2}
+ }
+| DATE
+ {
+ $$ = &ConvertType{Type: string($1)}
+ }
+| DATETIME length_opt
+ {
+ $$ = &ConvertType{Type: string($1), Length: $2}
+ }
+| DECIMAL decimal_length_opt
+ {
+ $$ = &ConvertType{Type: string($1)}
+ $$.Length = $2.Length
+ $$.Scale = $2.Scale
+ }
+| NCHAR length_opt
+ {
+ $$ = &ConvertType{Type: string($1), Length: $2}
+ }
+| SIGNED
+ {
+ $$ = &ConvertType{Type: string($1)}
+ }
+| SIGNED INTEGER
+ {
+ $$ = &ConvertType{Type: string($1)}
+ }
+| TIME length_opt
+ {
+ $$ = &ConvertType{Type: string($1), Length: $2}
+ }
+| UNSIGNED
+ {
+ $$ = &ConvertType{Type: string($1)}
+ }
+| UNSIGNED INTEGER
+ {
+ $$ = &ConvertType{Type: string($1)}
+ }
+| TIMESTAMP length_opt
+ {
+ $$ = &ConvertType{Type: string($1), Length: $2}
+ }
+| INT length_opt
+ {
+ $$ = &ConvertType{Type: string($1), Length: $2}
+ }
+| TINYINT length_opt
+ {
+ $$ = &ConvertType{Type: string($1), Length: $2}
+ }
+| SMALLINT length_opt
+ {
+ $$ = &ConvertType{Type: string($1), Length: $2}
+ }
+| MEDIUMINT length_opt
+ {
+ $$ = &ConvertType{Type: string($1), Length: $2}
+ }
+| BIGINT length_opt
+ {
+ $$ = &ConvertType{Type: string($1), Length: $2}
+ }
+| STRING
+ {
+ $$ = &ConvertType{Type: string($1)}
+ }
+
+expression_opt:
+ {
+ $$ = nil
+ }
+| expression
+ {
+ $$ = $1
+ }
+
+separator_opt:
+ {
+ $$ = string("")
+ }
+| SEPARATOR STRING
+ {
+ $$ = " separator '"+string($2)+"'"
+ }
+
+when_expression_list:
+ when_expression
+ {
+ $$ = []*When{$1}
+ }
+| when_expression_list when_expression
+ {
+ $$ = append($1, $2)
+ }
+
+when_expression:
+ WHEN expression THEN expression
+ {
+ $$ = &When{Cond: $2, Val: $4}
+ }
+
+else_expression_opt:
+ {
+ $$ = nil
+ }
+| ELSE expression
+ {
+ $$ = $2
+ }
+
+column_name:
+ sql_id
+ {
+ $$ = &ColName{Name: $1}
+ }
+| table_id '.' reserved_sql_id
+ {
+ $$ = &ColName{Qualifier: TableName{Name: $1}, Name: $3}
+ }
+| table_id '.' reserved_table_id '.' reserved_sql_id
+ {
+ $$ = &ColName{Qualifier: TableName{Qualifier: $1, Name: $3}, Name: $5}
+ }
+
+value:
+ STRING
+ {
+ $$ = NewStrVal($1)
+ }
+| HEX
+ {
+ $$ = NewHexVal($1)
+ }
+| INTEGRAL
+ {
+ $$ = NewIntVal($1)
+ }
+| FLOAT
+ {
+ $$ = NewFloatVal($1)
+ }
+| HEXNUM
+ {
+ $$ = NewHexNum($1)
+ }
+| VALUE_ARG
+ {
+ $$ = NewValArg($1)
+ }
+| NULL
+ {
+ $$ = &NullVal{}
+ }
+
+group_by_opt:
+ {
+ $$ = nil
+ }
+| GROUP BY expression_list
+ {
+ $$ = $3
+ }
+
+having_opt:
+ {
+ $$ = nil
+ }
+| HAVING expression
+ {
+ $$ = $2
+ }
+
+order_by_opt:
+ {
+ $$ = nil
+ }
+| ORDER BY order_list
+ {
+ $$ = $3
+ }
+
+order_list:
+ order
+ {
+ $$ = OrderBy{$1}
+ }
+| order_list ',' order
+ {
+ $$ = append($1, $3)
+ }
+
+order:
+ expression asc_desc_opt
+ {
+ $$ = &Order{Expr: $1, Direction: $2}
+ }
+
+asc_desc_opt:
+ {
+ $$ = AscScr
+ }
+| ASC
+ {
+ $$ = AscScr
+ }
+| DESC
+ {
+ $$ = DescScr
+ }
+
+limit_opt:
+ {
+ $$ = nil
+ }
+| LIMIT expression
+ {
+ $$ = &Limit{Rowcount: $2}
+ }
+| LIMIT expression ',' expression
+ {
+ $$ = &Limit{Offset: $2, Rowcount: $4}
+ }
+| LIMIT expression OFFSET expression
+ {
+ $$ = &Limit{Offset: $4, Rowcount: $2}
+ }
+
+// insert_data expands all combinations into a single rule.
+// This avoids a shift/reduce conflict while encountering the
+// following two possible constructs:
+// insert into t1(a, b) (select * from t2)
+// insert into t1(select * from t2)
+// Because the rules are together, the parser can keep shifting
+// tokens until it disambiguates 'a' as a sql_id and 'select' as a keyword.
+insert_data:
+ VALUES tuple_list
+ {
+ $$ = &Insert{Rows: $2}
+ }
+| select_statement
+ {
+ $$ = &Insert{Rows: $1}
+ }
+| openb select_statement closeb
+ {
+ // Drop the redundant parenthesis.
+ $$ = &Insert{Rows: $2}
+ }
+| openb ins_column_list closeb VALUES tuple_list
+ {
+ $$ = &Insert{Columns: $2, Rows: $5}
+ }
+| openb ins_column_list closeb select_statement
+ {
+ $$ = &Insert{Columns: $2, Rows: $4}
+ }
+| openb ins_column_list closeb openb select_statement closeb
+ {
+ // Drop the redundant parenthesis.
+ $$ = &Insert{Columns: $2, Rows: $5}
+ }
+
+ins_column_list:
+ sql_id
+ {
+ $$ = Columns{$1}
+ }
+| sql_id '.' sql_id
+ {
+ $$ = Columns{$3}
+ }
+| ins_column_list ',' sql_id
+ {
+ $$ = append($$, $3)
+ }
+| ins_column_list ',' sql_id '.' sql_id
+ {
+ $$ = append($$, $5)
+ }
+
+tuple_list:
+ tuple_or_empty
+ {
+ $$ = Values{$1}
+ }
+| tuple_list ',' tuple_or_empty
+ {
+ $$ = append($1, $3)
+ }
+
+tuple_or_empty:
+ row_tuple
+ {
+ $$ = $1
+ }
+| openb closeb
+ {
+ $$ = ValTuple{}
+ }
+
+row_tuple:
+ openb expression_list closeb
+ {
+ $$ = ValTuple($2)
+ }
+
+tuple_expression:
+ row_tuple
+ {
+ if len($1) == 1 {
+ $$ = &ParenExpr{$1[0]}
+ } else {
+ $$ = $1
+ }
+ }
+
+update_list:
+ update_expression
+ {
+ $$ = UpdateExprs{$1}
+ }
+| update_list ',' update_expression
+ {
+ $$ = append($1, $3)
+ }
+
+update_expression:
+ column_name '=' expression
+ {
+ $$ = &UpdateExpr{Name: $1, Expr: $3}
+ }
+
+exists_opt:
+ { $$ = 0 }
+| IF EXISTS
+ { $$ = 1 }
+
+not_exists_opt:
+ { $$ = struct{}{} }
+| IF NOT EXISTS
+ { $$ = struct{}{} }
+
+ignore_opt:
+ { $$ = "" }
+| IGNORE
+ { $$ = IgnoreStr }
+
+constraint_opt:
+ { $$ = struct{}{} }
+| UNIQUE
+ { $$ = struct{}{} }
+| sql_id
+ { $$ = struct{}{} }
+
+sql_id:
+ ID
+ {
+ $$ = NewColIdent(string($1))
+ }
+| non_reserved_keyword
+ {
+ $$ = NewColIdent(string($1))
+ }
+
+reserved_sql_id:
+ sql_id
+| reserved_keyword
+ {
+ $$ = NewColIdent(string($1))
+ }
+
+table_id:
+ ID
+ {
+ $$ = NewTableIdent(string($1))
+ }
+| non_reserved_keyword
+ {
+ $$ = NewTableIdent(string($1))
+ }
+
+reserved_table_id:
+ table_id
+| reserved_keyword
+ {
+ $$ = NewTableIdent(string($1))
+ }
+
+/*
+ These are not all necessarily reserved in MySQL, but some are.
+
+ These are more importantly reserved because they may conflict with our grammar.
+ If you want to move one that is not reserved in MySQL (e.g. ESCAPE) to the
+ non_reserved_keywords, you'll need to deal with any conflicts.
+
+ Sorted alphabetically
+*/
+reserved_keyword:
+ ADD
+| AND
+| AS
+| ASC
+| AUTO_INCREMENT
+| BETWEEN
+| BY
+| CASE
+| CREATE
+| CROSS
+| CURRENT_DATE
+| CURRENT_TIME
+| CURRENT_TIMESTAMP
+| SUBSTR
+| DEFAULT
+| DELETE
+| DESC
+| DESCRIBE
+| DISTINCT
+| DIV
+| DROP
+| ELSE
+| END
+| ESCAPE
+| EXISTS
+| FALSE
+| FROM
+| GROUP
+| HAVING
+| IF
+| IGNORE
+| IN
+| INDEX
+| INNER
+| INSERT
+| INTERVAL
+| INTO
+| IS
+| JOIN
+| KEY
+| LEFT
+| LIKE
+| LIMIT
+| MOD
+| NATURAL
+| NOT
+| NULL
+| ON
+| OR
+| ORDER
+| OUTER
+| REGEXP
+| RENAME
+| REPLACE
+| RIGHT
+| SELECT
+| SEPARATOR
+| SET
+| SHOW
+| TABLE
+| TABLES
+| THEN
+| TO
+| TRUE
+| UNION
+| UNIQUE
+| UPDATE
+| USING
+| VALUES
+| WHEN
+| WHERE
+
+/*
+ These are non-reserved in Vitess because they don't cause conflicts in the grammar.
+ Some of them may be reserved in MySQL. The good news is we backtick quote them
+ when we rewrite the query, so no issue should arise.
+
+ Sorted alphabetically
+*/
+non_reserved_keyword:
+ BIGINT
+| BLOB
+| BOOL
+| CHAR
+| DATE
+| DATETIME
+| DECIMAL
+| DOUBLE
+| FLOAT_TYPE
+| FOREIGN
+| INT
+| INTEGER
+| LAST_INSERT_ID
+| LONGBLOB
+| LONGTEXT
+| MEDIUMBLOB
+| MEDIUMINT
+| MEDIUMTEXT
+| NCHAR
+| NUMERIC
+| OFFSET
+| PRIMARY
+| REAL
+| SIGNED
+| SMALLINT
+| TEXT
+| TIME
+| TIMESTAMP
+| TINYBLOB
+| TINYINT
+| TINYTEXT
+| UNSIGNED
+| UNUSED
+| VARCHAR
+| YEAR
+| ZEROFILL
+
+openb:
+ '('
+ {
+ if incNesting(yylex) {
+ yylex.Error("max nesting level reached")
+ return 1
+ }
+ }
+
+closeb:
+ ')'
+ {
+ decNesting(yylex)
+ }
+
+ddl_force_eof:
+ {
+ forceEOF(yylex)
+ }
+| openb
+ {
+ forceEOF(yylex)
+ }
+| reserved_sql_id
+ {
+ forceEOF(yylex)
+ }
diff --git a/vendor/github.com/CovenantSQL/sqlparser/token.go b/vendor/github.com/CovenantSQL/sqlparser/token.go
new file mode 100644
index 000000000..53ec0c9d3
--- /dev/null
+++ b/vendor/github.com/CovenantSQL/sqlparser/token.go
@@ -0,0 +1,830 @@
+/*
+Copyright 2017 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sqlparser
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/CovenantSQL/sqlparser/dependency/bytes2"
+ "github.com/CovenantSQL/sqlparser/dependency/sqltypes"
+)
+
+const (
+ defaultBufSize = 4096
+ eofChar = 0x100
+)
+
+// Tokenizer is the struct used to generate SQL
+// tokens for the parser.
+type Tokenizer struct {
+ InStream io.Reader
+ AllowComments bool
+ ForceEOF bool
+ lastChar uint16
+ Position int
+ lastToken []byte
+ LastError error
+ posVarIndex int
+ ParseTree Statement
+ partialDDL *DDL
+ nesting int
+ multi bool
+ specialComment *Tokenizer
+
+ buf []byte
+ bufPos int
+ bufSize int
+}
+
+// NewStringTokenizer creates a new Tokenizer for the
+// sql string.
+func NewStringTokenizer(sql string) *Tokenizer {
+ buf := []byte(sql)
+ return &Tokenizer{
+ buf: buf,
+ bufSize: len(buf),
+ }
+}
+
+// NewTokenizer creates a new Tokenizer reading a sql
+// string from the io.Reader.
+func NewTokenizer(r io.Reader) *Tokenizer {
+ return &Tokenizer{
+ InStream: r,
+ buf: make([]byte, defaultBufSize),
+ }
+}
+
+// keywords is a map of mysql keywords that fall into two categories:
+// 1) keywords considered reserved by MySQL
+// 2) keywords for us to handle specially in sql.y
+//
+// Those marked as UNUSED are likely reserved keywords. We add them here so that,
+// when rewriting queries, we can properly backtick-quote them and avoid issues.
+//
+// NOTE: If you add new keywords, add them also to the reserved_keywords or
+// non_reserved_keywords grammar in sql.y -- this will allow the keyword to be used
+// in identifiers. See the docs for each grammar to determine which one to put it into.
+var keywords = map[string]int{
+ "accessible": UNUSED,
+ "add": ADD,
+ "all": ALL,
+ "alter": ALTER,
+ "and": AND,
+ "as": AS,
+ "asc": ASC,
+ "asensitive": UNUSED,
+ "auto_increment": AUTO_INCREMENT,
+ "before": UNUSED,
+ "between": BETWEEN,
+ "bigint": BIGINT,
+ "blob": BLOB,
+ "bool": BOOL,
+ "both": UNUSED,
+ "by": BY,
+ "call": UNUSED,
+ "cascade": UNUSED,
+ "case": CASE,
+ "cast": CAST,
+ "change": UNUSED,
+ "char": CHAR,
+ "check": UNUSED,
+ "column": COLUMN,
+ "condition": UNUSED,
+ "constraint": CONSTRAINT,
+ "continue": UNUSED,
+ "substr": SUBSTR,
+ "create": CREATE,
+ "cross": CROSS,
+ "current_date": CURRENT_DATE,
+ "current_time": CURRENT_TIME,
+ "current_timestamp": CURRENT_TIMESTAMP,
+ "current_user": UNUSED,
+ "cursor": UNUSED,
+ "day_hour": UNUSED,
+ "day_microsecond": UNUSED,
+ "day_minute": UNUSED,
+ "day_second": UNUSED,
+ "date": DATE,
+ "datetime": DATETIME,
+ "dec": UNUSED,
+ "decimal": DECIMAL,
+ "declare": UNUSED,
+ "default": DEFAULT,
+ "delayed": UNUSED,
+ "delete": DELETE,
+ "desc": DESC,
+ "describe": DESCRIBE,
+ "deterministic": UNUSED,
+ "distinct": DISTINCT,
+ "distinctrow": UNUSED,
+ "div": DIV,
+ "double": DOUBLE,
+ "drop": DROP,
+ "each": UNUSED,
+ "else": ELSE,
+ "elseif": UNUSED,
+ "enclosed": UNUSED,
+ "end": END,
+ "escape": ESCAPE,
+ "escaped": UNUSED,
+ "exists": EXISTS,
+ "exit": UNUSED,
+ "false": FALSE,
+ "fetch": UNUSED,
+ "float": FLOAT_TYPE,
+ "float4": UNUSED,
+ "float8": UNUSED,
+ "foreign": FOREIGN,
+ "from": FROM,
+ "generated": UNUSED,
+ "get": UNUSED,
+ "grant": UNUSED,
+ "group": GROUP,
+ "group_concat": GROUP_CONCAT,
+ "having": HAVING,
+ "high_priority": UNUSED,
+ "hour_microsecond": UNUSED,
+ "hour_minute": UNUSED,
+ "hour_second": UNUSED,
+ "if": IF,
+ "ignore": IGNORE,
+ "in": IN,
+ "index": INDEX,
+ "infile": UNUSED,
+ "inout": UNUSED,
+ "inner": INNER,
+ "insensitive": UNUSED,
+ "insert": INSERT,
+ "int": INT,
+ "int1": UNUSED,
+ "int2": UNUSED,
+ "int3": UNUSED,
+ "int4": UNUSED,
+ "int8": UNUSED,
+ "integer": INTEGER,
+ "interval": INTERVAL,
+ "into": INTO,
+ "io_after_gtids": UNUSED,
+ "is": IS,
+ "iterate": UNUSED,
+ "join": JOIN,
+ "key": KEY,
+ "kill": UNUSED,
+ "last_insert_id": LAST_INSERT_ID,
+ "leading": UNUSED,
+ "leave": UNUSED,
+ "left": LEFT,
+ "like": LIKE,
+ "limit": LIMIT,
+ "linear": UNUSED,
+ "lines": UNUSED,
+ "load": UNUSED,
+ "long": UNUSED,
+ "longblob": LONGBLOB,
+ "longtext": LONGTEXT,
+ "loop": UNUSED,
+ "low_priority": UNUSED,
+ "master_bind": UNUSED,
+ "mediumblob": MEDIUMBLOB,
+ "mediumint": MEDIUMINT,
+ "mediumtext": MEDIUMTEXT,
+ "middleint": UNUSED,
+ "minute_microsecond": UNUSED,
+ "minute_second": UNUSED,
+ "mod": MOD,
+ "modifies": UNUSED,
+ "natural": NATURAL,
+ "nchar": NCHAR,
+ "not": NOT,
+ "no_write_to_binlog": UNUSED,
+ "null": NULL,
+ "numeric": NUMERIC,
+ "offset": OFFSET,
+ "on": ON,
+ "optimizer_costs": UNUSED,
+ "option": UNUSED,
+ "optionally": UNUSED,
+ "or": OR,
+ "order": ORDER,
+ "out": UNUSED,
+ "outer": OUTER,
+ "outfile": UNUSED,
+ "precision": UNUSED,
+ "primary": PRIMARY,
+ "range": UNUSED,
+ "reads": UNUSED,
+ "read_write": UNUSED,
+ "real": REAL,
+ "references": UNUSED,
+ "regexp": REGEXP,
+ "release": UNUSED,
+ "rename": RENAME,
+ "repeat": UNUSED,
+ "replace": REPLACE,
+ "require": UNUSED,
+ "resignal": UNUSED,
+ "restrict": UNUSED,
+ "return": UNUSED,
+ "revoke": UNUSED,
+ "right": RIGHT,
+ "rlike": REGEXP,
+ "schemas": UNUSED,
+ "second_microsecond": UNUSED,
+ "select": SELECT,
+ "sensitive": UNUSED,
+ "separator": SEPARATOR,
+ "set": SET,
+ "show": SHOW,
+ "signal": UNUSED,
+ "signed": SIGNED,
+ "smallint": SMALLINT,
+ "specific": UNUSED,
+ "sql": UNUSED,
+ "sqlexception": UNUSED,
+ "sqlstate": UNUSED,
+ "sqlwarning": UNUSED,
+ "sql_big_result": UNUSED,
+ "sql_calc_found_rows": UNUSED,
+ "sql_small_result": UNUSED,
+ "ssl": UNUSED,
+ "starting": UNUSED,
+ "stored": UNUSED,
+ "string": STRING,
+ "table": TABLE,
+ "tables": TABLES,
+ "terminated": UNUSED,
+ "text": TEXT,
+ "then": THEN,
+ "time": TIME,
+ "timestamp": TIMESTAMP,
+ "tinyblob": TINYBLOB,
+ "tinyint": TINYINT,
+ "tinytext": TINYTEXT,
+ "to": TO,
+ "trailing": UNUSED,
+ "true": TRUE,
+ "undo": UNUSED,
+ "union": UNION,
+ "unique": UNIQUE,
+ "unlock": UNUSED,
+ "unsigned": UNSIGNED,
+ "update": UPDATE,
+ "usage": UNUSED,
+ "using": USING,
+ "values": VALUES,
+ "varchar": VARCHAR,
+ "varcharacter": UNUSED,
+ "varying": UNUSED,
+ "virtual": UNUSED,
+ "when": WHEN,
+ "where": WHERE,
+ "while": UNUSED,
+ "xor": UNUSED,
+ "year": YEAR,
+ "year_month": UNUSED,
+ "zerofill": ZEROFILL,
+}
+
+// keywordStrings contains the reverse mapping of token to keyword strings
+var keywordStrings = map[int]string{}
+
+func init() {
+ for str, id := range keywords {
+ if id == UNUSED {
+ continue
+ }
+ keywordStrings[id] = str
+ }
+}
+
+// KeywordString returns the string corresponding to the given keyword
+func KeywordString(id int) string {
+ str, ok := keywordStrings[id]
+ if !ok {
+ return ""
+ }
+ return str
+}
+
+// Lex returns the next token from the Tokenizer.
+// This function is used by go yacc.
+func (tkn *Tokenizer) Lex(lval *yySymType) int {
+ typ, val := tkn.Scan()
+ for typ == COMMENT {
+ if tkn.AllowComments {
+ break
+ }
+ typ, val = tkn.Scan()
+ }
+ lval.bytes = val
+ tkn.lastToken = val
+ return typ
+}
+
+// Error is called by go yacc if there's a parsing error.
+func (tkn *Tokenizer) Error(err string) {
+ buf := &bytes2.Buffer{}
+ if tkn.lastToken != nil {
+ fmt.Fprintf(buf, "%s at position %v near '%s'", err, tkn.Position, tkn.lastToken)
+ } else {
+ fmt.Fprintf(buf, "%s at position %v", err, tkn.Position)
+ }
+ tkn.LastError = errors.New(buf.String())
+
+ // Try and re-sync to the next statement
+ if tkn.lastChar != ';' {
+ tkn.skipStatement()
+ }
+}
+
+// Scan scans the tokenizer for the next token and returns
+// the token type and an optional value.
+func (tkn *Tokenizer) Scan() (int, []byte) {
+ if tkn.specialComment != nil {
+ // Enter specialComment scan mode.
+ // This is used to scan MySQL-specific comments of the form: /*! MySQL-specific code */
+ specialComment := tkn.specialComment
+ tok, val := specialComment.Scan()
+ if tok != 0 {
+ // return the specialComment scan result as the result
+ return tok, val
+ }
+ // Leave specialComment scan mode once its stream has been fully consumed.
+ tkn.specialComment = nil
+ }
+ if tkn.lastChar == 0 {
+ tkn.next()
+ }
+
+ if tkn.ForceEOF {
+ tkn.skipStatement()
+ return 0, nil
+ }
+
+ tkn.skipBlank()
+ switch ch := tkn.lastChar; {
+ case isLetter(ch):
+ tkn.next()
+ if ch == 'X' || ch == 'x' {
+ if tkn.lastChar == '\'' {
+ tkn.next()
+ return tkn.scanHex()
+ }
+ }
+ isDbSystemVariable := false
+ if ch == '@' && tkn.lastChar == '@' {
+ isDbSystemVariable = true
+ }
+ return tkn.scanIdentifier(byte(ch), isDbSystemVariable)
+ case isDigit(ch):
+ return tkn.scanNumber(false)
+ case ch == ':':
+ return tkn.scanBindVar()
+ case ch == ';' && tkn.multi:
+ return 0, nil
+ default:
+ tkn.next()
+ switch ch {
+ case eofChar:
+ return 0, nil
+ case '=', ',', ';', '(', ')', '+', '*', '%', '^', '~':
+ return int(ch), nil
+ case '&':
+ if tkn.lastChar == '&' {
+ tkn.next()
+ return AND, nil
+ }
+ return int(ch), nil
+ case '|':
+ if tkn.lastChar == '|' {
+ tkn.next()
+ return OR, nil
+ }
+ return int(ch), nil
+ case '?':
+ tkn.posVarIndex++
+ buf := new(bytes2.Buffer)
+ fmt.Fprintf(buf, ":v%d", tkn.posVarIndex)
+ return VALUE_ARG, buf.Bytes()
+ case '.':
+ if isDigit(tkn.lastChar) {
+ return tkn.scanNumber(true)
+ }
+ return int(ch), nil
+ case '/':
+ switch tkn.lastChar {
+ case '/':
+ tkn.next()
+ return tkn.scanCommentType1("//")
+ case '*':
+ tkn.next()
+ switch tkn.lastChar {
+ case '!':
+ return tkn.scanMySQLSpecificComment()
+ default:
+ return tkn.scanCommentType2()
+ }
+ default:
+ return int(ch), nil
+ }
+ case '#':
+ return tkn.scanCommentType1("#")
+ case '-':
+ switch tkn.lastChar {
+ case '-':
+ tkn.next()
+ return tkn.scanCommentType1("--")
+ }
+ return int(ch), nil
+ case '<':
+ switch tkn.lastChar {
+ case '>':
+ tkn.next()
+ return NULL_SAFE_NOTEQUAL, nil
+ case '<':
+ tkn.next()
+ return SHIFT_LEFT, nil
+ case '=':
+ tkn.next()
+ return LE, nil
+ default:
+ return int(ch), nil
+ }
+ case '>':
+ switch tkn.lastChar {
+ case '=':
+ tkn.next()
+ return GE, nil
+ case '>':
+ tkn.next()
+ return SHIFT_RIGHT, nil
+ default:
+ return int(ch), nil
+ }
+ case '!':
+ if tkn.lastChar == '=' {
+ tkn.next()
+ return NE, nil
+ }
+ return int(ch), nil
+ case '\'', '"':
+ return tkn.scanString(ch, STRING)
+ case '`':
+ return tkn.scanLiteralIdentifier()
+ default:
+ return LEX_ERROR, []byte{byte(ch)}
+ }
+ }
+}
+
+// skipStatement scans until the EOF, or end of statement is encountered.
+func (tkn *Tokenizer) skipStatement() {
+ ch := tkn.lastChar
+ for ch != ';' && ch != eofChar {
+ tkn.next()
+ ch = tkn.lastChar
+ }
+}
+
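+// skipBlank skips over spaces, tabs, and newlines until a non-blank character is reached.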
+func (tkn *Tokenizer) skipBlank() {
+ ch := tkn.lastChar
+ for ch == ' ' || ch == '\n' || ch == '\r' || ch == '\t' {
+ tkn.next()
+ ch = tkn.lastChar
+ }
+}
+
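+// scanIdentifier reads an identifier or keyword starting with firstByte; keyword matching is case-insensitive.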
+func (tkn *Tokenizer) scanIdentifier(firstByte byte, isDbSystemVariable bool) (int, []byte) {
+ buffer := &bytes2.Buffer{}
+ buffer.WriteByte(firstByte)
+ for isLetter(tkn.lastChar) || isDigit(tkn.lastChar) || (isDbSystemVariable && isCarat(tkn.lastChar)) {
+ buffer.WriteByte(byte(tkn.lastChar))
+ tkn.next()
+ }
+ lowered := bytes.ToLower(buffer.Bytes())
+ loweredStr := string(lowered)
+ if keywordID, found := keywords[loweredStr]; found {
+ return keywordID, lowered
+ }
+ // dual must always be case-insensitive
+ if loweredStr == "dual" {
+ return ID, lowered
+ }
+ return ID, buffer.Bytes()
+}
+
+func (tkn *Tokenizer) scanHex() (int, []byte) {
+ buffer := &bytes2.Buffer{}
+ tkn.scanMantissa(16, buffer)
+ if tkn.lastChar != '\'' {
+ return LEX_ERROR, buffer.Bytes()
+ }
+ tkn.next()
+ if buffer.Len()%2 != 0 {
+ return LEX_ERROR, buffer.Bytes()
+ }
+ return HEX, buffer.Bytes()
+}
+
+func (tkn *Tokenizer) scanLiteralIdentifier() (int, []byte) {
+ buffer := &bytes2.Buffer{}
+ backTickSeen := false
+ for {
+ if backTickSeen {
+ if tkn.lastChar != '`' {
+ break
+ }
+ backTickSeen = false
+ buffer.WriteByte('`')
+ tkn.next()
+ continue
+ }
+ // The previous char was not a backtick.
+ switch tkn.lastChar {
+ case '`':
+ backTickSeen = true
+ case eofChar:
+ // Premature EOF.
+ return LEX_ERROR, buffer.Bytes()
+ default:
+ buffer.WriteByte(byte(tkn.lastChar))
+ }
+ tkn.next()
+ }
+ if buffer.Len() == 0 {
+ return LEX_ERROR, buffer.Bytes()
+ }
+ return ID, buffer.Bytes()
+}
+
+func (tkn *Tokenizer) scanBindVar() (int, []byte) {
+ buffer := &bytes2.Buffer{}
+ buffer.WriteByte(byte(tkn.lastChar))
+ token := VALUE_ARG
+ tkn.next()
+ if tkn.lastChar == ':' {
+ token = LIST_ARG
+ buffer.WriteByte(byte(tkn.lastChar))
+ tkn.next()
+ }
+ if !isLetter(tkn.lastChar) {
+ return LEX_ERROR, buffer.Bytes()
+ }
+ for isLetter(tkn.lastChar) || isDigit(tkn.lastChar) || tkn.lastChar == '.' {
+ buffer.WriteByte(byte(tkn.lastChar))
+ tkn.next()
+ }
+ return token, buffer.Bytes()
+}
+
+func (tkn *Tokenizer) scanMantissa(base int, buffer *bytes2.Buffer) {
+ for digitVal(tkn.lastChar) < base {
+ tkn.consumeNext(buffer)
+ }
+}
+
+func (tkn *Tokenizer) scanNumber(seenDecimalPoint bool) (int, []byte) {
+ token := INTEGRAL
+ buffer := &bytes2.Buffer{}
+ if seenDecimalPoint {
+ token = FLOAT
+ buffer.WriteByte('.')
+ tkn.scanMantissa(10, buffer)
+ goto exponent
+ }
+
+ // 0x construct.
+ if tkn.lastChar == '0' {
+ tkn.consumeNext(buffer)
+ if tkn.lastChar == 'x' || tkn.lastChar == 'X' {
+ token = HEXNUM
+ tkn.consumeNext(buffer)
+ tkn.scanMantissa(16, buffer)
+ goto exit
+ }
+ }
+
+ tkn.scanMantissa(10, buffer)
+
+ if tkn.lastChar == '.' {
+ token = FLOAT
+ tkn.consumeNext(buffer)
+ tkn.scanMantissa(10, buffer)
+ }
+
+exponent:
+ if tkn.lastChar == 'e' || tkn.lastChar == 'E' {
+ token = FLOAT
+ tkn.consumeNext(buffer)
+ if tkn.lastChar == '+' || tkn.lastChar == '-' {
+ tkn.consumeNext(buffer)
+ }
+ tkn.scanMantissa(10, buffer)
+ }
+
+exit:
+ // A letter cannot immediately follow a number.
+ if isLetter(tkn.lastChar) {
+ return LEX_ERROR, buffer.Bytes()
+ }
+
+ return token, buffer.Bytes()
+}
+
+func (tkn *Tokenizer) scanString(delim uint16, typ int) (int, []byte) {
+ var buffer bytes2.Buffer
+ for {
+ ch := tkn.lastChar
+ if ch == eofChar {
+ // Unterminated string.
+ return LEX_ERROR, buffer.Bytes()
+ }
+
+ if ch != delim && ch != '\\' {
+ buffer.WriteByte(byte(ch))
+
+ // Scan ahead to the next interesting character.
+ start := tkn.bufPos
+ for ; tkn.bufPos < tkn.bufSize; tkn.bufPos++ {
+ ch = uint16(tkn.buf[tkn.bufPos])
+ if ch == delim || ch == '\\' {
+ break
+ }
+ }
+
+ buffer.Write(tkn.buf[start:tkn.bufPos])
+ tkn.Position += (tkn.bufPos - start)
+
+ if tkn.bufPos >= tkn.bufSize {
+ // Reached the end of the buffer without finding a delim or
+ // escape character.
+ tkn.next()
+ continue
+ }
+
+ tkn.bufPos++
+ tkn.Position++
+ }
+ tkn.next() // Read one past the delim or escape character.
+
+ if ch == '\\' {
+ if tkn.lastChar == eofChar {
+ // String terminates mid escape character.
+ return LEX_ERROR, buffer.Bytes()
+ }
+ if decodedChar := sqltypes.SQLDecodeMap[byte(tkn.lastChar)]; decodedChar == sqltypes.DontEscape {
+ ch = tkn.lastChar
+ } else {
+ ch = uint16(decodedChar)
+ }
+
+ } else if ch == delim && tkn.lastChar != delim {
+ // Correctly terminated string, which is not a double delim.
+ break
+ }
+
+ buffer.WriteByte(byte(ch))
+ tkn.next()
+ }
+
+ return typ, buffer.Bytes()
+}
+
+func (tkn *Tokenizer) scanCommentType1(prefix string) (int, []byte) {
+ buffer := &bytes2.Buffer{}
+ buffer.WriteString(prefix)
+ for tkn.lastChar != eofChar {
+ if tkn.lastChar == '\n' {
+ tkn.consumeNext(buffer)
+ break
+ }
+ tkn.consumeNext(buffer)
+ }
+ return COMMENT, buffer.Bytes()
+}
+
+func (tkn *Tokenizer) scanCommentType2() (int, []byte) {
+ buffer := &bytes2.Buffer{}
+ buffer.WriteString("/*")
+ for {
+ if tkn.lastChar == '*' {
+ tkn.consumeNext(buffer)
+ if tkn.lastChar == '/' {
+ tkn.consumeNext(buffer)
+ break
+ }
+ continue
+ }
+ if tkn.lastChar == eofChar {
+ return LEX_ERROR, buffer.Bytes()
+ }
+ tkn.consumeNext(buffer)
+ }
+ return COMMENT, buffer.Bytes()
+}
+
+func (tkn *Tokenizer) scanMySQLSpecificComment() (int, []byte) {
+ buffer := &bytes2.Buffer{}
+ buffer.WriteString("/*!")
+ tkn.next()
+ for {
+ if tkn.lastChar == '*' {
+ tkn.consumeNext(buffer)
+ if tkn.lastChar == '/' {
+ tkn.consumeNext(buffer)
+ break
+ }
+ continue
+ }
+ if tkn.lastChar == eofChar {
+ return LEX_ERROR, buffer.Bytes()
+ }
+ tkn.consumeNext(buffer)
+ }
+ _, sql := ExtractMysqlComment(buffer.String())
+ tkn.specialComment = NewStringTokenizer(sql)
+ return tkn.Scan()
+}
+
+func (tkn *Tokenizer) consumeNext(buffer *bytes2.Buffer) {
+ if tkn.lastChar == eofChar {
+ // This should never happen.
+ panic("unexpected EOF")
+ }
+ buffer.WriteByte(byte(tkn.lastChar))
+ tkn.next()
+}
+
+func (tkn *Tokenizer) next() {
+ if tkn.bufPos >= tkn.bufSize && tkn.InStream != nil {
+ // Try and refill the buffer
+ var err error
+ tkn.bufPos = 0
+ if tkn.bufSize, err = tkn.InStream.Read(tkn.buf); err != io.EOF && err != nil {
+ tkn.LastError = err
+ }
+ }
+
+ if tkn.bufPos >= tkn.bufSize {
+ if tkn.lastChar != eofChar {
+ tkn.Position++
+ tkn.lastChar = eofChar
+ }
+ } else {
+ tkn.Position++
+ tkn.lastChar = uint16(tkn.buf[tkn.bufPos])
+ tkn.bufPos++
+ }
+}
+
+// reset clears any internal state.
+func (tkn *Tokenizer) reset() {
+ tkn.ParseTree = nil
+ tkn.partialDDL = nil
+ tkn.specialComment = nil
+ tkn.posVarIndex = 0
+ tkn.nesting = 0
+ tkn.ForceEOF = false
+}
+
+func isLetter(ch uint16) bool {
+ return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch == '@'
+}
+
+func isCarat(ch uint16) bool {
+ return ch == '.' || ch == '\'' || ch == '"' || ch == '`'
+}
+
+func digitVal(ch uint16) int {
+ switch {
+ case '0' <= ch && ch <= '9':
+ return int(ch) - '0'
+ case 'a' <= ch && ch <= 'f':
+ return int(ch) - 'a' + 10
+ case 'A' <= ch && ch <= 'F':
+ return int(ch) - 'A' + 10
+ }
+ return 16 // larger than any legal digit val
+}
+
+func isDigit(ch uint16) bool {
+ return '0' <= ch && ch <= '9'
+}
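For orientation, a minimal sketch of driving the tokenizer above from outside the package, using only NewStringTokenizer and Scan as defined in token.go. The query string is illustrative and the snippet is not part of the patch; it assumes the LEX_ERROR token constant is exported, as its use in this file suggests.

    package main

    import (
        "fmt"

        "github.com/CovenantSQL/sqlparser"
    )

    func main() {
        tkn := sqlparser.NewStringTokenizer("select a from t where id = :id")
        for {
            tok, val := tkn.Scan()
            if tok == 0 {
                break // end of input
            }
            if tok == sqlparser.LEX_ERROR {
                fmt.Printf("lex error at %q\n", val)
                break
            }
            fmt.Printf("token %d: %q\n", tok, val)
        }
    }

Each call to Scan returns the next token id and its value; bind variables such as :id come back as VALUE_ARG tokens, as scanBindVar above shows.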
diff --git a/vendor/github.com/CovenantSQL/sqlparser/tracked_buffer.go b/vendor/github.com/CovenantSQL/sqlparser/tracked_buffer.go
new file mode 100644
index 000000000..ec421a5fb
--- /dev/null
+++ b/vendor/github.com/CovenantSQL/sqlparser/tracked_buffer.go
@@ -0,0 +1,140 @@
+/*
+Copyright 2017 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sqlparser
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// NodeFormatter defines the signature of a custom node formatter
+// function that can be given to TrackedBuffer for code generation.
+type NodeFormatter func(buf *TrackedBuffer, node SQLNode)
+
+// TrackedBuffer is used to rebuild a query from the ast.
+// bindLocations keeps track of locations in the buffer that
+// use bind variables for efficient future substitutions.
+// nodeFormatter is the formatting function the buffer will
+// use to format a node. By default (nil), a node formats itself via its Format method.
+// But you can supply a different formatting function if you
+// want to generate a query that's different from the default.
+type TrackedBuffer struct {
+ *bytes.Buffer
+ bindLocations []bindLocation
+ nodeFormatter NodeFormatter
+}
+
+// NewTrackedBuffer creates a new TrackedBuffer.
+func NewTrackedBuffer(nodeFormatter NodeFormatter) *TrackedBuffer {
+ return &TrackedBuffer{
+ Buffer: new(bytes.Buffer),
+ nodeFormatter: nodeFormatter,
+ }
+}
+
+// WriteNode initiates the writing of a single SQLNode tree by passing
+// it through to Myprintf with a default format string.
+func (buf *TrackedBuffer) WriteNode(node SQLNode) *TrackedBuffer {
+ buf.Myprintf("%v", node)
+ return buf
+}
+
+// Myprintf mimics fmt.Fprintf(buf, ...), but limited to Node(%v),
+// Node.Value(%s) and string(%s). It also allows a %a for a value argument, in
+// which case it adds tracking info for future substitutions.
+//
+// The name must be something other than the usual Printf() to avoid "go vet"
+// warnings due to our custom format specifiers.
+func (buf *TrackedBuffer) Myprintf(format string, values ...interface{}) {
+ end := len(format)
+ fieldnum := 0
+ for i := 0; i < end; {
+ lasti := i
+ for i < end && format[i] != '%' {
+ i++
+ }
+ if i > lasti {
+ buf.WriteString(format[lasti:i])
+ }
+ if i >= end {
+ break
+ }
+ i++ // '%'
+ switch format[i] {
+ case 'c':
+ switch v := values[fieldnum].(type) {
+ case byte:
+ buf.WriteByte(v)
+ case rune:
+ buf.WriteRune(v)
+ default:
+ panic(fmt.Sprintf("unexpected TrackedBuffer type %T", v))
+ }
+ case 's':
+ switch v := values[fieldnum].(type) {
+ case []byte:
+ buf.Write(v)
+ case string:
+ buf.WriteString(v)
+ default:
+ panic(fmt.Sprintf("unexpected TrackedBuffer type %T", v))
+ }
+ case 'v':
+ node := values[fieldnum].(SQLNode)
+ if buf.nodeFormatter == nil {
+ node.Format(buf)
+ } else {
+ buf.nodeFormatter(buf, node)
+ }
+ case 'a':
+ buf.WriteArg(values[fieldnum].(string))
+ default:
+ panic("unexpected")
+ }
+ fieldnum++
+ i++
+ }
+}
+
+// WriteArg writes a value argument into the buffer along with
+// tracking information for future substitutions. arg must contain
+// the ":" or "::" prefix.
+func (buf *TrackedBuffer) WriteArg(arg string) {
+ buf.bindLocations = append(buf.bindLocations, bindLocation{
+ offset: buf.Len(),
+ length: len(arg),
+ })
+ buf.WriteString(arg)
+}
+
+// ParsedQuery returns a ParsedQuery that contains bind
+// locations for easy substitution.
+func (buf *TrackedBuffer) ParsedQuery() *ParsedQuery {
+ return &ParsedQuery{Query: buf.String(), bindLocations: buf.bindLocations}
+}
+
+// HasBindVars returns true if the parsed query uses bind vars.
+func (buf *TrackedBuffer) HasBindVars() bool {
+ return len(buf.bindLocations) != 0
+}
+
+// BuildParsedQuery builds a ParsedQuery from the input.
+func BuildParsedQuery(in string, vars ...interface{}) *ParsedQuery {
+ buf := NewTrackedBuffer(nil)
+ buf.Myprintf(in, vars...)
+ return buf.ParsedQuery()
+}
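Similarly, a short sketch of the TrackedBuffer API introduced in this file: the %a verb routes through WriteArg, which records a bind location that the resulting ParsedQuery carries along. Illustrative only, not part of the patch; it assumes the same import as the sketch above.

    buf := sqlparser.NewTrackedBuffer(nil)
    buf.Myprintf("select * from t where id = %a", ":id")
    pq := buf.ParsedQuery()
    fmt.Println(pq.Query)          // select * from t where id = :id
    fmt.Println(buf.HasBindVars()) // true

BuildParsedQuery wraps the same steps (a nil-formatter buffer plus Myprintf) into a single call.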
diff --git a/vendor/mvdan.cc/xurls/LICENSE b/vendor/github.com/CovenantSQL/xurls/LICENSE
similarity index 100%
rename from vendor/mvdan.cc/xurls/LICENSE
rename to vendor/github.com/CovenantSQL/xurls/LICENSE
diff --git a/vendor/mvdan.cc/xurls/README.md b/vendor/github.com/CovenantSQL/xurls/README.md
similarity index 100%
rename from vendor/mvdan.cc/xurls/README.md
rename to vendor/github.com/CovenantSQL/xurls/README.md
diff --git a/vendor/mvdan.cc/xurls/schemes.go b/vendor/github.com/CovenantSQL/xurls/schemes.go
similarity index 100%
rename from vendor/mvdan.cc/xurls/schemes.go
rename to vendor/github.com/CovenantSQL/xurls/schemes.go
diff --git a/vendor/mvdan.cc/xurls/tlds.go b/vendor/github.com/CovenantSQL/xurls/tlds.go
similarity index 100%
rename from vendor/mvdan.cc/xurls/tlds.go
rename to vendor/github.com/CovenantSQL/xurls/tlds.go
diff --git a/vendor/mvdan.cc/xurls/tlds_pseudo.go b/vendor/github.com/CovenantSQL/xurls/tlds_pseudo.go
similarity index 100%
rename from vendor/mvdan.cc/xurls/tlds_pseudo.go
rename to vendor/github.com/CovenantSQL/xurls/tlds_pseudo.go
diff --git a/vendor/mvdan.cc/xurls/xurls.go b/vendor/github.com/CovenantSQL/xurls/xurls.go
similarity index 98%
rename from vendor/mvdan.cc/xurls/xurls.go
rename to vendor/github.com/CovenantSQL/xurls/xurls.go
index a4154a97d..5a98ac7cf 100644
--- a/vendor/mvdan.cc/xurls/xurls.go
+++ b/vendor/github.com/CovenantSQL/xurls/xurls.go
@@ -2,7 +2,7 @@
// See LICENSE for licensing information
// Package xurls extracts urls from plain text using regular expressions.
-package xurls // import "mvdan.cc/xurls"
+package xurls
import (
"bytes"
diff --git a/vendor/github.com/btcsuite/btcd/btcec/signature.go b/vendor/github.com/btcsuite/btcd/btcec/signature.go
index 6026c4241..a4781b00f 100644
--- a/vendor/github.com/btcsuite/btcd/btcec/signature.go
+++ b/vendor/github.com/btcsuite/btcd/btcec/signature.go
@@ -85,6 +85,11 @@ func (sig *Signature) IsEqual(otherSig *Signature) bool {
sig.S.Cmp(otherSig.S) == 0
}
+// minSigLen is the minimum length of a DER encoded signature and is
+// when both R and S are 1 byte each.
+// 0x30 + <1-byte> + 0x02 + 0x01 + <byte> + 0x2 + 0x01 + <byte>
+const minSigLen = 8
+
func parseSig(sigStr []byte, curve elliptic.Curve, der bool) (*Signature, error) {
// Originally this code used encoding/asn1 in order to parse the
// signature, but a number of problems were found with this approach.
@@ -98,9 +103,7 @@ func parseSig(sigStr []byte, curve elliptic.Curve, der bool) (*Signature, error)
signature := &Signature{}
- // minimal message is when both numbers are 1 bytes. adding up to:
- // 0x30 + len + 0x02 + 0x01 + <byte> + 0x2 + 0x01 + <byte>
- if len(sigStr) < 8 {
+ if len(sigStr) < minSigLen {
return nil, errors.New("malformed signature: too short")
}
// 0x30
@@ -112,7 +115,10 @@ func parseSig(sigStr []byte, curve elliptic.Curve, der bool) (*Signature, error)
// length of remaining message
siglen := sigStr[index]
index++
- if int(siglen+2) > len(sigStr) {
+
+ // siglen should be less than the entire message and greater than
+ // the minimal message size.
+ if int(siglen+2) > len(sigStr) || int(siglen+2) < minSigLen {
return nil, errors.New("malformed signature: bad length")
}
// trim the slice we're working on so we only look at what matters.
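The new minSigLen constant describes the shortest well-formed DER signature: a SEQUENCE holding two one-byte INTEGERs. For illustration (hypothetical bytes, not part of the patch):

    // 0x30 SEQUENCE, 0x06 bytes remaining,
    // 0x02 0x01 R, 0x02 0x01 S
    sig := []byte{0x30, 0x06, 0x02, 0x01, 0x01, 0x02, 0x01, 0x01}
    fmt.Println(len(sig)) // 8, exactly minSigLen

Anything shorter, or whose embedded length makes siglen+2 smaller than this, is now rejected up front.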
diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS
new file mode 100644
index 000000000..bcfa19520
--- /dev/null
+++ b/vendor/github.com/golang/snappy/AUTHORS
@@ -0,0 +1,15 @@
+# This is the official list of Snappy-Go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+# Name or Organization <email address>
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Damian Gryski
+Google Inc.
+Jan Mercl <0xjnml@gmail.com>
+Rodolfo Carvalho
+Sebastien Binet
diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS
new file mode 100644
index 000000000..931ae3160
--- /dev/null
+++ b/vendor/github.com/golang/snappy/CONTRIBUTORS
@@ -0,0 +1,37 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+# http://code.google.com/legal/individual-cla-v1.0.html
+# http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+# Name <email address>
+
+# Please keep the list sorted.
+
+Damian Gryski
+Jan Mercl <0xjnml@gmail.com>
+Kai Backman
+Marc-Antoine Ruel
+Nigel Tao
+Rob Pike
+Rodolfo Carvalho
+Russ Cox
+Sebastien Binet
diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE
new file mode 100644
index 000000000..6050c10f4
--- /dev/null
+++ b/vendor/github.com/golang/snappy/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README
new file mode 100644
index 000000000..cea12879a
--- /dev/null
+++ b/vendor/github.com/golang/snappy/README
@@ -0,0 +1,107 @@
+The Snappy compression format in the Go programming language.
+
+To download and install from source:
+$ go get github.com/golang/snappy
+
+Unless otherwise noted, the Snappy-Go source files are distributed
+under the BSD-style license found in the LICENSE file.
+
+
+
+Benchmarks.
+
+The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
+or so files, the same set used by the C++ Snappy code (github.com/google/snappy
+and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
+3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:
+
+"go test -test.bench=."
+
+_UFlat0-8 2.19GB/s ± 0% html
+_UFlat1-8 1.41GB/s ± 0% urls
+_UFlat2-8 23.5GB/s ± 2% jpg
+_UFlat3-8 1.91GB/s ± 0% jpg_200
+_UFlat4-8 14.0GB/s ± 1% pdf
+_UFlat5-8 1.97GB/s ± 0% html4
+_UFlat6-8 814MB/s ± 0% txt1
+_UFlat7-8 785MB/s ± 0% txt2
+_UFlat8-8 857MB/s ± 0% txt3
+_UFlat9-8 719MB/s ± 1% txt4
+_UFlat10-8 2.84GB/s ± 0% pb
+_UFlat11-8 1.05GB/s ± 0% gaviota
+
+_ZFlat0-8 1.04GB/s ± 0% html
+_ZFlat1-8 534MB/s ± 0% urls
+_ZFlat2-8 15.7GB/s ± 1% jpg
+_ZFlat3-8 740MB/s ± 3% jpg_200
+_ZFlat4-8 9.20GB/s ± 1% pdf
+_ZFlat5-8 991MB/s ± 0% html4
+_ZFlat6-8 379MB/s ± 0% txt1
+_ZFlat7-8 352MB/s ± 0% txt2
+_ZFlat8-8 396MB/s ± 1% txt3
+_ZFlat9-8 327MB/s ± 1% txt4
+_ZFlat10-8 1.33GB/s ± 1% pb
+_ZFlat11-8 605MB/s ± 1% gaviota
+
+
+
+"go test -test.bench=. -tags=noasm"
+
+_UFlat0-8 621MB/s ± 2% html
+_UFlat1-8 494MB/s ± 1% urls
+_UFlat2-8 23.2GB/s ± 1% jpg
+_UFlat3-8 1.12GB/s ± 1% jpg_200
+_UFlat4-8 4.35GB/s ± 1% pdf
+_UFlat5-8 609MB/s ± 0% html4
+_UFlat6-8 296MB/s ± 0% txt1
+_UFlat7-8 288MB/s ± 0% txt2
+_UFlat8-8 309MB/s ± 1% txt3
+_UFlat9-8 280MB/s ± 1% txt4
+_UFlat10-8 753MB/s ± 0% pb
+_UFlat11-8 400MB/s ± 0% gaviota
+
+_ZFlat0-8 409MB/s ± 1% html
+_ZFlat1-8 250MB/s ± 1% urls
+_ZFlat2-8 12.3GB/s ± 1% jpg
+_ZFlat3-8 132MB/s ± 0% jpg_200
+_ZFlat4-8 2.92GB/s ± 0% pdf
+_ZFlat5-8 405MB/s ± 1% html4
+_ZFlat6-8 179MB/s ± 1% txt1
+_ZFlat7-8 170MB/s ± 1% txt2
+_ZFlat8-8 189MB/s ± 1% txt3
+_ZFlat9-8 164MB/s ± 1% txt4
+_ZFlat10-8 479MB/s ± 1% pb
+_ZFlat11-8 270MB/s ± 1% gaviota
+
+
+
+For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
+are the numbers from C++ Snappy's
+
+make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log
+
+BM_UFlat/0 2.4GB/s html
+BM_UFlat/1 1.4GB/s urls
+BM_UFlat/2 21.8GB/s jpg
+BM_UFlat/3 1.5GB/s jpg_200
+BM_UFlat/4 13.3GB/s pdf
+BM_UFlat/5 2.1GB/s html4
+BM_UFlat/6 1.0GB/s txt1
+BM_UFlat/7 959.4MB/s txt2
+BM_UFlat/8 1.0GB/s txt3
+BM_UFlat/9 864.5MB/s txt4
+BM_UFlat/10 2.9GB/s pb
+BM_UFlat/11 1.2GB/s gaviota
+
+BM_ZFlat/0 944.3MB/s html (22.31 %)
+BM_ZFlat/1 501.6MB/s urls (47.78 %)
+BM_ZFlat/2 14.3GB/s jpg (99.95 %)
+BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %)
+BM_ZFlat/4 8.3GB/s pdf (83.30 %)
+BM_ZFlat/5 903.5MB/s html4 (22.52 %)
+BM_ZFlat/6 336.0MB/s txt1 (57.88 %)
+BM_ZFlat/7 312.3MB/s txt2 (61.91 %)
+BM_ZFlat/8 353.1MB/s txt3 (54.99 %)
+BM_ZFlat/9 289.9MB/s txt4 (66.26 %)
+BM_ZFlat/10 1.2GB/s pb (19.68 %)
+BM_ZFlat/11 527.4MB/s gaviota (37.72 %)
diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go
new file mode 100644
index 000000000..72efb0353
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode.go
@@ -0,0 +1,237 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+var (
+ // ErrCorrupt reports that the input is invalid.
+ ErrCorrupt = errors.New("snappy: corrupt input")
+ // ErrTooLarge reports that the uncompressed length is too large.
+ ErrTooLarge = errors.New("snappy: decoded block is too large")
+ // ErrUnsupported reports that the input isn't supported.
+ ErrUnsupported = errors.New("snappy: unsupported input")
+
+ errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+ v, _, err := decodedLen(src)
+ return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+ v, n := binary.Uvarint(src)
+ if n <= 0 || v > 0xffffffff {
+ return 0, 0, ErrCorrupt
+ }
+
+ const wordSize = 32 << (^uint(0) >> 32 & 1)
+ if wordSize == 32 && v > 0x7fffffff {
+ return 0, 0, ErrTooLarge
+ }
+ return int(v), n, nil
+}
+
+const (
+ decodeErrCodeCorrupt = 1
+ decodeErrCodeUnsupportedLiteralLength = 2
+)
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Decode(dst, src []byte) ([]byte, error) {
+ dLen, s, err := decodedLen(src)
+ if err != nil {
+ return nil, err
+ }
+ if dLen <= len(dst) {
+ dst = dst[:dLen]
+ } else {
+ dst = make([]byte, dLen)
+ }
+ switch decode(dst, src[s:]) {
+ case 0:
+ return dst, nil
+ case decodeErrCodeUnsupportedLiteralLength:
+ return nil, errUnsupportedLiteralLength
+ }
+ return nil, ErrCorrupt
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func NewReader(r io.Reader) *Reader {
+ return &Reader{
+ r: r,
+ decoded: make([]byte, maxBlockSize),
+ buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
+ }
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+type Reader struct {
+ r io.Reader
+ err error
+ decoded []byte
+ buf []byte
+ // decoded[i:j] contains decoded bytes that have not yet been passed on.
+ i, j int
+ readHeader bool
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+ r.r = reader
+ r.err = nil
+ r.i = 0
+ r.j = 0
+ r.readHeader = false
+}
+
+func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
+ if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+ if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+ r.err = ErrCorrupt
+ }
+ return false
+ }
+ return true
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+ for {
+ if r.i < r.j {
+ n := copy(p, r.decoded[r.i:r.j])
+ r.i += n
+ return n, nil
+ }
+ if !r.readFull(r.buf[:4], true) {
+ return 0, r.err
+ }
+ chunkType := r.buf[0]
+ if !r.readHeader {
+ if chunkType != chunkTypeStreamIdentifier {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.readHeader = true
+ }
+ chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+ if chunkLen > len(r.buf) {
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+
+ // The chunk types are specified at
+ // https://github.com/google/snappy/blob/master/framing_format.txt
+ switch chunkType {
+ case chunkTypeCompressedData:
+ // Section 4.2. Compressed data (chunk type 0x00).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ buf := r.buf[:chunkLen]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ buf = buf[checksumSize:]
+
+ n, err := DecodedLen(buf)
+ if err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if n > len(r.decoded) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if _, err := Decode(r.decoded, buf); err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if crc(r.decoded[:n]) != checksum {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeUncompressedData:
+ // Section 4.3. Uncompressed data (chunk type 0x01).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ buf := r.buf[:checksumSize]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ // Read directly into r.decoded instead of via r.buf.
+ n := chunkLen - checksumSize
+ if n > len(r.decoded) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.readFull(r.decoded[:n], false) {
+ return 0, r.err
+ }
+ if crc(r.decoded[:n]) != checksum {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeStreamIdentifier:
+ // Section 4.1. Stream identifier (chunk type 0xff).
+ if chunkLen != len(magicBody) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.readFull(r.buf[:len(magicBody)], false) {
+ return 0, r.err
+ }
+ for i := 0; i < len(magicBody); i++ {
+ if r.buf[i] != magicBody[i] {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ }
+ continue
+ }
+
+ if chunkType <= 0x7f {
+ // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+ // Section 4.4 Padding (chunk type 0xfe).
+ // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+ if !r.readFull(r.buf[:chunkLen], false) {
+ return 0, r.err
+ }
+ }
+}
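A brief usage sketch of the two decoding entry points defined above: Decode for a raw block, NewReader for the framed stream format. The helper name and inputs are illustrative, not part of the patch.

    // imports assumed: io, io/ioutil, github.com/golang/snappy
    func decompress(block []byte, stream io.Reader) (raw, framed []byte, err error) {
        raw, err = snappy.Decode(nil, block) // nil dst: Decode allocates as needed
        if err != nil {
            return nil, nil, err
        }
        framed, err = ioutil.ReadAll(snappy.NewReader(stream))
        if err != nil {
            return nil, nil, err
        }
        return raw, framed, nil
    }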
diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go
new file mode 100644
index 000000000..fcd192b84
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.go
@@ -0,0 +1,14 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// decode has the same semantics as in decode_other.go.
+//
+//go:noescape
+func decode(dst, src []byte) int
diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s
new file mode 100644
index 000000000..e6179f65e
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.s
@@ -0,0 +1,490 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The asm code generally follows the pure Go code in decode_other.go, except
+// where marked with a "!!!".
+
+// func decode(dst, src []byte) int
+//
+// All local variables fit into registers. The non-zero stack size is only to
+// spill registers and push args when issuing a CALL. The register allocation:
+// - AX scratch
+// - BX scratch
+// - CX length or x
+// - DX offset
+// - SI &src[s]
+// - DI &dst[d]
+// + R8 dst_base
+// + R9 dst_len
+// + R10 dst_base + dst_len
+// + R11 src_base
+// + R12 src_len
+// + R13 src_base + src_len
+// - R14 used by doCopy
+// - R15 used by doCopy
+//
+// The registers R8-R13 (marked with a "+") are set at the start of the
+// function, and after a CALL returns, and are not otherwise modified.
+//
+// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
+// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
+TEXT ·decode(SB), NOSPLIT, $48-56
+ // Initialize SI, DI and R8-R13.
+ MOVQ dst_base+0(FP), R8
+ MOVQ dst_len+8(FP), R9
+ MOVQ R8, DI
+ MOVQ R8, R10
+ ADDQ R9, R10
+ MOVQ src_base+24(FP), R11
+ MOVQ src_len+32(FP), R12
+ MOVQ R11, SI
+ MOVQ R11, R13
+ ADDQ R12, R13
+
+loop:
+ // for s < len(src)
+ CMPQ SI, R13
+ JEQ end
+
+ // CX = uint32(src[s])
+ //
+ // switch src[s] & 0x03
+ MOVBLZX (SI), CX
+ MOVL CX, BX
+ ANDL $3, BX
+ CMPL BX, $1
+ JAE tagCopy
+
+ // ----------------------------------------
+ // The code below handles literal tags.
+
+ // case tagLiteral:
+ // x := uint32(src[s] >> 2)
+ // switch
+ SHRL $2, CX
+ CMPL CX, $60
+ JAE tagLit60Plus
+
+ // case x < 60:
+ // s++
+ INCQ SI
+
+doLit:
+ // This is the end of the inner "switch", when we have a literal tag.
+ //
+ // We assume that CX == x and x fits in a uint32, where x is the variable
+ // used in the pure Go decode_other.go code.
+
+ // length = int(x) + 1
+ //
+ // Unlike the pure Go code, we don't need to check if length <= 0 because
+ // CX can hold 64 bits, so the increment cannot overflow.
+ INCQ CX
+
+ // Prepare to check if copying length bytes will run past the end of dst or
+ // src.
+ //
+ // AX = len(dst) - d
+ // BX = len(src) - s
+ MOVQ R10, AX
+ SUBQ DI, AX
+ MOVQ R13, BX
+ SUBQ SI, BX
+
+ // !!! Try a faster technique for short (16 or fewer bytes) copies.
+ //
+ // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
+ // goto callMemmove // Fall back on calling runtime·memmove.
+ // }
+ //
+ // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
+ // against 21 instead of 16, because it cannot assume that all of its input
+ // is contiguous in memory and so it needs to leave enough source bytes to
+ // read the next tag without refilling buffers, but Go's Decode assumes
+ // contiguousness (the src argument is a []byte).
+ CMPQ CX, $16
+ JGT callMemmove
+ CMPQ AX, $16
+ JLT callMemmove
+ CMPQ BX, $16
+ JLT callMemmove
+
+ // !!! Implement the copy from src to dst as a 16-byte load and store.
+ // (Decode's documentation says that dst and src must not overlap.)
+ //
+ // This always copies 16 bytes, instead of only length bytes, but that's
+ // OK. If the input is a valid Snappy encoding then subsequent iterations
+ // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
+ // non-nil error), so the overrun will be ignored.
+ //
+ // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+ // 16-byte loads and stores. This technique probably wouldn't be as
+ // effective on architectures that are fussier about alignment.
+ MOVOU 0(SI), X0
+ MOVOU X0, 0(DI)
+
+ // d += length
+ // s += length
+ ADDQ CX, DI
+ ADDQ CX, SI
+ JMP loop
+
+callMemmove:
+ // if length > len(dst)-d || length > len(src)-s { etc }
+ CMPQ CX, AX
+ JGT errCorrupt
+ CMPQ CX, BX
+ JGT errCorrupt
+
+ // copy(dst[d:], src[s:s+length])
+ //
+ // This means calling runtime·memmove(&dst[d], &src[s], length), so we push
+ // DI, SI and CX as arguments. Coincidentally, we also need to spill those
+ // three registers to the stack, to save local variables across the CALL.
+ MOVQ DI, 0(SP)
+ MOVQ SI, 8(SP)
+ MOVQ CX, 16(SP)
+ MOVQ DI, 24(SP)
+ MOVQ SI, 32(SP)
+ MOVQ CX, 40(SP)
+ CALL runtime·memmove(SB)
+
+ // Restore local variables: unspill registers from the stack and
+ // re-calculate R8-R13.
+ MOVQ 24(SP), DI
+ MOVQ 32(SP), SI
+ MOVQ 40(SP), CX
+ MOVQ dst_base+0(FP), R8
+ MOVQ dst_len+8(FP), R9
+ MOVQ R8, R10
+ ADDQ R9, R10
+ MOVQ src_base+24(FP), R11
+ MOVQ src_len+32(FP), R12
+ MOVQ R11, R13
+ ADDQ R12, R13
+
+ // d += length
+ // s += length
+ ADDQ CX, DI
+ ADDQ CX, SI
+ JMP loop
+
+tagLit60Plus:
+ // !!! This fragment does the
+ //
+ // s += x - 58; if uint(s) > uint(len(src)) { etc }
+ //
+ // checks. In the asm version, we code it once instead of once per switch case.
+ ADDQ CX, SI
+ SUBQ $58, SI
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // case x == 60:
+ CMPL CX, $61
+ JEQ tagLit61
+ JA tagLit62Plus
+
+ // x = uint32(src[s-1])
+ MOVBLZX -1(SI), CX
+ JMP doLit
+
+tagLit61:
+ // case x == 61:
+ // x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ MOVWLZX -2(SI), CX
+ JMP doLit
+
+tagLit62Plus:
+ CMPL CX, $62
+ JA tagLit63
+
+ // case x == 62:
+ // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ MOVWLZX -3(SI), CX
+ MOVBLZX -1(SI), BX
+ SHLL $16, BX
+ ORL BX, CX
+ JMP doLit
+
+tagLit63:
+ // case x == 63:
+ // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ MOVL -4(SI), CX
+ JMP doLit
+
+// The code above handles literal tags.
+// ----------------------------------------
+// The code below handles copy tags.
+
+tagCopy4:
+ // case tagCopy4:
+ // s += 5
+ ADDQ $5, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // length = 1 + int(src[s-5])>>2
+ SHRQ $2, CX
+ INCQ CX
+
+ // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ MOVLQZX -4(SI), DX
+ JMP doCopy
+
+tagCopy2:
+ // case tagCopy2:
+ // s += 3
+ ADDQ $3, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // length = 1 + int(src[s-3])>>2
+ SHRQ $2, CX
+ INCQ CX
+
+ // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+ MOVWQZX -2(SI), DX
+ JMP doCopy
+
+tagCopy:
+ // We have a copy tag. We assume that:
+ // - BX == src[s] & 0x03
+ // - CX == src[s]
+ CMPQ BX, $2
+ JEQ tagCopy2
+ JA tagCopy4
+
+ // case tagCopy1:
+ // s += 2
+ ADDQ $2, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+ MOVQ CX, DX
+ ANDQ $0xe0, DX
+ SHLQ $3, DX
+ MOVBQZX -1(SI), BX
+ ORQ BX, DX
+
+ // length = 4 + int(src[s-2])>>2&0x7
+ SHRQ $2, CX
+ ANDQ $7, CX
+ ADDQ $4, CX
+
+doCopy:
+ // This is the end of the outer "switch", when we have a copy tag.
+ //
+ // We assume that:
+ // - CX == length && CX > 0
+ // - DX == offset
+
+ // if offset <= 0 { etc }
+ CMPQ DX, $0
+ JLE errCorrupt
+
+ // if d < offset { etc }
+ MOVQ DI, BX
+ SUBQ R8, BX
+ CMPQ BX, DX
+ JLT errCorrupt
+
+ // if length > len(dst)-d { etc }
+ MOVQ R10, BX
+ SUBQ DI, BX
+ CMPQ CX, BX
+ JGT errCorrupt
+
+ // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+ //
+ // Set:
+ // - R14 = len(dst)-d
+ // - R15 = &dst[d-offset]
+ MOVQ R10, R14
+ SUBQ DI, R14
+ MOVQ DI, R15
+ SUBQ DX, R15
+
+ // !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+ //
+ // First, try using two 8-byte load/stores, similar to the doLit technique
+ // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+ // still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+ // and not one 16-byte load/store, and the first store has to be before the
+ // second load, due to the overlap if offset is in the range [8, 16).
+ //
+ // if length > 16 || offset < 8 || len(dst)-d < 16 {
+ // goto slowForwardCopy
+ // }
+ // copy 16 bytes
+ // d += length
+ CMPQ CX, $16
+ JGT slowForwardCopy
+ CMPQ DX, $8
+ JLT slowForwardCopy
+ CMPQ R14, $16
+ JLT slowForwardCopy
+ MOVQ 0(R15), AX
+ MOVQ AX, 0(DI)
+ MOVQ 8(R15), BX
+ MOVQ BX, 8(DI)
+ ADDQ CX, DI
+ JMP loop
+
+slowForwardCopy:
+ // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+ // can still try 8-byte load stores, provided we can overrun up to 10 extra
+ // bytes. As above, the overrun will be fixed up by subsequent iterations
+ // of the outermost loop.
+ //
+ // The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+ // commentary says:
+ //
+ // ----
+ //
+ // The main part of this loop is a simple copy of eight bytes at a time
+ // until we've copied (at least) the requested amount of bytes. However,
+ // if d and d-offset are less than eight bytes apart (indicating a
+ // repeating pattern of length < 8), we first need to expand the pattern in
+ // order to get the correct results. For instance, if the buffer looks like
+ // this, with the eight-byte <d-offset, d-offset+8> and <d, d+8> patterns
+ // marked as intervals:
+ //
+ //   abxxxxxxxxxxxx
+ //   [------]           d-offset
+ //     [------]         d
+ //
+ // a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+ // once, after which we can move <d> two bytes without moving <d-offset>:
+ //
+ //   ababxxxxxxxxxx
+ //   [------]           d-offset
+ //       [------]       d
+ //
+ // and repeat the exercise until the two no longer overlap.
+ //
+ // This allows us to do very well in the special case of one single byte
+ // repeated many times, without taking a big hit for more general cases.
+ //
+ // The worst case of extra writing past the end of the match occurs when
+ // offset == 1 and length == 1; the last copy will read from byte positions
+ // [0..7] and write to [4..11], whereas it was only supposed to write to
+ // position 1. Thus, ten excess bytes.
+ //
+ // ----
+ //
+ // That "10 byte overrun" worst case is confirmed by Go's
+ // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+ // and finishSlowForwardCopy algorithm.
+ //
+ // if length > len(dst)-d-10 {
+ // goto verySlowForwardCopy
+ // }
+ SUBQ $10, R14
+ CMPQ CX, R14
+ JGT verySlowForwardCopy
+
+makeOffsetAtLeast8:
+ // !!! As above, expand the pattern so that offset >= 8 and we can use
+ // 8-byte load/stores.
+ //
+ // for offset < 8 {
+ // copy 8 bytes from dst[d-offset:] to dst[d:]
+ // length -= offset
+ // d += offset
+ // offset += offset
+ // // The two previous lines together means that d-offset, and therefore
+ // // R15, is unchanged.
+ // }
+ CMPQ DX, $8
+ JGE fixUpSlowForwardCopy
+ MOVQ (R15), BX
+ MOVQ BX, (DI)
+ SUBQ DX, CX
+ ADDQ DX, DI
+ ADDQ DX, DX
+ JMP makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+ // !!! Add length (which might be negative now) to d (implied by DI being
+ // &dst[d]) so that d ends up at the right place when we jump back to the
+ // top of the loop. Before we do that, though, we save DI to AX so that, if
+ // length is positive, copying the remaining length bytes will write to the
+ // right place.
+ MOVQ DI, AX
+ ADDQ CX, DI
+
+finishSlowForwardCopy:
+ // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+ // length means that we overrun, but as above, that will be fixed up by
+ // subsequent iterations of the outermost loop.
+ CMPQ CX, $0
+ JLE loop
+ MOVQ (R15), BX
+ MOVQ BX, (AX)
+ ADDQ $8, R15
+ ADDQ $8, AX
+ SUBQ $8, CX
+ JMP finishSlowForwardCopy
+
+verySlowForwardCopy:
+ // verySlowForwardCopy is a simple implementation of forward copy. In C
+ // parlance, this is a do/while loop instead of a while loop, since we know
+ // that length > 0. In Go syntax:
+ //
+ // for {
+ // dst[d] = dst[d - offset]
+ // d++
+ // length--
+ // if length == 0 {
+ // break
+ // }
+ // }
+ MOVB (R15), BX
+ MOVB BX, (DI)
+ INCQ R15
+ INCQ DI
+ DECQ CX
+ JNZ verySlowForwardCopy
+ JMP loop
+
+// The code above handles copy tags.
+// ----------------------------------------
+
+end:
+ // This is the end of the "for s < len(src)".
+ //
+ // if d != len(dst) { etc }
+ CMPQ DI, R10
+ JNE errCorrupt
+
+ // return 0
+ MOVQ $0, ret+48(FP)
+ RET
+
+errCorrupt:
+ // return decodeErrCodeCorrupt
+ MOVQ $1, ret+48(FP)
+ RET
diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go
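The slowForwardCopy commentary above boils down to expanding a short repeating pattern until each chunk's source and destination no longer overlap. A rough Go rendering of that idea, for reference only; the assembly additionally overruns by up to 10 bytes and lets later iterations fix that up.

    // forwardCopy fills dst[d:d+length] from dst[d-offset:], doubling the
    // pattern's period each pass so no copy reads bytes it is writing.
    func forwardCopy(dst []byte, d, offset, length int) {
        for length > 0 {
            n := offset
            if n > length {
                n = length
            }
            copy(dst[d:d+n], dst[d-offset:d-offset+n])
            d += n
            length -= n
            offset *= 2
        }
    }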
new file mode 100644
index 000000000..8c9f2049b
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_other.go
@@ -0,0 +1,101 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+// decode writes the decoding of src to dst. It assumes that the varint-encoded
+// length of the decompressed bytes has already been read, and that len(dst)
+// equals that length.
+//
+// It returns 0 on success or a decodeErrCodeXxx error code on failure.
+func decode(dst, src []byte) int {
+ var d, s, offset, length int
+ for s < len(src) {
+ switch src[s] & 0x03 {
+ case tagLiteral:
+ x := uint32(src[s] >> 2)
+ switch {
+ case x < 60:
+ s++
+ case x == 60:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-1])
+ case x == 61:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ case x == 62:
+ s += 4
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ case x == 63:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ }
+ length = int(x) + 1
+ if length <= 0 {
+ return decodeErrCodeUnsupportedLiteralLength
+ }
+ if length > len(dst)-d || length > len(src)-s {
+ return decodeErrCodeCorrupt
+ }
+ copy(dst[d:], src[s:s+length])
+ d += length
+ s += length
+ continue
+
+ case tagCopy1:
+ s += 2
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 4 + int(src[s-2])>>2&0x7
+ offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+
+ case tagCopy2:
+ s += 3
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-3])>>2
+ offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+
+ case tagCopy4:
+ s += 5
+ if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+ return decodeErrCodeCorrupt
+ }
+ length = 1 + int(src[s-5])>>2
+ offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ }
+
+ if offset <= 0 || d < offset || length > len(dst)-d {
+ return decodeErrCodeCorrupt
+ }
+ // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
+ // the built-in copy function, this byte-by-byte copy always runs
+ // forwards, even if the slices overlap. Conceptually, this is:
+ //
+ // d += forwardCopy(dst[d:d+length], dst[d-offset:])
+ for end := d + length; d != end; d++ {
+ dst[d] = dst[d-offset]
+ }
+ }
+ if d != len(dst) {
+ return decodeErrCodeCorrupt
+ }
+ return 0
+}
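To make the copy-tag arithmetic above concrete, a tiny worked example for tagCopy1 (hypothetical input bytes, not part of the patch):

    // tag byte 0x2d: low two bits 01 = tagCopy1,
    // length = 4 + (0x2d>>2)&0x7 = 7,
    // offset = (0x2d&0xe0)<<3 | 0x05 = 261.
    src := []byte{0x2d, 0x05}
    length := 4 + int(src[0])>>2&0x7
    offset := int(uint32(src[0])&0xe0<<3 | uint32(src[1]))
    fmt.Println(length, offset) // 7 261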
diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go
new file mode 100644
index 000000000..8d393e904
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode.go
@@ -0,0 +1,285 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Encode(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ for len(src) > 0 {
+ p := src
+ src = nil
+ if len(p) > maxBlockSize {
+ p, src = p[:maxBlockSize], p[maxBlockSize:]
+ }
+ if len(p) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], p)
+ } else {
+ d += encodeBlock(dst[d:], p)
+ }
+ }
+ return dst[:d]
+}
+
+// inputMargin is the minimum number of extra input bytes to keep, inside
+// encodeBlock's inner loop. On some architectures, this margin lets us
+// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
+// literals can be implemented as a single load to and store from a 16-byte
+// register. That literal's actual length can be as short as 1 byte, so this
+// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
+// the encoding loop will fix up the copy overrun, and this inputMargin ensures
+// that we don't overrun the dst and src buffers.
+const inputMargin = 16 - 1
+
+// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
+// could be encoded with a copy tag. This is the minimum with respect to the
+// algorithm used by encodeBlock, not a minimum enforced by the file format.
+//
+// The encoded output must start with at least a 1 byte literal, as there are
+// no previous bytes to copy. A minimal (1 byte) copy after that, generated
+// from an emitCopy call in encodeBlock's main loop, would require at least
+// another inputMargin bytes, for the reason above: we want any emitLiteral
+// calls inside encodeBlock's main loop to use the fast path if possible, which
+// requires being able to overrun by inputMargin bytes. Thus,
+// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
+//
+// The C++ code doesn't use this exact threshold, but it could, as discussed at
+// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
+// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
+// optimization. It should not affect the encoded form. This is tested by
+// TestSameEncodingAsCppShortCopies.
+const minNonLiteralBlockSize = 1 + 1 + inputMargin
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+//
+// It will return a negative value if srcLen is too large to encode.
+func MaxEncodedLen(srcLen int) int {
+ n := uint64(srcLen)
+ if n > 0xffffffff {
+ return -1
+ }
+ // Compressed data can be defined as:
+ // compressed := item* literal*
+ // item := literal* copy
+ //
+ // The trailing literal sequence has a space blowup of at most 62/60
+ // since a literal of length 60 needs one tag byte + one extra byte
+ // for length information.
+ //
+ // Item blowup is trickier to measure. Suppose the "copy" op copies
+ // 4 bytes of data. Because of a special check in the encoding code,
+ // we produce a 4-byte copy only if the offset is < 65536. Therefore
+ // the copy op takes 3 bytes to encode, and this type of item leads
+ // to at most the 62/60 blowup for representing literals.
+ //
+ // Suppose the "copy" op copies 5 bytes of data. If the offset is big
+ // enough, it will take 5 bytes to encode the copy op. Therefore the
+ // worst case here is a one-byte literal followed by a five-byte copy.
+ // That is, 6 bytes of input turn into 7 bytes of "compressed" data.
+ //
+ // This last factor dominates the blowup, so the final estimate is:
+ n = 32 + n + n/6
+ if n > 0xffffffff {
+ return -1
+ }
+ return int(n)
+}
+
+var errClosed = errors.New("snappy: Writer is closed")
+
+// NewWriter returns a new Writer that compresses to w.
+//
+// The Writer returned does not buffer writes. There is no need to Flush or
+// Close such a Writer.
+//
+// Deprecated: the Writer returned is not suitable for many small writes, only
+// for few large writes. Use NewBufferedWriter instead, which is efficient
+// regardless of the frequency and shape of the writes, and remember to Close
+// that Writer when done.
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ obuf: make([]byte, obufLen),
+ }
+}
+
+// NewBufferedWriter returns a new Writer that compresses to w, using the
+// framing format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+//
+// The Writer returned buffers writes. Users must call Close to guarantee all
+// data has been forwarded to the underlying io.Writer. They may also call
+// Flush zero or more times before calling Close.
+func NewBufferedWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ ibuf: make([]byte, 0, maxBlockSize),
+ obuf: make([]byte, obufLen),
+ }
+}
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+type Writer struct {
+ w io.Writer
+ err error
+
+ // ibuf is a buffer for the incoming (uncompressed) bytes.
+ //
+ // Its use is optional. For backwards compatibility, Writers created by the
+ // NewWriter function have ibuf == nil, do not buffer incoming bytes, and
+ // therefore do not need to be Flush'ed or Close'd.
+ ibuf []byte
+
+ // obuf is a buffer for the outgoing (compressed) bytes.
+ obuf []byte
+
+ // wroteStreamHeader is whether we have written the stream header.
+ wroteStreamHeader bool
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to
+// w. This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+ w.w = writer
+ w.err = nil
+ if w.ibuf != nil {
+ w.ibuf = w.ibuf[:0]
+ }
+ w.wroteStreamHeader = false
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (nRet int, errRet error) {
+ if w.ibuf == nil {
+ // Do not buffer incoming bytes. This does not perform or compress well
+ // if the caller of Writer.Write writes many small slices. This
+ // behavior is therefore deprecated, but still supported for backwards
+ // compatibility with code that doesn't explicitly Flush or Close.
+ return w.write(p)
+ }
+
+ // The remainder of this method is based on bufio.Writer.Write from the
+ // standard library.
+
+ for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
+ var n int
+ if len(w.ibuf) == 0 {
+ // Large write, empty buffer.
+ // Write directly from p to avoid copy.
+ n, _ = w.write(p)
+ } else {
+ n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+ w.ibuf = w.ibuf[:len(w.ibuf)+n]
+ w.Flush()
+ }
+ nRet += n
+ p = p[n:]
+ }
+ if w.err != nil {
+ return nRet, w.err
+ }
+ n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+ w.ibuf = w.ibuf[:len(w.ibuf)+n]
+ nRet += n
+ return nRet, nil
+}
+
+func (w *Writer) write(p []byte) (nRet int, errRet error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+ for len(p) > 0 {
+ obufStart := len(magicChunk)
+ if !w.wroteStreamHeader {
+ w.wroteStreamHeader = true
+ copy(w.obuf, magicChunk)
+ obufStart = 0
+ }
+
+ var uncompressed []byte
+ if len(p) > maxBlockSize {
+ uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
+ } else {
+ uncompressed, p = p, nil
+ }
+ checksum := crc(uncompressed)
+
+ // Compress the buffer, discarding the result if the improvement
+ // isn't at least 12.5%.
+ compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
+ chunkType := uint8(chunkTypeCompressedData)
+ chunkLen := 4 + len(compressed)
+ obufEnd := obufHeaderLen + len(compressed)
+ if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
+ chunkType = chunkTypeUncompressedData
+ chunkLen = 4 + len(uncompressed)
+ obufEnd = obufHeaderLen
+ }
+
+ // Fill in the per-chunk header that comes before the body.
+ w.obuf[len(magicChunk)+0] = chunkType
+ w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
+ w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
+ w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
+ w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
+ w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
+ w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
+ w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
+
+ if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
+ w.err = err
+ return nRet, err
+ }
+ if chunkType == chunkTypeUncompressedData {
+ if _, err := w.w.Write(uncompressed); err != nil {
+ w.err = err
+ return nRet, err
+ }
+ }
+ nRet += len(uncompressed)
+ }
+ return nRet, nil
+}
+
+// Flush flushes the Writer to its underlying io.Writer.
+func (w *Writer) Flush() error {
+ if w.err != nil {
+ return w.err
+ }
+ if len(w.ibuf) == 0 {
+ return nil
+ }
+ w.write(w.ibuf)
+ w.ibuf = w.ibuf[:0]
+ return w.err
+}
+
+// Close calls Flush and then closes the Writer.
+func (w *Writer) Close() error {
+ w.Flush()
+ ret := w.err
+ if w.err == nil {
+ w.err = errClosed
+ }
+ return ret
+}
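And the encoding counterpart: Encode produces a raw block, while NewBufferedWriter emits the framed format and must be Closed. A sketch using only the functions defined above (helper name illustrative, not part of the patch):

    // imports assumed: io, github.com/golang/snappy
    func compress(w io.Writer, data []byte) ([]byte, error) {
        block := snappy.Encode(nil, data) // raw block format
        bw := snappy.NewBufferedWriter(w) // framed format
        if _, err := bw.Write(data); err != nil {
            bw.Close()
            return nil, err
        }
        return block, bw.Close()
    }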
diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go
new file mode 100644
index 000000000..150d91bc8
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.go
@@ -0,0 +1,29 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// emitLiteral has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitLiteral(dst, lit []byte) int
+
+// emitCopy has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitCopy(dst []byte, offset, length int) int
+
+// extendMatch has the same semantics as in encode_other.go.
+//
+//go:noescape
+func extendMatch(src []byte, i, j int) int
+
+// encodeBlock has the same semantics as in encode_other.go.
+//
+//go:noescape
+func encodeBlock(dst, src []byte) (d int)
diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s
new file mode 100644
index 000000000..adfd979fe
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.s
@@ -0,0 +1,730 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
+// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
+// https://github.com/golang/snappy/issues/29
+//
+// As a workaround, the package was built with a known good assembler, and
+// those instructions were disassembled by "objdump -d" to yield the
+// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+// style comments, in AT&T asm syntax. Note that rsp here is a physical
+// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
+// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
+// fine on Go 1.6.
+
+// The asm code generally follows the pure Go code in encode_other.go, except
+// where marked with a "!!!".
+
+// ----------------------------------------------------------------------------
+
+// func emitLiteral(dst, lit []byte) int
+//
+// All local variables fit into registers. The register allocation:
+// - AX len(lit)
+// - BX n
+// - DX return value
+// - DI &dst[i]
+// - R10 &lit[0]
+//
+// The 24 bytes of stack space is to call runtime·memmove.
+//
+// The unusual register allocation of local variables, such as R10 for the
+// source pointer, matches the allocation used at the call site in encodeBlock,
+// which makes it easier to manually inline this function.
+TEXT ·emitLiteral(SB), NOSPLIT, $24-56
+ MOVQ dst_base+0(FP), DI
+ MOVQ lit_base+24(FP), R10
+ MOVQ lit_len+32(FP), AX
+ MOVQ AX, DX
+ MOVL AX, BX
+ SUBL $1, BX
+
+ CMPL BX, $60
+ JLT oneByte
+ CMPL BX, $256
+ JLT twoBytes
+
+threeBytes:
+ MOVB $0xf4, 0(DI)
+ MOVW BX, 1(DI)
+ ADDQ $3, DI
+ ADDQ $3, DX
+ JMP memmove
+
+twoBytes:
+ MOVB $0xf0, 0(DI)
+ MOVB BX, 1(DI)
+ ADDQ $2, DI
+ ADDQ $2, DX
+ JMP memmove
+
+oneByte:
+ SHLB $2, BX
+ MOVB BX, 0(DI)
+ ADDQ $1, DI
+ ADDQ $1, DX
+
+memmove:
+ MOVQ DX, ret+48(FP)
+
+ // copy(dst[i:], lit)
+ //
+ // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+ // DI, R10 and AX as arguments.
+ MOVQ DI, 0(SP)
+ MOVQ R10, 8(SP)
+ MOVQ AX, 16(SP)
+ CALL runtime·memmove(SB)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func emitCopy(dst []byte, offset, length int) int
+//
+// All local variables fit into registers. The register allocation:
+// - AX length
+// - SI &dst[0]
+// - DI &dst[i]
+// - R11 offset
+//
+// The unusual register allocation of local variables, such as R11 for the
+// offset, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·emitCopy(SB), NOSPLIT, $0-48
+ MOVQ dst_base+0(FP), DI
+ MOVQ DI, SI
+ MOVQ offset+24(FP), R11
+ MOVQ length+32(FP), AX
+
+loop0:
+ // for length >= 68 { etc }
+ CMPL AX, $68
+ JLT step1
+
+ // Emit a length 64 copy, encoded as 3 bytes.
+ MOVB $0xfe, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $64, AX
+ JMP loop0
+
+step1:
+ // if length > 64 { etc }
+ CMPL AX, $64
+ JLE step2
+
+ // Emit a length 60 copy, encoded as 3 bytes.
+ MOVB $0xee, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $60, AX
+
+step2:
+ // if length >= 12 || offset >= 2048 { goto step3 }
+ CMPL AX, $12
+ JGE step3
+ CMPL R11, $2048
+ JGE step3
+
+ // Emit the remaining copy, encoded as 2 bytes.
+ MOVB R11, 1(DI)
+ SHRL $8, R11
+ SHLB $5, R11
+ SUBB $4, AX
+ SHLB $2, AX
+ ORB AX, R11
+ ORB $1, R11
+ MOVB R11, 0(DI)
+ ADDQ $2, DI
+
+ // Return the number of bytes written.
+ SUBQ SI, DI
+ MOVQ DI, ret+40(FP)
+ RET
+
+step3:
+ // Emit the remaining copy, encoded as 3 bytes.
+ SUBL $1, AX
+ SHLB $2, AX
+ ORB $2, AX
+ MOVB AX, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+
+ // Return the number of bytes written.
+ SUBQ SI, DI
+ MOVQ DI, ret+40(FP)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func extendMatch(src []byte, i, j int) int
+//
+// All local variables fit into registers. The register allocation:
+// - DX &src[0]
+// - SI &src[j]
+// - R13 &src[len(src) - 8]
+// - R14 &src[len(src)]
+// - R15 &src[i]
+//
+// The unusual register allocation of local variables, such as R15 for a source
+// pointer, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·extendMatch(SB), NOSPLIT, $0-48
+ MOVQ src_base+0(FP), DX
+ MOVQ src_len+8(FP), R14
+ MOVQ i+24(FP), R15
+ MOVQ j+32(FP), SI
+ ADDQ DX, R14
+ ADDQ DX, R15
+ ADDQ DX, SI
+ MOVQ R14, R13
+ SUBQ $8, R13
+
+cmp8:
+ // As long as we are 8 or more bytes before the end of src, we can load and
+ // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+ CMPQ SI, R13
+ JA cmp1
+ MOVQ (R15), AX
+ MOVQ (SI), BX
+ CMPQ AX, BX
+ JNE bsf
+ ADDQ $8, R15
+ ADDQ $8, SI
+ JMP cmp8
+
+bsf:
+ // If those 8 bytes were not equal, XOR the two 8 byte values, and return
+ // the index of the first byte that differs. The BSF instruction finds the
+ // least significant 1 bit, the amd64 architecture is little-endian, and
+ // the shift by 3 converts a bit index to a byte index.
+ XORQ AX, BX
+ BSFQ BX, BX
+ SHRQ $3, BX
+ ADDQ BX, SI
+
+ // Convert from &src[ret] to ret.
+ SUBQ DX, SI
+ MOVQ SI, ret+40(FP)
+ RET
+
+cmp1:
+ // In src's tail, compare 1 byte at a time.
+ CMPQ SI, R14
+ JAE extendMatchEnd
+ MOVB (R15), AX
+ MOVB (SI), BX
+ CMPB AX, BX
+ JNE extendMatchEnd
+ ADDQ $1, R15
+ ADDQ $1, SI
+ JMP cmp1
+
+extendMatchEnd:
+ // Convert from &src[ret] to ret.
+ SUBQ DX, SI
+ MOVQ SI, ret+40(FP)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func encodeBlock(dst, src []byte) (d int)
+//
+// All local variables fit into registers, other than "var table". The register
+// allocation:
+// - AX . .
+// - BX . .
+// - CX 56 shift (note that amd64 shifts by non-immediates must use CX).
+// - DX 64 &src[0], tableSize
+// - SI 72 &src[s]
+// - DI 80 &dst[d]
+// - R9 88 sLimit
+// - R10 . &src[nextEmit]
+// - R11 96 prevHash, currHash, nextHash, offset
+// - R12 104 &src[base], skip
+// - R13 . &src[nextS], &src[len(src) - 8]
+// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x
+// - R15 112 candidate
+//
+// The second column (56, 64, etc) is the stack offset to spill the registers
+// when calling other functions. We could pack this slightly tighter, but it's
+// simpler to have a dedicated spill map independent of the function called.
+//
+// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
+// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
+// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
+TEXT ·encodeBlock(SB), 0, $32888-56
+ MOVQ dst_base+0(FP), DI
+ MOVQ src_base+24(FP), SI
+ MOVQ src_len+32(FP), R14
+
+ // shift, tableSize := uint32(32-8), 1<<8
+ MOVQ $24, CX
+ MOVQ $256, DX
+
+calcShift:
+ // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+ // shift--
+ // }
+ CMPQ DX, $16384
+ JGE varTable
+ CMPQ DX, R14
+ JGE varTable
+ SUBQ $1, CX
+ SHLQ $1, DX
+ JMP calcShift
+
+varTable:
+ // var table [maxTableSize]uint16
+ //
+ // In the asm code, unlike the Go code, we can zero-initialize only the
+ // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
+ // writes 16 bytes, so we can do only tableSize/8 writes instead of the
+ // 2048 writes that would zero-initialize all of table's 32768 bytes.
+ SHRQ $3, DX
+ LEAQ table-32768(SP), BX
+ PXOR X0, X0
+
+memclr:
+ MOVOU X0, 0(BX)
+ ADDQ $16, BX
+ SUBQ $1, DX
+ JNZ memclr
+
+ // !!! DX = &src[0]
+ MOVQ SI, DX
+
+ // sLimit := len(src) - inputMargin
+ MOVQ R14, R9
+ SUBQ $15, R9
+
+ // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
+ // change for the rest of the function.
+ MOVQ CX, 56(SP)
+ MOVQ DX, 64(SP)
+ MOVQ R9, 88(SP)
+
+ // nextEmit := 0
+ MOVQ DX, R10
+
+ // s := 1
+ ADDQ $1, SI
+
+ // nextHash := hash(load32(src, s), shift)
+ MOVL 0(SI), R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+outer:
+ // for { etc }
+
+ // skip := 32
+ MOVQ $32, R12
+
+ // nextS := s
+ MOVQ SI, R13
+
+ // candidate := 0
+ MOVQ $0, R15
+
+inner0:
+ // for { etc }
+
+ // s := nextS
+ MOVQ R13, SI
+
+ // bytesBetweenHashLookups := skip >> 5
+ MOVQ R12, R14
+ SHRQ $5, R14
+
+ // nextS = s + bytesBetweenHashLookups
+ ADDQ R14, R13
+
+ // skip += bytesBetweenHashLookups
+ ADDQ R14, R12
+
+ // if nextS > sLimit { goto emitRemainder }
+ MOVQ R13, AX
+ SUBQ DX, AX
+ CMPQ AX, R9
+ JA emitRemainder
+
+ // candidate = int(table[nextHash])
+ // XXX: MOVWQZX table-32768(SP)(R11*2), R15
+ // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+ BYTE $0x4e
+ BYTE $0x0f
+ BYTE $0xb7
+ BYTE $0x7c
+ BYTE $0x5c
+ BYTE $0x78
+
+ // table[nextHash] = uint16(s)
+ MOVQ SI, AX
+ SUBQ DX, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // nextHash = hash(load32(src, nextS), shift)
+ MOVL 0(R13), R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // if load32(src, s) != load32(src, candidate) { continue } break
+ MOVL 0(SI), AX
+ MOVL (DX)(R15*1), BX
+ CMPL AX, BX
+ JNE inner0
+
+fourByteMatch:
+ // As per the encode_other.go code:
+ //
+ // A 4-byte match has been found. We'll later see etc.
+
+ // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
+ // on inputMargin in encode.go.
+ MOVQ SI, AX
+ SUBQ R10, AX
+ CMPQ AX, $16
+ JLE emitLiteralFastPath
+
+ // ----------------------------------------
+ // Begin inline of the emitLiteral call.
+ //
+ // d += emitLiteral(dst[d:], src[nextEmit:s])
+
+ MOVL AX, BX
+ SUBL $1, BX
+
+ CMPL BX, $60
+ JLT inlineEmitLiteralOneByte
+ CMPL BX, $256
+ JLT inlineEmitLiteralTwoBytes
+
+inlineEmitLiteralThreeBytes:
+ MOVB $0xf4, 0(DI)
+ MOVW BX, 1(DI)
+ ADDQ $3, DI
+ JMP inlineEmitLiteralMemmove
+
+inlineEmitLiteralTwoBytes:
+ MOVB $0xf0, 0(DI)
+ MOVB BX, 1(DI)
+ ADDQ $2, DI
+ JMP inlineEmitLiteralMemmove
+
+inlineEmitLiteralOneByte:
+ SHLB $2, BX
+ MOVB BX, 0(DI)
+ ADDQ $1, DI
+
+inlineEmitLiteralMemmove:
+ // Spill local variables (registers) onto the stack; call; unspill.
+ //
+ // copy(dst[i:], lit)
+ //
+ // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+ // DI, R10 and AX as arguments.
+ MOVQ DI, 0(SP)
+ MOVQ R10, 8(SP)
+ MOVQ AX, 16(SP)
+ ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)".
+ MOVQ SI, 72(SP)
+ MOVQ DI, 80(SP)
+ MOVQ R15, 112(SP)
+ CALL runtime·memmove(SB)
+ MOVQ 56(SP), CX
+ MOVQ 64(SP), DX
+ MOVQ 72(SP), SI
+ MOVQ 80(SP), DI
+ MOVQ 88(SP), R9
+ MOVQ 112(SP), R15
+ JMP inner1
+
+inlineEmitLiteralEnd:
+ // End inline of the emitLiteral call.
+ // ----------------------------------------
+
+emitLiteralFastPath:
+ // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
+ MOVB AX, BX
+ SUBB $1, BX
+ SHLB $2, BX
+ MOVB BX, (DI)
+ ADDQ $1, DI
+
+ // !!! Implement the copy from lit to dst as a 16-byte load and store.
+ // (Encode's documentation says that dst and src must not overlap.)
+ //
+ // This always copies 16 bytes, instead of only len(lit) bytes, but that's
+ // OK. Subsequent iterations will fix up the overrun.
+ //
+ // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+ // 16-byte loads and stores. This technique probably wouldn't be as
+ // effective on architectures that are fussier about alignment.
+ MOVOU 0(R10), X0
+ MOVOU X0, 0(DI)
+ ADDQ AX, DI
+
+inner1:
+ // for { etc }
+
+ // base := s
+ MOVQ SI, R12
+
+ // !!! offset := base - candidate
+ MOVQ R12, R11
+ SUBQ R15, R11
+ SUBQ DX, R11
+
+ // ----------------------------------------
+ // Begin inline of the extendMatch call.
+ //
+ // s = extendMatch(src, candidate+4, s+4)
+
+ // !!! R14 = &src[len(src)]
+ MOVQ src_len+32(FP), R14
+ ADDQ DX, R14
+
+ // !!! R13 = &src[len(src) - 8]
+ MOVQ R14, R13
+ SUBQ $8, R13
+
+ // !!! R15 = &src[candidate + 4]
+ ADDQ $4, R15
+ ADDQ DX, R15
+
+ // !!! s += 4
+ ADDQ $4, SI
+
+inlineExtendMatchCmp8:
+ // As long as we are 8 or more bytes before the end of src, we can load and
+ // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+ CMPQ SI, R13
+ JA inlineExtendMatchCmp1
+ MOVQ (R15), AX
+ MOVQ (SI), BX
+ CMPQ AX, BX
+ JNE inlineExtendMatchBSF
+ ADDQ $8, R15
+ ADDQ $8, SI
+ JMP inlineExtendMatchCmp8
+
+inlineExtendMatchBSF:
+ // If those 8 bytes were not equal, XOR the two 8 byte values, and return
+ // the index of the first byte that differs. The BSF instruction finds the
+ // least significant 1 bit, the amd64 architecture is little-endian, and
+ // the shift by 3 converts a bit index to a byte index.
+ XORQ AX, BX
+ BSFQ BX, BX
+ SHRQ $3, BX
+ ADDQ BX, SI
+ JMP inlineExtendMatchEnd
+
+inlineExtendMatchCmp1:
+ // In src's tail, compare 1 byte at a time.
+ CMPQ SI, R14
+ JAE inlineExtendMatchEnd
+ MOVB (R15), AX
+ MOVB (SI), BX
+ CMPB AX, BX
+ JNE inlineExtendMatchEnd
+ ADDQ $1, R15
+ ADDQ $1, SI
+ JMP inlineExtendMatchCmp1
+
+inlineExtendMatchEnd:
+ // End inline of the extendMatch call.
+ // ----------------------------------------
+
+ // ----------------------------------------
+ // Begin inline of the emitCopy call.
+ //
+ // d += emitCopy(dst[d:], base-candidate, s-base)
+
+ // !!! length := s - base
+ MOVQ SI, AX
+ SUBQ R12, AX
+
+inlineEmitCopyLoop0:
+ // for length >= 68 { etc }
+ CMPL AX, $68
+ JLT inlineEmitCopyStep1
+
+ // Emit a length 64 copy, encoded as 3 bytes.
+ MOVB $0xfe, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $64, AX
+ JMP inlineEmitCopyLoop0
+
+inlineEmitCopyStep1:
+ // if length > 64 { etc }
+ CMPL AX, $64
+ JLE inlineEmitCopyStep2
+
+ // Emit a length 60 copy, encoded as 3 bytes.
+ MOVB $0xee, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $60, AX
+
+inlineEmitCopyStep2:
+ // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
+ CMPL AX, $12
+ JGE inlineEmitCopyStep3
+ CMPL R11, $2048
+ JGE inlineEmitCopyStep3
+
+ // Emit the remaining copy, encoded as 2 bytes.
+ MOVB R11, 1(DI)
+ SHRL $8, R11
+ SHLB $5, R11
+ SUBB $4, AX
+ SHLB $2, AX
+ ORB AX, R11
+ ORB $1, R11
+ MOVB R11, 0(DI)
+ ADDQ $2, DI
+ JMP inlineEmitCopyEnd
+
+inlineEmitCopyStep3:
+ // Emit the remaining copy, encoded as 3 bytes.
+ SUBL $1, AX
+ SHLB $2, AX
+ ORB $2, AX
+ MOVB AX, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+
+inlineEmitCopyEnd:
+ // End inline of the emitCopy call.
+ // ----------------------------------------
+
+ // nextEmit = s
+ MOVQ SI, R10
+
+ // if s >= sLimit { goto emitRemainder }
+ MOVQ SI, AX
+ SUBQ DX, AX
+ CMPQ AX, R9
+ JAE emitRemainder
+
+ // As per the encode_other.go code:
+ //
+ // We could immediately etc.
+
+ // x := load64(src, s-1)
+ MOVQ -1(SI), R14
+
+ // prevHash := hash(uint32(x>>0), shift)
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // table[prevHash] = uint16(s-1)
+ MOVQ SI, AX
+ SUBQ DX, AX
+ SUBQ $1, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // currHash := hash(uint32(x>>8), shift)
+ SHRQ $8, R14
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // candidate = int(table[currHash])
+ // XXX: MOVWQZX table-32768(SP)(R11*2), R15
+ // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+ BYTE $0x4e
+ BYTE $0x0f
+ BYTE $0xb7
+ BYTE $0x7c
+ BYTE $0x5c
+ BYTE $0x78
+
+ // table[currHash] = uint16(s)
+ ADDQ $1, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // if uint32(x>>8) == load32(src, candidate) { continue }
+ MOVL (DX)(R15*1), BX
+ CMPL R14, BX
+ JEQ inner1
+
+ // nextHash = hash(uint32(x>>16), shift)
+ SHRQ $8, R14
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // s++
+ ADDQ $1, SI
+
+ // break out of the inner1 for loop, i.e. continue the outer loop.
+ JMP outer
+
+emitRemainder:
+ // if nextEmit < len(src) { etc }
+ MOVQ src_len+32(FP), AX
+ ADDQ DX, AX
+ CMPQ R10, AX
+ JEQ encodeBlockEnd
+
+ // d += emitLiteral(dst[d:], src[nextEmit:])
+ //
+ // Push args.
+ MOVQ DI, 0(SP)
+ MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
+ MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
+ MOVQ R10, 24(SP)
+ SUBQ R10, AX
+ MOVQ AX, 32(SP)
+ MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative.
+
+ // Spill local variables (registers) onto the stack; call; unspill.
+ MOVQ DI, 80(SP)
+ CALL ·emitLiteral(SB)
+ MOVQ 80(SP), DI
+
+ // Finish the "d +=" part of "d += emitLiteral(etc)".
+ ADDQ 48(SP), DI
+
+encodeBlockEnd:
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, DI
+ MOVQ DI, d+48(FP)
+ RET
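
The IMULL $0x1e35a7bd / SHRL CX pattern that recurs in the assembly above is the same multiply-and-shift hash used by the pure Go code in encode_other.go. A small standalone sketch (not part of the vendored file) that computes the same value:

``` go
package main

import (
	"encoding/binary"
	"fmt"
)

// hash mirrors the multiply-and-shift used by both the asm (IMULL $0x1e35a7bd;
// SHRL CX, ...) and encode_other.go: the top bits of u*0x1e35a7bd index the
// candidate table.
func hash(u, shift uint32) uint32 {
	return (u * 0x1e35a7bd) >> shift
}

func main() {
	src := []byte("example input for hashing....")
	s := 1
	// shift = 32 - log2(tableSize); 24 corresponds to the minimum table of 256 entries.
	shift := uint32(24)
	u := binary.LittleEndian.Uint32(src[s:]) // load32(src, s)
	fmt.Printf("hash(load32(src, %d), %d) = %d\n", s, shift, hash(u, shift))
}
```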
diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go
new file mode 100644
index 000000000..dbcae905e
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_other.go
@@ -0,0 +1,238 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+func load32(b []byte, i int) uint32 {
+ b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+ b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+ return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+ uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+// dst is long enough to hold the encoded bytes
+// 1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+ i, n := 0, uint(len(lit)-1)
+ switch {
+ case n < 60:
+ dst[0] = uint8(n)<<2 | tagLiteral
+ i = 1
+ case n < 1<<8:
+ dst[0] = 60<<2 | tagLiteral
+ dst[1] = uint8(n)
+ i = 2
+ default:
+ dst[0] = 61<<2 | tagLiteral
+ dst[1] = uint8(n)
+ dst[2] = uint8(n >> 8)
+ i = 3
+ }
+ return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+// dst is long enough to hold the encoded bytes
+// 1 <= offset && offset <= 65535
+// 4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+ i := 0
+ // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+ // threshold for this loop is a little higher (at 68 = 64 + 4), and the
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
+ // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+ // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+ // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+ // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+ // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+ // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+ for length >= 68 {
+ // Emit a length 64 copy, encoded as 3 bytes.
+ dst[i+0] = 63<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ i += 3
+ length -= 64
+ }
+ if length > 64 {
+ // Emit a length 60 copy, encoded as 3 bytes.
+ dst[i+0] = 59<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ i += 3
+ length -= 60
+ }
+ if length >= 12 || offset >= 2048 {
+ // Emit the remaining copy, encoded as 3 bytes.
+ dst[i+0] = uint8(length-1)<<2 | tagCopy2
+ dst[i+1] = uint8(offset)
+ dst[i+2] = uint8(offset >> 8)
+ return i + 3
+ }
+ // Emit the remaining copy, encoded as 2 bytes.
+ dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+ dst[i+1] = uint8(offset)
+ return i + 2
+}
+
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+//
+// It assumes that:
+// 0 <= i && i < j && j <= len(src)
+func extendMatch(src []byte, i, j int) int {
+ for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
+ }
+ return j
+}
+
+func hash(u, shift uint32) uint32 {
+ return (u * 0x1e35a7bd) >> shift
+}
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+// len(dst) >= MaxEncodedLen(len(src)) &&
+// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+ // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+ // The table element type is uint16, as s < sLimit and sLimit < len(src)
+ // and len(src) <= maxBlockSize and maxBlockSize == 65536.
+ const (
+ maxTableSize = 1 << 14
+ // tableMask is redundant, but helps the compiler eliminate bounds
+ // checks.
+ tableMask = maxTableSize - 1
+ )
+ shift := uint32(32 - 8)
+ for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+ shift--
+ }
+ // In Go, all array elements are zero-initialized, so there is no advantage
+ // to a smaller tableSize per se. However, it matches the C++ algorithm,
+ // and in the asm versions of this code, we can get away with zeroing only
+ // the first tableSize elements.
+ var table [maxTableSize]uint16
+
+ // sLimit is when to stop looking for offset/length copies. The inputMargin
+ // lets us use a fast path for emitLiteral in the main loop, while we are
+ // looking for copies.
+ sLimit := len(src) - inputMargin
+
+ // nextEmit is where in src the next emitLiteral should start from.
+ nextEmit := 0
+
+ // The encoded form must start with a literal, as there are no previous
+ // bytes to copy, so we start looking for hash matches at s == 1.
+ s := 1
+ nextHash := hash(load32(src, s), shift)
+
+ for {
+ // Copied from the C++ snappy implementation:
+ //
+ // Heuristic match skipping: If 32 bytes are scanned with no matches
+ // found, start looking only at every other byte. If 32 more bytes are
+ // scanned (or skipped), look at every third byte, etc.. When a match
+ // is found, immediately go back to looking at every byte. This is a
+ // small loss (~5% performance, ~0.1% density) for compressible data
+ // due to more bookkeeping, but for non-compressible data (such as
+ // JPEG) it's a huge win since the compressor quickly "realizes" the
+ // data is incompressible and doesn't bother looking for matches
+ // everywhere.
+ //
+ // The "skip" variable keeps track of how many bytes there are since
+ // the last match; dividing it by 32 (ie. right-shifting by five) gives
+ // the number of bytes to move ahead for each iteration.
+ skip := 32
+
+ nextS := s
+ candidate := 0
+ for {
+ s = nextS
+ bytesBetweenHashLookups := skip >> 5
+ nextS = s + bytesBetweenHashLookups
+ skip += bytesBetweenHashLookups
+ if nextS > sLimit {
+ goto emitRemainder
+ }
+ candidate = int(table[nextHash&tableMask])
+ table[nextHash&tableMask] = uint16(s)
+ nextHash = hash(load32(src, nextS), shift)
+ if load32(src, s) == load32(src, candidate) {
+ break
+ }
+ }
+
+ // A 4-byte match has been found. We'll later see if more than 4 bytes
+ // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+ // them as literal bytes.
+ d += emitLiteral(dst[d:], src[nextEmit:s])
+
+ // Call emitCopy, and then see if another emitCopy could be our next
+ // move. Repeat until we find no match for the input immediately after
+ // what was consumed by the last emitCopy call.
+ //
+ // If we exit this loop normally then we need to call emitLiteral next,
+ // though we don't yet know how big the literal will be. We handle that
+ // by proceeding to the next iteration of the main loop. We also can
+ // exit this loop via goto if we get close to exhausting the input.
+ for {
+ // Invariant: we have a 4-byte match at s, and no need to emit any
+ // literal bytes prior to s.
+ base := s
+
+ // Extend the 4-byte match as long as possible.
+ //
+ // This is an inlined version of:
+ // s = extendMatch(src, candidate+4, s+4)
+ s += 4
+ for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
+ }
+
+ d += emitCopy(dst[d:], base-candidate, s-base)
+ nextEmit = s
+ if s >= sLimit {
+ goto emitRemainder
+ }
+
+ // We could immediately start working at s now, but to improve
+ // compression we first update the hash table at s-1 and at s. If
+ // another emitCopy is not our next move, also calculate nextHash
+ // at s+1. At least on GOARCH=amd64, these three hash calculations
+ // are faster as one load64 call (with some shifts) instead of
+ // three load32 calls.
+ x := load64(src, s-1)
+ prevHash := hash(uint32(x>>0), shift)
+ table[prevHash&tableMask] = uint16(s - 1)
+ currHash := hash(uint32(x>>8), shift)
+ candidate = int(table[currHash&tableMask])
+ table[currHash&tableMask] = uint16(s)
+ if uint32(x>>8) != load32(src, candidate) {
+ nextHash = hash(uint32(x>>16), shift)
+ s++
+ break
+ }
+ }
+ }
+
+emitRemainder:
+ if nextEmit < len(src) {
+ d += emitLiteral(dst[d:], src[nextEmit:])
+ }
+ return d
+}
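
To make emitCopy's choice between the 2-byte and 3-byte encodings concrete, here is a standalone sketch (not part of the vendored file) that re-implements only the short-copy branch, which applies when the length is below 12 and the offset below 2048:

``` go
package main

import "fmt"

const tagCopy1 = 0x01

// emitShortCopy re-implements only the 2-byte tagCopy1 branch of emitCopy:
// it applies when 4 <= length < 12 and offset < 2048.
func emitShortCopy(dst []byte, offset, length int) int {
	dst[0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
	dst[1] = uint8(offset)
	return 2
}

func main() {
	dst := make([]byte, 2)
	n := emitShortCopy(dst, 10, 5)
	// offset 10, length 5 encodes as 0x05 0x0a: high offset bits 0, (5-4)<<2 = 4, tag 1.
	fmt.Printf("%d bytes: % x\n", n, dst[:n])
}
```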
diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go
new file mode 100644
index 000000000..ece692ea4
--- /dev/null
+++ b/vendor/github.com/golang/snappy/snappy.go
@@ -0,0 +1,98 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package snappy implements the Snappy compression format. It aims for very
+// high speeds and reasonable compression.
+//
+// There are actually two Snappy formats: block and stream. They are related,
+// but different: trying to decompress block-compressed data as a Snappy stream
+// will fail, and vice versa. The block format is the Decode and Encode
+// functions and the stream format is the Reader and Writer types.
+//
+// The block format, the more common case, is used when the complete size (the
+// number of bytes) of the original data is known upfront, at the time
+// compression starts. The stream format, also known as the framing format, is
+// for when that isn't always true.
+//
+// The canonical, C++ implementation is at https://github.com/google/snappy and
+// it only implements the block format.
+package snappy // import "github.com/golang/snappy"
+
+import (
+ "hash/crc32"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+ - If m < 60, the next 1 + m bytes are literal bytes.
+ - Otherwise, let n be the little-endian unsigned integer denoted by the next
+ m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+ - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+ The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+ of the offset. The next byte is bits 0-7 of the offset.
+ - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+ The length is 1 + m. The offset is the little-endian unsigned integer
+ denoted by the next 2 bytes.
+ - For l == 3, this tag is a legacy format that is no longer issued by most
+ encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
+ [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+ integer denoted by the next 4 bytes.
+*/
+const (
+ tagLiteral = 0x00
+ tagCopy1 = 0x01
+ tagCopy2 = 0x02
+ tagCopy4 = 0x03
+)
+
+const (
+ checksumSize = 4
+ chunkHeaderSize = 4
+ magicChunk = "\xff\x06\x00\x00" + magicBody
+ magicBody = "sNaPpY"
+
+ // maxBlockSize is the maximum size of the input to encodeBlock. It is not
+ // part of the wire format per se, but some parts of the encoder assume
+ // that an offset fits into a uint16.
+ //
+ // Also, for the framing format (Writer type instead of Encode function),
+ // https://github.com/google/snappy/blob/master/framing_format.txt says
+ // that "the uncompressed data in a chunk must be no longer than 65536
+ // bytes".
+ maxBlockSize = 65536
+
+ // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
+ // hard coded to be a const instead of a variable, so that obufLen can also
+ // be a const. Their equivalence is confirmed by
+ // TestMaxEncodedLenOfMaxBlockSize.
+ maxEncodedLenOfMaxBlockSize = 76490
+
+ obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
+ obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize
+)
+
+const (
+ chunkTypeCompressedData = 0x00
+ chunkTypeUncompressedData = 0x01
+ chunkTypePadding = 0xfe
+ chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func crc(b []byte) uint32 {
+ c := crc32.Update(0, crcTable, b)
+ return uint32(c>>15|c<<17) + 0xa282ead8
+}
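
The chunk layout described in the package comment above (2-bit tag l, 6-bit modifier m in the first byte) can be illustrated with a small decoding sketch; this is editorial illustration, not part of the vendored file:

``` go
package main

import "fmt"

// decodeTag splits a chunk's first byte into the fields described in the
// package comment: l, the 2-bit chunk tag, and m, the 6-bit modifier.
func decodeTag(b byte) (l, m uint8) {
	return b & 0x03, b >> 2
}

func main() {
	// 0x05 is the first byte of the 2-byte copy produced in the earlier sketch.
	l, m := decodeTag(0x05)
	fmt.Printf("tag l=%d (1 == tagCopy1), m=%d\n", l, m)
	// A literal whose length-1 is 10 encodes its tag byte as 10<<2 | tagLiteral.
	l, m = decodeTag(10<<2 | 0x00)
	fmt.Printf("tag l=%d (0 == tagLiteral), m=%d => %d literal bytes follow\n", l, m, m+1)
}
```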
diff --git a/vendor/github.com/juju/errors/LICENSE b/vendor/github.com/juju/errors/LICENSE
new file mode 100644
index 000000000..ade9307b3
--- /dev/null
+++ b/vendor/github.com/juju/errors/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+ This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+ 0. Additional Definitions.
+
+ As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+ "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+ An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+ A "Combined Work" is a work produced by combining or linking an
+Application with the Library. The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+ The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+ The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+ 1. Exception to Section 3 of the GNU GPL.
+
+ You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+ 2. Conveying Modified Versions.
+
+ If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+ a) under this License, provided that you make a good faith effort to
+ ensure that, in the event an Application does not supply the
+ function or data, the facility still operates, and performs
+ whatever part of its purpose remains meaningful, or
+
+ b) under the GNU GPL, with none of the additional permissions of
+ this License applicable to that copy.
+
+ 3. Object Code Incorporating Material from Library Header Files.
+
+ The object code form of an Application may incorporate material from
+a header file that is part of the Library. You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+ a) Give prominent notice with each copy of the object code that the
+ Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the object code with a copy of the GNU GPL and this license
+ document.
+
+ 4. Combined Works.
+
+ You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+ a) Give prominent notice with each copy of the Combined Work that
+ the Library is used in it and that the Library and its use are
+ covered by this License.
+
+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
+ document.
+
+ c) For a Combined Work that displays copyright notices during
+ execution, include the copyright notice for the Library among
+ these notices, as well as a reference directing the user to the
+ copies of the GNU GPL and this license document.
+
+ d) Do one of the following:
+
+ 0) Convey the Minimal Corresponding Source under the terms of this
+ License, and the Corresponding Application Code in a form
+ suitable for, and under terms that permit, the user to
+ recombine or relink the Application with a modified version of
+ the Linked Version to produce a modified Combined Work, in the
+ manner specified by section 6 of the GNU GPL for conveying
+ Corresponding Source.
+
+ 1) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (a) uses at run time
+ a copy of the Library already present on the user's computer
+ system, and (b) will operate properly with a modified version
+ of the Library that is interface-compatible with the Linked
+ Version.
+
+ e) Provide Installation Information, but only if you would otherwise
+ be required to provide such information under section 6 of the
+ GNU GPL, and only to the extent that such information is
+ necessary to install and execute a modified version of the
+ Combined Work produced by recombining or relinking the
+ Application with a modified version of the Linked Version. (If
+ you use option 4d0, the Installation Information must accompany
+ the Minimal Corresponding Source and Corresponding Application
+ Code. If you use option 4d1, you must provide the Installation
+ Information in the manner specified by section 6 of the GNU GPL
+ for conveying Corresponding Source.)
+
+ 5. Combined Libraries.
+
+ You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+ a) Accompany the combined library with a copy of the same work based
+ on the Library, uncombined with any other library facilities,
+ conveyed under the terms of this License.
+
+ b) Give prominent notice with the combined library that part of it
+ is a work based on the Library, and explaining where to find the
+ accompanying uncombined form of the same work.
+
+ 6. Revised Versions of the GNU Lesser General Public License.
+
+ The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+ If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/vendor/github.com/juju/errors/Makefile b/vendor/github.com/juju/errors/Makefile
new file mode 100644
index 000000000..41836d684
--- /dev/null
+++ b/vendor/github.com/juju/errors/Makefile
@@ -0,0 +1,24 @@
+PROJECT := github.com/juju/errors
+
+.PHONY: check-licence check-go check docs
+
+check: check-licence check-go
+ go test $(PROJECT)/...
+
+check-licence:
+ @(fgrep -rl "Licensed under the LGPLv3" --exclude *.s .;\
+ fgrep -rl "MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT" --exclude *.s .;\
+ find . -name "*.go") | sed -e 's,\./,,' | sort | uniq -u | \
+ xargs -I {} echo FAIL: licence missed: {}
+
+check-go:
+ $(eval GOFMT := $(strip $(shell gofmt -l .| sed -e "s/^/ /g")))
+ @(if [ x$(GOFMT) != x"" ]; then \
+ echo go fmt is sad: $(GOFMT); \
+ exit 1; \
+ fi )
+ @(go tool vet -all -composites=false -copylocks=false .)
+
+docs:
+ godoc2md github.com/juju/errors > README.md
+ sed -i 's|\[godoc-link-here\]|[](https://godoc.org/github.com/juju/errors)|' README.md
diff --git a/vendor/github.com/juju/errors/README.md b/vendor/github.com/juju/errors/README.md
new file mode 100644
index 000000000..4584d100e
--- /dev/null
+++ b/vendor/github.com/juju/errors/README.md
@@ -0,0 +1,707 @@
+
+# errors
+ import "github.com/juju/errors"
+
+[](https://godoc.org/github.com/juju/errors)
+
+The juju/errors package provides an easy way to annotate errors without losing
+the original error context.
+
+The exported `New` and `Errorf` functions are designed to replace the
+`errors.New` and `fmt.Errorf` functions respectively. The same underlying
+error is there, but the package also records the location at which the error
+was created.
+
+A primary use case for this library is to add extra context any time an
+error is returned from a function.
+
+
+ if err := SomeFunc(); err != nil {
+ return err
+ }
+
+This instead becomes:
+
+
+ if err := SomeFunc(); err != nil {
+ return errors.Trace(err)
+ }
+
+which just records the file and line number of the Trace call, or
+
+
+ if err := SomeFunc(); err != nil {
+ return errors.Annotate(err, "more context")
+ }
+
+which also adds an annotation to the error.
+
+When you want to check to see if an error is of a particular type, a helper
+function is normally exported by the package that returned the error, like the
+`os` package does. The underlying cause of the error is available using the
+`Cause` function.
+
+
+ os.IsNotExist(errors.Cause(err))
+
+The result of the `Error()` call on an annotated error is the annotations joined
+with colons, then the result of the `Error()` method for the underlying error
+that was the cause.
+
+
+ err := errors.Errorf("original")
+ err = errors.Annotatef(err, "context")
+ err = errors.Annotatef(err, "more context")
+ err.Error() -> "more context: context: original"
+
+Obviously recording the file, line and functions is not very useful if you
+cannot get them back out again.
+
+
+ errors.ErrorStack(err)
+
+will return something like:
+
+
+ first error
+ github.com/juju/errors/annotation_test.go:193:
+ github.com/juju/errors/annotation_test.go:194: annotation
+ github.com/juju/errors/annotation_test.go:195:
+ github.com/juju/errors/annotation_test.go:196: more context
+ github.com/juju/errors/annotation_test.go:197:
+
+The first error was generated by an external system, so there was no location
+associated. The second, fourth, and last lines were generated with Trace calls,
+and the other two through Annotate.
+
+Sometimes when responding to an error you want to return a more specific error
+for the situation.
+
+
+ if err := FindField(field); err != nil {
+ return errors.Wrap(err, errors.NotFoundf(field))
+ }
+
+This returns an error where the complete error stack is still available, and
+`errors.Cause()` will return the `NotFound` error.
+
+
+
+
+
+
+## func AlreadyExistsf
+``` go
+func AlreadyExistsf(format string, args ...interface{}) error
+```
+AlreadyExistsf returns an error which satisfies IsAlreadyExists().
+
+
+## func Annotate
+``` go
+func Annotate(other error, message string) error
+```
+Annotate is used to add extra context to an existing error. The location of
+the Annotate call is recorded with the annotations. The file, line and
+function are also recorded.
+
+For example:
+
+
+ if err := SomeFunc(); err != nil {
+ return errors.Annotate(err, "failed to frombulate")
+ }
+
+
+## func Annotatef
+``` go
+func Annotatef(other error, format string, args ...interface{}) error
+```
+Annotatef is used to add extra context to an existing error. The location of
+the Annotate call is recorded with the annotations. The file, line and
+function are also recorded.
+
+For example:
+
+
+ if err := SomeFunc(); err != nil {
+ return errors.Annotatef(err, "failed to frombulate the %s", arg)
+ }
+
+
+## func BadRequestf
+``` go
+func BadRequestf(format string, args ...interface{}) error
+```
+BadRequestf returns an error which satisfies IsBadRequest().
+
+
+## func Cause
+``` go
+func Cause(err error) error
+```
+Cause returns the cause of the given error. This will be either the
+original error, or the result of a Wrap or Mask call.
+
+Cause is the usual way to diagnose errors that may have been wrapped by
+the other errors functions.
+
+
+## func DeferredAnnotatef
+``` go
+func DeferredAnnotatef(err *error, format string, args ...interface{})
+```
+DeferredAnnotatef annotates the given error (when it is not nil) with the given
+format string and arguments (like fmt.Sprintf). If *err is nil, DeferredAnnotatef
+does nothing. This method is used in a defer statement in order to annotate any
+resulting error with the same message.
+
+For example:
+
+
+ defer DeferredAnnotatef(&err, "failed to frombulate the %s", arg)
+
+
+## func Details
+``` go
+func Details(err error) string
+```
+Details returns information about the stack of errors wrapped by err, in
+the format:
+
+
+ [{filename:99: error one} {otherfile:55: cause of error one}]
+
+This is a terse alternative to ErrorStack as it returns a single line.
+
+
+## func ErrorStack
+``` go
+func ErrorStack(err error) string
+```
+ErrorStack returns a string representation of the annotated error. If the
+error passed as the parameter is not an annotated error, the result is
+simply the result of the Error() method on that error.
+
+If the error is an annotated error, a multi-line string is returned where
+each line represents one entry in the annotation stack. The full filename
+from the call stack is used in the output.
+
+
+ first error
+ github.com/juju/errors/annotation_test.go:193:
+ github.com/juju/errors/annotation_test.go:194: annotation
+ github.com/juju/errors/annotation_test.go:195:
+ github.com/juju/errors/annotation_test.go:196: more context
+ github.com/juju/errors/annotation_test.go:197:
+
+
+## func Errorf
+``` go
+func Errorf(format string, args ...interface{}) error
+```
+Errorf creates a new annotated error and records the location at which the
+error is created. This should be a drop-in replacement for fmt.Errorf.
+
+For example:
+
+
+ return errors.Errorf("validation failed: %s", message)
+
+
+## func Forbiddenf
+``` go
+func Forbiddenf(format string, args ...interface{}) error
+```
+Forbiddenf returns an error which satisfies IsForbidden().
+
+
+## func IsAlreadyExists
+``` go
+func IsAlreadyExists(err error) bool
+```
+IsAlreadyExists reports whether the error was created with
+AlreadyExistsf() or NewAlreadyExists().
+
+
+## func IsBadRequest
+``` go
+func IsBadRequest(err error) bool
+```
+IsBadRequest reports whether err was created with BadRequestf() or
+NewBadRequest().
+
+
+## func IsForbidden
+``` go
+func IsForbidden(err error) bool
+```
+IsForbidden reports whether err was created with Forbiddenf() or
+NewForbidden().
+
+
+## func IsMethodNotAllowed
+``` go
+func IsMethodNotAllowed(err error) bool
+```
+IsMethodNotAllowed reports whether err was created with MethodNotAllowedf() or
+NewMethodNotAllowed().
+
+
+## func IsNotAssigned
+``` go
+func IsNotAssigned(err error) bool
+```
+IsNotAssigned reports whether err was created with NotAssignedf() or
+NewNotAssigned().
+
+
+## func IsNotFound
+``` go
+func IsNotFound(err error) bool
+```
+IsNotFound reports whether err was created with NotFoundf() or
+NewNotFound().
+
+
+## func IsNotImplemented
+``` go
+func IsNotImplemented(err error) bool
+```
+IsNotImplemented reports whether err was created with
+NotImplementedf() or NewNotImplemented().
+
+
+## func IsNotProvisioned
+``` go
+func IsNotProvisioned(err error) bool
+```
+IsNotProvisioned reports whether err was created with NotProvisionedf() or
+NewNotProvisioned().
+
+
+## func IsNotSupported
+``` go
+func IsNotSupported(err error) bool
+```
+IsNotSupported reports whether the error was created with
+NotSupportedf() or NewNotSupported().
+
+
+## func IsNotValid
+``` go
+func IsNotValid(err error) bool
+```
+IsNotValid reports whether the error was created with NotValidf() or
+NewNotValid().
+
+
+## func IsUnauthorized
+``` go
+func IsUnauthorized(err error) bool
+```
+IsUnauthorized reports whether err was created with Unauthorizedf() or
+NewUnauthorized().
+
+
+## func IsUserNotFound
+``` go
+func IsUserNotFound(err error) bool
+```
+IsUserNotFound reports whether err was created with UserNotFoundf() or
+NewUserNotFound().
+
+
+## func Mask
+``` go
+func Mask(other error) error
+```
+Mask hides the underlying error type, and records the location of the masking.
+
+
+## func Maskf
+``` go
+func Maskf(other error, format string, args ...interface{}) error
+```
+Maskf masks the given error with the given format string and arguments (like
+fmt.Sprintf), returning a new error that maintains the error stack, but
+hides the underlying error type. The error string still contains the full
+annotations. If you want to hide the annotations, call Wrap.
+
+
+## func MethodNotAllowedf
+``` go
+func MethodNotAllowedf(format string, args ...interface{}) error
+```
+MethodNotAllowedf returns an error which satisfies IsMethodNotAllowed().
+
+
+## func New
+``` go
+func New(message string) error
+```
+New is a drop-in replacement for the standard library errors module that records
+the location at which the error is created.
+
+For example:
+
+
+ return errors.New("validation failed")
+
+
+## func NewAlreadyExists
+``` go
+func NewAlreadyExists(err error, msg string) error
+```
+NewAlreadyExists returns an error which wraps err and satisfies
+IsAlreadyExists().
+
+
+## func NewBadRequest
+``` go
+func NewBadRequest(err error, msg string) error
+```
+NewBadRequest returns an error which wraps err that satisfies
+IsBadRequest().
+
+
+## func NewForbidden
+``` go
+func NewForbidden(err error, msg string) error
+```
+NewForbidden returns an error which wraps err that satisfies
+IsForbidden().
+
+
+## func NewMethodNotAllowed
+``` go
+func NewMethodNotAllowed(err error, msg string) error
+```
+NewMethodNotAllowed returns an error which wraps err that satisfies
+IsMethodNotAllowed().
+
+
+## func NewNotAssigned
+``` go
+func NewNotAssigned(err error, msg string) error
+```
+NewNotAssigned returns an error which wraps err that satisfies
+IsNotAssigned().
+
+
+## func NewNotFound
+``` go
+func NewNotFound(err error, msg string) error
+```
+NewNotFound returns an error which wraps err that satisfies
+IsNotFound().
+
+
+## func NewNotImplemented
+``` go
+func NewNotImplemented(err error, msg string) error
+```
+NewNotImplemented returns an error which wraps err and satisfies
+IsNotImplemented().
+
+
+## func NewNotProvisioned
+``` go
+func NewNotProvisioned(err error, msg string) error
+```
+NewNotProvisioned returns an error which wraps err that satisfies
+IsNotProvisioned().
+
+
+## func NewNotSupported
+``` go
+func NewNotSupported(err error, msg string) error
+```
+NewNotSupported returns an error which wraps err and satisfies
+IsNotSupported().
+
+
+## func NewNotValid
+``` go
+func NewNotValid(err error, msg string) error
+```
+NewNotValid returns an error which wraps err and satisfies IsNotValid().
+
+
+## func NewUnauthorized
+``` go
+func NewUnauthorized(err error, msg string) error
+```
+NewUnauthorized returns an error which wraps err and satisfies
+IsUnauthorized().
+
+
+## func NewUserNotFound
+``` go
+func NewUserNotFound(err error, msg string) error
+```
+NewUserNotFound returns an error which wraps err and satisfies
+IsUserNotFound().
+
+
+## func NotAssignedf
+``` go
+func NotAssignedf(format string, args ...interface{}) error
+```
+NotAssignedf returns an error which satisfies IsNotAssigned().
+
+
+## func NotFoundf
+``` go
+func NotFoundf(format string, args ...interface{}) error
+```
+NotFoundf returns an error which satisfies IsNotFound().
+
+
+## func NotImplementedf
+``` go
+func NotImplementedf(format string, args ...interface{}) error
+```
+NotImplementedf returns an error which satisfies IsNotImplemented().
+
+
+## func NotProvisionedf
+``` go
+func NotProvisionedf(format string, args ...interface{}) error
+```
+NotProvisionedf returns an error which satisfies IsNotProvisioned().
+
+
+## func NotSupportedf
+``` go
+func NotSupportedf(format string, args ...interface{}) error
+```
+NotSupportedf returns an error which satisfies IsNotSupported().
+
+
+## func NotValidf
+``` go
+func NotValidf(format string, args ...interface{}) error
+```
+NotValidf returns an error which satisfies IsNotValid().
+
+
+## func Trace
+``` go
+func Trace(other error) error
+```
+Trace adds the location of the Trace call to the stack. The Cause of the
+resulting error is the same as the error parameter. If the other error is
+nil, the result will be nil.
+
+For example:
+
+
+ if err := SomeFunc(); err != nil {
+ return errors.Trace(err)
+ }
+
+
+## func Unauthorizedf
+``` go
+func Unauthorizedf(format string, args ...interface{}) error
+```
+Unauthorizedf returns an error which satisfies IsUnauthorized().
+
+
+## func UserNotFoundf
+``` go
+func UserNotFoundf(format string, args ...interface{}) error
+```
+UserNotFoundf returns an error which satisfies IsUserNotFound().
+
+
+## func Wrap
+``` go
+func Wrap(other, newDescriptive error) error
+```
+Wrap changes the Cause of the error. The location of the Wrap call is also
+stored in the error stack.
+
+For example:
+
+
+ if err := SomeFunc(); err != nil {
+ newErr := &packageError{"more context", private_value}
+ return errors.Wrap(err, newErr)
+ }
+
+
+## func Wrapf
+``` go
+func Wrapf(other, newDescriptive error, format string, args ...interface{}) error
+```
+Wrapf changes the Cause of the error, and adds an annotation. The location
+of the Wrap call is also stored in the error stack.
+
+For example:
+
+
+ if err := SomeFunc(); err != nil {
+ return errors.Wrapf(err, simpleErrorType, "invalid value %q", value)
+ }
+
+
+
+## type Err
+``` go
+type Err struct {
+ // contains filtered or unexported fields
+}
+```
+Err holds a description of an error along with information about
+where the error was created.
+
+It may be embedded in custom error types to add extra information that
+this errors package can understand.
+
+
+
+
+
+
+
+
+
+### func NewErr
+``` go
+func NewErr(format string, args ...interface{}) Err
+```
+NewErr is used to return an Err for the purpose of embedding in other
+structures. The location is not specified, and needs to be set with a call
+to SetLocation.
+
+For example:
+
+
+ type FooError struct {
+ errors.Err
+ code int
+ }
+
+ func NewFooError(code int) error {
+ err := &FooError{errors.NewErr("foo"), code}
+ err.SetLocation(1)
+ return err
+ }
+
+
+### func NewErrWithCause
+``` go
+func NewErrWithCause(other error, format string, args ...interface{}) Err
+```
+NewErrWithCause is used to return an Err whose cause is the given error, for the purpose of embedding in other
+structures. The location is not specified, and needs to be set with a call
+to SetLocation.
+
+For example:
+
+
+ type FooError struct {
+ errors.Err
+ code int
+ }
+
+ func (e *FooError) Annotate(format string, args ...interface{}) error {
+ err := &FooError{errors.NewErrWithCause(e.Err, format, args...), e.code}
+ err.SetLocation(1)
+ return err
+ })
+
+
+
+
+### func (\*Err) Cause
+``` go
+func (e *Err) Cause() error
+```
+The Cause of an error is the most recent error in the error stack that
+meets one of these criteria: the original error that was raised; the new
+error that was passed into the Wrap function; the most recently masked
+error; or nil if the error itself is considered the Cause. Normally this
+method is not invoked directly, but instead through the Cause stand alone
+function.
+
+
+
+### func (\*Err) Error
+``` go
+func (e *Err) Error() string
+```
+Error implements error.Error.
+
+
+
+### func (\*Err) Format
+``` go
+func (e *Err) Format(s fmt.State, verb rune)
+```
+Format implements fmt.Formatter
+When printing errors with %+v it also prints the stack trace.
+%#v unsurprisingly will print the real underlying type.
+
+
+
+### func (\*Err) Location
+``` go
+func (e *Err) Location() (filename string, line int)
+```
+Location is the file and line of where the error was most recently
+created or annotated.
+
+
+
+### func (\*Err) Message
+``` go
+func (e *Err) Message() string
+```
+Message returns the message stored with the most recent location. This is
+the empty string if the most recent call was Trace, or the message stored
+with Annotate or Mask.
+
+
+
+### func (\*Err) SetLocation
+``` go
+func (e *Err) SetLocation(callDepth int)
+```
+SetLocation records the source location of the error at callDepth stack
+frames above the call.
+
+
+
+### func (\*Err) StackTrace
+``` go
+func (e *Err) StackTrace() []string
+```
+StackTrace returns one string for each location recorded in the stack of
+errors. The first value is the originating error, with a line for each
+other annotation or tracing of the error.
+
+
+
+### func (\*Err) Underlying
+``` go
+func (e *Err) Underlying() error
+```
+Underlying returns the previous error in the error stack, if any. A client
+should not ever really call this method. It is used to build the error
+stack and should not be introspected by client calls. Or more
+specifically, clients should not depend on anything but the `Cause` of an
+error.
+
+
+
+
+
+
+
+
+
+- - -
+Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
\ No newline at end of file
diff --git a/vendor/github.com/juju/errors/dependencies.tsv b/vendor/github.com/juju/errors/dependencies.tsv
new file mode 100644
index 000000000..e32434494
--- /dev/null
+++ b/vendor/github.com/juju/errors/dependencies.tsv
@@ -0,0 +1,5 @@
+github.com/juju/loggo git 8232ab8918d91c72af1a9fb94d3edbe31d88b790 2017-06-05T01:46:07Z
+github.com/juju/testing git 72703b1e95eb8ce4737fd8a3d8496c6b0be280a6 2018-05-17T13:41:05Z
+gopkg.in/check.v1 git 4f90aeace3a26ad7021961c297b22c42160c7b25 2016-01-05T16:49:36Z
+gopkg.in/mgo.v2 git f2b6f6c918c452ad107eec89615f074e3bd80e33 2016-08-18T01:52:18Z
+gopkg.in/yaml.v2 git 1be3d31502d6eabc0dd7ce5b0daab022e14a5538 2017-07-12T05:45:46Z
diff --git a/vendor/github.com/juju/errors/doc.go b/vendor/github.com/juju/errors/doc.go
new file mode 100644
index 000000000..35b119aa3
--- /dev/null
+++ b/vendor/github.com/juju/errors/doc.go
@@ -0,0 +1,81 @@
+// Copyright 2013, 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+/*
+[godoc-link-here]
+
+The juju/errors package provides an easy way to annotate errors without losing the
+original error context.
+
+The exported `New` and `Errorf` functions are designed to replace the
+`errors.New` and `fmt.Errorf` functions respectively. The same underlying
+error is there, but the package also records the location at which the error
+was created.
+
+A primary use case for this library is to add extra context any time an
+error is returned from a function.
+
+ if err := SomeFunc(); err != nil {
+ return err
+ }
+
+This instead becomes:
+
+ if err := SomeFunc(); err != nil {
+ return errors.Trace(err)
+ }
+
+which just records the file and line number of the Trace call, or
+
+ if err := SomeFunc(); err != nil {
+ return errors.Annotate(err, "more context")
+ }
+
+which also adds an annotation to the error.
+
+When you want to check to see if an error is of a particular type, a helper
+function is normally exported by the package that returned the error, like the
+`os` package does. The underlying cause of the error is available using the
+`Cause` function.
+
+ os.IsNotExist(errors.Cause(err))
+
+The result of the `Error()` call on an annotated error is the annotations joined
+with colons, then the result of the `Error()` method for the underlying error
+that was the cause.
+
+ err := errors.Errorf("original")
+ err = errors.Annotatef(err, "context")
+ err = errors.Annotatef(err, "more context")
+ err.Error() -> "more context: context: original"
+
+Obviously recording the file, line and functions is not very useful if you
+cannot get them back out again.
+
+ errors.ErrorStack(err)
+
+will return something like:
+
+ first error
+ github.com/juju/errors/annotation_test.go:193:
+ github.com/juju/errors/annotation_test.go:194: annotation
+ github.com/juju/errors/annotation_test.go:195:
+ github.com/juju/errors/annotation_test.go:196: more context
+ github.com/juju/errors/annotation_test.go:197:
+
+The first error was generated by an external system, so there was no location
+associated. The second, fourth, and last lines were generated with Trace calls,
+and the other two through Annotate.
+
+Sometimes when responding to an error you want to return a more specific error
+for the situation.
+
+ if err := FindField(field); err != nil {
+ return errors.Wrap(err, errors.NotFoundf(field))
+ }
+
+This returns an error where the complete error stack is still available, and
+`errors.Cause()` will return the `NotFound` error.
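+
+A short, illustrative round trip (the function names are placeholders): an error
+created with NotFoundf keeps its identity through any number of annotations.
+
+	func load(name string) error {
+		return errors.NotFoundf("config %q", name)
+	}
+
+	err := errors.Annotate(load("app"), "starting service")
+	errors.IsNotFound(err) // true: annotations preserve the cause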
+
+*/
+package errors
diff --git a/vendor/github.com/juju/errors/error.go b/vendor/github.com/juju/errors/error.go
new file mode 100644
index 000000000..d67e33863
--- /dev/null
+++ b/vendor/github.com/juju/errors/error.go
@@ -0,0 +1,172 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package errors
+
+import (
+ "fmt"
+ "reflect"
+ "runtime"
+)
+
+// Err holds a description of an error along with information about
+// where the error was created.
+//
+// It may be embedded in custom error types to add extra information that
+// this errors package can understand.
+type Err struct {
+ // message holds an annotation of the error.
+ message string
+
+ // cause holds the cause of the error as returned
+ // by the Cause method.
+ cause error
+
+ // previous holds the previous error in the error stack, if any.
+ previous error
+
+ // file and line hold the source code location where the error was
+ // created.
+ file string
+ line int
+}
+
+// NewErr is used to return an Err for the purpose of embedding in other
+// structures. The location is not specified, and needs to be set with a call
+// to SetLocation.
+//
+// For example:
+// type FooError struct {
+// errors.Err
+// code int
+// }
+//
+// func NewFooError(code int) error {
+// err := &FooError{errors.NewErr("foo"), code}
+// err.SetLocation(1)
+// return err
+// }
+func NewErr(format string, args ...interface{}) Err {
+ return Err{
+ message: fmt.Sprintf(format, args...),
+ }
+}
+
+// NewErrWithCause is used to return an Err whose cause is another error, for
+// the purpose of embedding in other structures. The location is not
+// specified, and needs to be set with a call
+// to SetLocation.
+//
+// For example:
+// type FooError struct {
+// errors.Err
+// code int
+// }
+//
+// func (e *FooError) Annotate(format string, args ...interface{}) error {
+// err := &FooError{errors.NewErrWithCause(e.Err, format, args...), e.code}
+// err.SetLocation(1)
+// return err
+// }
+func NewErrWithCause(other error, format string, args ...interface{}) Err {
+ return Err{
+ message: fmt.Sprintf(format, args...),
+ cause: Cause(other),
+ previous: other,
+ }
+}
+
+// Location is the file and line of where the error was most recently
+// created or annotated.
+func (e *Err) Location() (filename string, line int) {
+ return e.file, e.line
+}
+
+// Underlying returns the previous error in the error stack, if any. A client
+// should not ever really call this method. It is used to build the error
+// stack and should not be introspected by client calls. Or more
+// specifically, clients should not depend on anything but the `Cause` of an
+// error.
+func (e *Err) Underlying() error {
+ return e.previous
+}
+
+// The Cause of an error is the most recent error in the error stack that
+// meets one of these criteria: the original error that was raised; the new
+// error that was passed into the Wrap function; the most recently masked
+// error; or nil if the error itself is considered the Cause. Normally this
+// method is not invoked directly, but instead through the Cause stand alone
+// function.
+func (e *Err) Cause() error {
+ return e.cause
+}
+
+// Message returns the message stored with the most recent location. This is
+// the empty string if the most recent call was Trace, or the message stored
+// with Annotate or Mask.
+func (e *Err) Message() string {
+ return e.message
+}
+
+// Error implements error.Error.
+func (e *Err) Error() string {
+ // We want to walk up the stack of errors showing the annotations
+ // as long as the cause is the same.
+ err := e.previous
+ if !sameError(Cause(err), e.cause) && e.cause != nil {
+ err = e.cause
+ }
+ switch {
+ case err == nil:
+ return e.message
+ case e.message == "":
+ return err.Error()
+ }
+ return fmt.Sprintf("%s: %v", e.message, err)
+}
+
+// Format implements fmt.Formatter
+// When printing errors with %+v it also prints the stack trace.
+// %#v unsurprisingly will print the real underlying type.
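+//
+// Illustrative only:
+//
+//	fmt.Printf("%v\n", err)  // "more context: original"
+//	fmt.Printf("%+v\n", err) // the full annotated stack, one location per line
+//	fmt.Printf("%#v\n", err) // the concrete type, e.g. &errors.Err{...}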
+func (e *Err) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case s.Flag('+'):
+ fmt.Fprintf(s, "%s", ErrorStack(e))
+ return
+ case s.Flag('#'):
+ // avoid infinite recursion by wrapping e into a type
+ // that doesn't implement Formatter.
+ fmt.Fprintf(s, "%#v", (*unformatter)(e))
+ return
+ }
+ fallthrough
+ case 's':
+ fmt.Fprintf(s, "%s", e.Error())
+ }
+}
+
+// helper for Format
+type unformatter Err
+
+func (unformatter) Format() { /* break the fmt.Formatter interface */ }
+
+// SetLocation records the source location of the error at callDepth stack
+// frames above the call.
+func (e *Err) SetLocation(callDepth int) {
+ _, file, line, _ := runtime.Caller(callDepth + 1)
+ e.file = trimGoPath(file)
+ e.line = line
+}
+
+// StackTrace returns one string for each location recorded in the stack of
+// errors. The first value is the originating error, with a line for each
+// other annotation or tracing of the error.
+func (e *Err) StackTrace() []string {
+ return errorStack(e)
+}
+
+// Ideally we'd have a way to check identity, but deep equals will do.
+func sameError(e1, e2 error) bool {
+ return reflect.DeepEqual(e1, e2)
+}
diff --git a/vendor/github.com/juju/errors/errortypes.go b/vendor/github.com/juju/errors/errortypes.go
new file mode 100644
index 000000000..5faf1e22d
--- /dev/null
+++ b/vendor/github.com/juju/errors/errortypes.go
@@ -0,0 +1,333 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package errors
+
+import (
+ "fmt"
+)
+
+// wrap is a helper to construct an Err that records the caller's location.
+func wrap(err error, format, suffix string, args ...interface{}) Err {
+ newErr := Err{
+ message: fmt.Sprintf(format+suffix, args...),
+ previous: err,
+ }
+ newErr.SetLocation(2)
+ return newErr
+}
+
+// timeout represents an error on timeout.
+type timeout struct {
+ Err
+}
+
+// Timeoutf returns an error which satisfies IsTimeout().
+func Timeoutf(format string, args ...interface{}) error {
+ return &timeout{wrap(nil, format, " timeout", args...)}
+}
+
+// NewTimeout returns an error which wraps err that satisfies
+// IsTimeout().
+func NewTimeout(err error, msg string) error {
+ return &timeout{wrap(err, msg, "")}
+}
+
+// IsTimeout reports whether err was created with Timeoutf() or
+// NewTimeout().
+func IsTimeout(err error) bool {
+ err = Cause(err)
+ _, ok := err.(*timeout)
+ return ok
+}
+
+// notFound represents an error when something has not been found.
+type notFound struct {
+ Err
+}
+
+// NotFoundf returns an error which satisfies IsNotFound().
+func NotFoundf(format string, args ...interface{}) error {
+ return ¬Found{wrap(nil, format, " not found", args...)}
+}
+
+// NewNotFound returns an error which wraps err that satisfies
+// IsNotFound().
+func NewNotFound(err error, msg string) error {
+ return ¬Found{wrap(err, msg, "")}
+}
+
+// IsNotFound reports whether err was created with NotFoundf() or
+// NewNotFound().
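+//
+// A typical call site (illustrative; Get is a placeholder):
+//
+//	if _, err := Get(id); errors.IsNotFound(err) {
+//		// the record is simply absent; not a failure
+//	}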
+func IsNotFound(err error) bool {
+ err = Cause(err)
+ _, ok := err.(*notFound)
+ return ok
+}
+
+// userNotFound represents an error when an inexistent user is looked up.
+type userNotFound struct {
+ Err
+}
+
+// UserNotFoundf returns an error which satisfies IsUserNotFound().
+func UserNotFoundf(format string, args ...interface{}) error {
+ return &userNotFound{wrap(nil, format, " user not found", args...)}
+}
+
+// NewUserNotFound returns an error which wraps err and satisfies
+// IsUserNotFound().
+func NewUserNotFound(err error, msg string) error {
+ return &userNotFound{wrap(err, msg, "")}
+}
+
+// IsUserNotFound reports whether err was created with UserNotFoundf() or
+// NewUserNotFound().
+func IsUserNotFound(err error) bool {
+ err = Cause(err)
+ _, ok := err.(*userNotFound)
+ return ok
+}
+
+// unauthorized represents an error when an operation is unauthorized.
+type unauthorized struct {
+ Err
+}
+
+// Unauthorizedf returns an error which satisfies IsUnauthorized().
+func Unauthorizedf(format string, args ...interface{}) error {
+ return &unauthorized{wrap(nil, format, "", args...)}
+}
+
+// NewUnauthorized returns an error which wraps err and satisfies
+// IsUnauthorized().
+func NewUnauthorized(err error, msg string) error {
+ return &unauthorized{wrap(err, msg, "")}
+}
+
+// IsUnauthorized reports whether err was created with Unauthorizedf() or
+// NewUnauthorized().
+func IsUnauthorized(err error) bool {
+ err = Cause(err)
+ _, ok := err.(*unauthorized)
+ return ok
+}
+
+// notImplemented represents an error when something is not
+// implemented.
+type notImplemented struct {
+ Err
+}
+
+// NotImplementedf returns an error which satisfies IsNotImplemented().
+func NotImplementedf(format string, args ...interface{}) error {
+ return ¬Implemented{wrap(nil, format, " not implemented", args...)}
+}
+
+// NewNotImplemented returns an error which wraps err and satisfies
+// IsNotImplemented().
+func NewNotImplemented(err error, msg string) error {
+ return ¬Implemented{wrap(err, msg, "")}
+}
+
+// IsNotImplemented reports whether err was created with
+// NotImplementedf() or NewNotImplemented().
+func IsNotImplemented(err error) bool {
+ err = Cause(err)
+ _, ok := err.(*notImplemented)
+ return ok
+}
+
+// alreadyExists represents an error when something already exists.
+type alreadyExists struct {
+ Err
+}
+
+// AlreadyExistsf returns an error which satisfies IsAlreadyExists().
+func AlreadyExistsf(format string, args ...interface{}) error {
+ return &alreadyExists{wrap(nil, format, " already exists", args...)}
+}
+
+// NewAlreadyExists returns an error which wraps err and satisfies
+// IsAlreadyExists().
+func NewAlreadyExists(err error, msg string) error {
+ return &alreadyExists{wrap(err, msg, "")}
+}
+
+// IsAlreadyExists reports whether the error was created with
+// AlreadyExistsf() or NewAlreadyExists().
+func IsAlreadyExists(err error) bool {
+ err = Cause(err)
+ _, ok := err.(*alreadyExists)
+ return ok
+}
+
+// notSupported represents an error when something is not supported.
+type notSupported struct {
+ Err
+}
+
+// NotSupportedf returns an error which satisfies IsNotSupported().
+func NotSupportedf(format string, args ...interface{}) error {
+ return ¬Supported{wrap(nil, format, " not supported", args...)}
+}
+
+// NewNotSupported returns an error which wraps err and satisfies
+// IsNotSupported().
+func NewNotSupported(err error, msg string) error {
+ return ¬Supported{wrap(err, msg, "")}
+}
+
+// IsNotSupported reports whether the error was created with
+// NotSupportedf() or NewNotSupported().
+func IsNotSupported(err error) bool {
+ err = Cause(err)
+ _, ok := err.(*notSupported)
+ return ok
+}
+
+// notValid represents an error when something is not valid.
+type notValid struct {
+ Err
+}
+
+// NotValidf returns an error which satisfies IsNotValid().
+func NotValidf(format string, args ...interface{}) error {
+ return ¬Valid{wrap(nil, format, " not valid", args...)}
+}
+
+// NewNotValid returns an error which wraps err and satisfies IsNotValid().
+func NewNotValid(err error, msg string) error {
+ return ¬Valid{wrap(err, msg, "")}
+}
+
+// IsNotValid reports whether the error was created with NotValidf() or
+// NewNotValid().
+func IsNotValid(err error) bool {
+ err = Cause(err)
+ _, ok := err.(*notValid)
+ return ok
+}
+
+// notProvisioned represents an error when something is not yet provisioned.
+type notProvisioned struct {
+ Err
+}
+
+// NotProvisionedf returns an error which satisfies IsNotProvisioned().
+func NotProvisionedf(format string, args ...interface{}) error {
+ return ¬Provisioned{wrap(nil, format, " not provisioned", args...)}
+}
+
+// NewNotProvisioned returns an error which wraps err that satisfies
+// IsNotProvisioned().
+func NewNotProvisioned(err error, msg string) error {
+ return ¬Provisioned{wrap(err, msg, "")}
+}
+
+// IsNotProvisioned reports whether err was created with NotProvisionedf() or
+// NewNotProvisioned().
+func IsNotProvisioned(err error) bool {
+ err = Cause(err)
+ _, ok := err.(*notProvisioned)
+ return ok
+}
+
+// notAssigned represents an error when something is not yet assigned to
+// something else.
+type notAssigned struct {
+ Err
+}
+
+// NotAssignedf returns an error which satisfies IsNotAssigned().
+func NotAssignedf(format string, args ...interface{}) error {
+ return ¬Assigned{wrap(nil, format, " not assigned", args...)}
+}
+
+// NewNotAssigned returns an error which wraps err that satisfies
+// IsNotAssigned().
+func NewNotAssigned(err error, msg string) error {
+ return ¬Assigned{wrap(err, msg, "")}
+}
+
+// IsNotAssigned reports whether err was created with NotAssignedf() or
+// NewNotAssigned().
+func IsNotAssigned(err error) bool {
+ err = Cause(err)
+ _, ok := err.(*notAssigned)
+ return ok
+}
+
+// badRequest represents an error when a request has bad parameters.
+type badRequest struct {
+ Err
+}
+
+// BadRequestf returns an error which satisfies IsBadRequest().
+func BadRequestf(format string, args ...interface{}) error {
+ return &badRequest{wrap(nil, format, "", args...)}
+}
+
+// NewBadRequest returns an error which wraps err that satisfies
+// IsBadRequest().
+func NewBadRequest(err error, msg string) error {
+ return &badRequest{wrap(err, msg, "")}
+}
+
+// IsBadRequest reports whether err was created with BadRequestf() or
+// NewBadRequest().
+func IsBadRequest(err error) bool {
+ err = Cause(err)
+ _, ok := err.(*badRequest)
+ return ok
+}
+
+// methodNotAllowed represents an error when an HTTP request
+// is made with an inappropriate method.
+type methodNotAllowed struct {
+ Err
+}
+
+// MethodNotAllowedf returns an error which satisfies IsMethodNotAllowed().
+func MethodNotAllowedf(format string, args ...interface{}) error {
+ return &methodNotAllowed{wrap(nil, format, "", args...)}
+}
+
+// NewMethodNotAllowed returns an error which wraps err that satisfies
+// IsMethodNotAllowed().
+func NewMethodNotAllowed(err error, msg string) error {
+ return &methodNotAllowed{wrap(err, msg, "")}
+}
+
+// IsMethodNotAllowed reports whether err was created with MethodNotAllowedf() or
+// NewMethodNotAllowed().
+func IsMethodNotAllowed(err error) bool {
+ err = Cause(err)
+ _, ok := err.(*methodNotAllowed)
+ return ok
+}
+
+// forbidden represents an error when a request cannot be completed because of
+// missing privileges.
+type forbidden struct {
+ Err
+}
+
+// Forbiddenf returns an error which satisfies IsForbidden().
+func Forbiddenf(format string, args ...interface{}) error {
+ return &forbidden{wrap(nil, format, "", args...)}
+}
+
+// NewForbidden returns an error which wraps err that satisfies
+// IsForbidden().
+func NewForbidden(err error, msg string) error {
+ return &forbidden{wrap(err, msg, "")}
+}
+
+// IsForbidden reports whether err was created with Forbiddenf() or
+// NewForbidden().
+func IsForbidden(err error) bool {
+ err = Cause(err)
+ _, ok := err.(*forbidden)
+ return ok
+}
diff --git a/vendor/github.com/juju/errors/functions.go b/vendor/github.com/juju/errors/functions.go
new file mode 100644
index 000000000..f86b09b2d
--- /dev/null
+++ b/vendor/github.com/juju/errors/functions.go
@@ -0,0 +1,330 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package errors
+
+import (
+ "fmt"
+ "strings"
+)
+
+// New is a drop-in replacement for the standard library errors.New function
+// that also records the location at which the error is created.
+//
+// For example:
+// return errors.New("validation failed")
+//
+func New(message string) error {
+ err := &Err{message: message}
+ err.SetLocation(1)
+ return err
+}
+
+// Errorf creates a new annotated error and records the location at which the
+// error is created. It is intended as a drop-in replacement for fmt.Errorf.
+//
+// For example:
+// return errors.Errorf("validation failed: %s", message)
+//
+func Errorf(format string, args ...interface{}) error {
+ err := &Err{message: fmt.Sprintf(format, args...)}
+ err.SetLocation(1)
+ return err
+}
+
+// Trace adds the location of the Trace call to the stack. The Cause of the
+// resulting error is the same as the error parameter. If the other error is
+// nil, the result will be nil.
+//
+// For example:
+// if err := SomeFunc(); err != nil {
+// return errors.Trace(err)
+// }
+//
+func Trace(other error) error {
+ if other == nil {
+ return nil
+ }
+ err := &Err{previous: other, cause: Cause(other)}
+ err.SetLocation(1)
+ return err
+}
+
+// Annotate is used to add extra context to an existing error. The location of
+// the Annotate call is recorded with the annotations. The file, line and
+// function are also recorded.
+//
+// For example:
+// if err := SomeFunc(); err != nil {
+// return errors.Annotate(err, "failed to frombulate")
+// }
+//
+func Annotate(other error, message string) error {
+ if other == nil {
+ return nil
+ }
+ err := &Err{
+ previous: other,
+ cause: Cause(other),
+ message: message,
+ }
+ err.SetLocation(1)
+ return err
+}
+
+// Annotatef is used to add extra context to an existing error. The location of
+// the Annotate call is recorded with the annotations. The file, line and
+// function are also recorded.
+//
+// For example:
+// if err := SomeFunc(); err != nil {
+// return errors.Annotatef(err, "failed to frombulate the %s", arg)
+// }
+//
+func Annotatef(other error, format string, args ...interface{}) error {
+ if other == nil {
+ return nil
+ }
+ err := &Err{
+ previous: other,
+ cause: Cause(other),
+ message: fmt.Sprintf(format, args...),
+ }
+ err.SetLocation(1)
+ return err
+}
+
+// DeferredAnnotatef annotates the given error (when it is not nil) with the given
+// format string and arguments (like fmt.Sprintf). If *err is nil, DeferredAnnotatef
+// does nothing. This method is used in a defer statement in order to annotate any
+// resulting error with the same message.
+//
+// For example:
+//
+// defer DeferredAnnotatef(&err, "failed to frombulate the %s", arg)
+//
+func DeferredAnnotatef(err *error, format string, args ...interface{}) {
+ if *err == nil {
+ return
+ }
+ newErr := &Err{
+ message: fmt.Sprintf(format, args...),
+ cause: Cause(*err),
+ previous: *err,
+ }
+ newErr.SetLocation(1)
+ *err = newErr
+}
+
+// Wrap changes the Cause of the error. The location of the Wrap call is also
+// stored in the error stack.
+//
+// For example:
+// if err := SomeFunc(); err != nil {
+// newErr := &packageError{"more context", private_value}
+// return errors.Wrap(err, newErr)
+// }
+//
+func Wrap(other, newDescriptive error) error {
+ err := &Err{
+ previous: other,
+ cause: newDescriptive,
+ }
+ err.SetLocation(1)
+ return err
+}
+
+// Wrapf changes the Cause of the error, and adds an annotation. The location
+// of the Wrap call is also stored in the error stack.
+//
+// For example:
+// if err := SomeFunc(); err != nil {
+// return errors.Wrapf(err, simpleErrorType, "invalid value %q", value)
+// }
+//
+func Wrapf(other, newDescriptive error, format string, args ...interface{}) error {
+ err := &Err{
+ message: fmt.Sprintf(format, args...),
+ previous: other,
+ cause: newDescriptive,
+ }
+ err.SetLocation(1)
+ return err
+}
+
+// Maskf masks the given error with the given format string and arguments (like
+// fmt.Sprintf), returning a new error that maintains the error stack, but
+// hides the underlying error type. The error string still contains the full
+// annotations. If you want to hide the annotations, call Wrap.
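+//
+// For example (illustrative; the surrounding names are placeholders):
+//	if err := SomeFunc(); err != nil {
+//		return errors.Maskf(err, "reading %q", name)
+//	}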
+func Maskf(other error, format string, args ...interface{}) error {
+ if other == nil {
+ return nil
+ }
+ err := &Err{
+ message: fmt.Sprintf(format, args...),
+ previous: other,
+ }
+ err.SetLocation(1)
+ return err
+}
+
+// Mask hides the underlying error type, and records the location of the masking.
+func Mask(other error) error {
+ if other == nil {
+ return nil
+ }
+ err := &Err{
+ previous: other,
+ }
+ err.SetLocation(1)
+ return err
+}
+
+// Cause returns the cause of the given error. This will be either the
+// original error, or the result of a Wrap or Mask call.
+//
+// Cause is the usual way to diagnose errors that may have been wrapped by
+// the other errors functions.
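+//
+// For example (illustrative; sql.ErrNoRows stands in for any sentinel error):
+//	if errors.Cause(err) == sql.ErrNoRows {
+//		// the query legitimately returned no rows
+//	}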
+func Cause(err error) error {
+ var diag error
+ if err, ok := err.(causer); ok {
+ diag = err.Cause()
+ }
+ if diag != nil {
+ return diag
+ }
+ return err
+}
+
+type causer interface {
+ Cause() error
+}
+
+type wrapper interface {
+ // Message returns the top level error message,
+ // not including the message from the Previous
+ // error.
+ Message() string
+
+ // Underlying returns the Previous error, or nil
+ // if there is none.
+ Underlying() error
+}
+
+type locationer interface {
+ Location() (string, int)
+}
+
+var (
+ _ wrapper = (*Err)(nil)
+ _ locationer = (*Err)(nil)
+ _ causer = (*Err)(nil)
+)
+
+// Details returns information about the stack of errors wrapped by err, in
+// the format:
+//
+// [{filename:99: error one} {otherfile:55: cause of error one}]
+//
+// This is a terse alternative to ErrorStack as it returns a single line.
+func Details(err error) string {
+ if err == nil {
+ return "[]"
+ }
+ var s []byte
+ s = append(s, '[')
+ for {
+ s = append(s, '{')
+ if err, ok := err.(locationer); ok {
+ file, line := err.Location()
+ if file != "" {
+ s = append(s, fmt.Sprintf("%s:%d", file, line)...)
+ s = append(s, ": "...)
+ }
+ }
+ if cerr, ok := err.(wrapper); ok {
+ s = append(s, cerr.Message()...)
+ err = cerr.Underlying()
+ } else {
+ s = append(s, err.Error()...)
+ err = nil
+ }
+ s = append(s, '}')
+ if err == nil {
+ break
+ }
+ s = append(s, ' ')
+ }
+ s = append(s, ']')
+ return string(s)
+}
+
+// ErrorStack returns a string representation of the annotated error. If the
+// error passed as the parameter is not an annotated error, the result is
+// simply the result of the Error() method on that error.
+//
+// If the error is an annotated error, a multi-line string is returned where
+// each line represents one entry in the annotation stack. The full filename
+// from the call stack is used in the output.
+//
+// first error
+// github.com/juju/errors/annotation_test.go:193:
+// github.com/juju/errors/annotation_test.go:194: annotation
+// github.com/juju/errors/annotation_test.go:195:
+// github.com/juju/errors/annotation_test.go:196: more context
+// github.com/juju/errors/annotation_test.go:197:
+func ErrorStack(err error) string {
+ return strings.Join(errorStack(err), "\n")
+}
+
+func errorStack(err error) []string {
+ if err == nil {
+ return nil
+ }
+
+ // We want the first error first
+ var lines []string
+ for {
+ var buff []byte
+ if err, ok := err.(locationer); ok {
+ file, line := err.Location()
+ // Strip off the leading GOPATH/src path elements.
+ file = trimGoPath(file)
+ if file != "" {
+ buff = append(buff, fmt.Sprintf("%s:%d", file, line)...)
+ buff = append(buff, ": "...)
+ }
+ }
+ if cerr, ok := err.(wrapper); ok {
+ message := cerr.Message()
+ buff = append(buff, message...)
+ // If there is a cause for this error, and it is different to the cause
+ // of the underlying error, then output the error string in the stack trace.
+ var cause error
+ if err1, ok := err.(causer); ok {
+ cause = err1.Cause()
+ }
+ err = cerr.Underlying()
+ if cause != nil && !sameError(Cause(err), cause) {
+ if message != "" {
+ buff = append(buff, ": "...)
+ }
+ buff = append(buff, cause.Error()...)
+ }
+ } else {
+ buff = append(buff, err.Error()...)
+ err = nil
+ }
+ lines = append(lines, string(buff))
+ if err == nil {
+ break
+ }
+ }
+ // reverse the lines to get the original error, which was at the end of
+ // the list, back to the start.
+ var result []string
+ for i := len(lines); i > 0; i-- {
+ result = append(result, lines[i-1])
+ }
+ return result
+}
diff --git a/vendor/github.com/juju/errors/path.go b/vendor/github.com/juju/errors/path.go
new file mode 100644
index 000000000..e216eb8ff
--- /dev/null
+++ b/vendor/github.com/juju/errors/path.go
@@ -0,0 +1,19 @@
+// Copyright 2013, 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package errors
+
+import (
+ "fmt"
+ "go/build"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+var goPath = build.Default.GOPATH
+var srcDir = filepath.Join(goPath, "src")
+
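+// trimGoPath strips the leading "$GOPATH/src/" prefix from filename so that
+// recorded error locations are reported relative to the package import path.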
+func trimGoPath(filename string) string {
+ return strings.TrimPrefix(filename, fmt.Sprintf("%s%s", srcDir, string(os.PathSeparator)))
+}
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
new file mode 100644
index 000000000..14127cd83
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE
@@ -0,0 +1,9 @@
+(The MIT License)
+
+Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
new file mode 100644
index 000000000..949b77e30
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md
@@ -0,0 +1,40 @@
+# Windows Terminal Sequences
+
+This library allows enabling Windows terminal color support for Go.
+
+See [Console Virtual Terminal Sequences](https://docs.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences) for details.
+
+## Usage
+
+```go
+import (
+ "syscall"
+
+ sequences "github.com/konsorten/go-windows-terminal-sequences"
+)
+
+func main() {
+ sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true)
+}
+
+```
+
+## Authors
+
+The tool is sponsored by the [marvin + konsorten GmbH](http://www.konsorten.de).
+
+We thank all the authors who provided code to this library:
+
+* Felix Kollmann
+
+## License
+
+(The MIT License)
+
+Copyright (c) 2018 marvin + konsorten GmbH (open-source@konsorten.de)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod
new file mode 100644
index 000000000..716c61312
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod
@@ -0,0 +1 @@
+module github.com/konsorten/go-windows-terminal-sequences
diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
new file mode 100644
index 000000000..ef18d8f97
--- /dev/null
+++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go
@@ -0,0 +1,36 @@
+// +build windows
+
+package sequences
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var (
+ kernel32Dll *syscall.LazyDLL = syscall.NewLazyDLL("Kernel32.dll")
+ setConsoleMode *syscall.LazyProc = kernel32Dll.NewProc("SetConsoleMode")
+)
+
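+// EnableVirtualTerminalProcessing turns interpretation of ANSI escape
+// sequences on (or off) for the console referred to by stream, so that
+// colored output works on Windows 10 and later.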
+func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error {
+ const ENABLE_VIRTUAL_TERMINAL_PROCESSING uint32 = 0x4
+
+ var mode uint32
+ err := syscall.GetConsoleMode(stream, &mode)
+ if err != nil {
+ return err
+ }
+
+ if enable {
+ mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING
+ } else {
+ mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING
+ }
+
+ ret, _, err := setConsoleMode.Call(uintptr(unsafe.Pointer(stream)), uintptr(mode))
+ if ret == 0 {
+ return err
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/miekg/dns/Gopkg.lock b/vendor/github.com/miekg/dns/Gopkg.lock
index 4455c9836..686632207 100644
--- a/vendor/github.com/miekg/dns/Gopkg.lock
+++ b/vendor/github.com/miekg/dns/Gopkg.lock
@@ -3,19 +3,55 @@
[[projects]]
branch = "master"
+ digest = "1:6914c49eed986dfb8dffb33516fa129c49929d4d873f41e073c83c11c372b870"
name = "golang.org/x/crypto"
- packages = ["ed25519","ed25519/internal/edwards25519"]
- revision = "b47b1587369238182299fe4dad77d05b8b461e06"
+ packages = [
+ "ed25519",
+ "ed25519/internal/edwards25519",
+ ]
+ pruneopts = ""
+ revision = "e3636079e1a4c1f337f212cc5cd2aca108f6c900"
[[projects]]
branch = "master"
+ digest = "1:08e41d63f8dac84d83797368b56cf0b339e42d0224e5e56668963c28aec95685"
name = "golang.org/x/net"
- packages = ["bpf","internal/iana","internal/socket","ipv4","ipv6"]
- revision = "1e491301e022f8f977054da4c2d852decd59571f"
+ packages = [
+ "bpf",
+ "context",
+ "internal/iana",
+ "internal/socket",
+ "ipv4",
+ "ipv6",
+ ]
+ pruneopts = ""
+ revision = "4dfa2610cdf3b287375bbba5b8f2a14d3b01d8de"
+
+[[projects]]
+ branch = "master"
+ digest = "1:b2ea75de0ccb2db2ac79356407f8a4cd8f798fe15d41b381c00abf3ae8e55ed1"
+ name = "golang.org/x/sync"
+ packages = ["errgroup"]
+ pruneopts = ""
+ revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"
+
+[[projects]]
+ branch = "master"
+ digest = "1:149a432fabebb8221a80f77731b1cd63597197ded4f14af606ebe3a0959004ec"
+ name = "golang.org/x/sys"
+ packages = ["unix"]
+ pruneopts = ""
+ revision = "e4b3c5e9061176387e7cea65e4dc5853801f3fb7"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
- inputs-digest = "c4abc38abaeeeeb9be92455c9c02cae32841122b8982aaa067ef25bb8e86ff9d"
+ input-imports = [
+ "golang.org/x/crypto/ed25519",
+ "golang.org/x/net/ipv4",
+ "golang.org/x/net/ipv6",
+ "golang.org/x/sync/errgroup",
+ "golang.org/x/sys/unix",
+ ]
solver-name = "gps-cdcl"
solver-version = 1
diff --git a/vendor/github.com/miekg/dns/Gopkg.toml b/vendor/github.com/miekg/dns/Gopkg.toml
index 2f655b2c7..85e6ff31b 100644
--- a/vendor/github.com/miekg/dns/Gopkg.toml
+++ b/vendor/github.com/miekg/dns/Gopkg.toml
@@ -24,3 +24,15 @@
[[constraint]]
branch = "master"
name = "golang.org/x/crypto"
+
+[[constraint]]
+ branch = "master"
+ name = "golang.org/x/net"
+
+[[constraint]]
+ branch = "master"
+ name = "golang.org/x/sys"
+
+[[constraint]]
+ branch = "master"
+ name = "golang.org/x/sync"
diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go
index 7a319b02c..63ced2bd0 100644
--- a/vendor/github.com/miekg/dns/client.go
+++ b/vendor/github.com/miekg/dns/client.go
@@ -567,7 +567,7 @@ func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg,
if deadline, ok := ctx.Deadline(); !ok {
timeout = 0
} else {
- timeout = deadline.Sub(time.Now())
+ timeout = time.Until(deadline)
}
// not passing the context to the underlying calls, as the API does not support
// context. For timeouts you should set up Client.Dialer and call Client.Exchange.
diff --git a/vendor/github.com/miekg/dns/generate.go b/vendor/github.com/miekg/dns/generate.go
index 3a559793f..91d928c83 100644
--- a/vendor/github.com/miekg/dns/generate.go
+++ b/vendor/github.com/miekg/dns/generate.go
@@ -107,6 +107,8 @@ BuildRR:
mod, offset, err = modToPrintf(s[j+2 : j+2+sep])
if err != nil {
return err.Error()
+ } else if start+offset < 0 || end+offset > 1<<31-1 {
+ return "bad offset in $GENERATE"
}
j += 2 + sep // Jump to it
}
@@ -152,7 +154,7 @@ func modToPrintf(s string) (string, int, error) {
return "", 0, errors.New("bad base in $GENERATE")
}
offset, err := strconv.Atoi(xs[0])
- if err != nil || offset > 255 {
+ if err != nil {
return "", 0, errors.New("bad offset in $GENERATE")
}
width, err := strconv.Atoi(xs[1])
diff --git a/vendor/github.com/miekg/dns/listen_go111.go b/vendor/github.com/miekg/dns/listen_go111.go
index bd024c893..fad195cfe 100644
--- a/vendor/github.com/miekg/dns/listen_go111.go
+++ b/vendor/github.com/miekg/dns/listen_go111.go
@@ -1,4 +1,5 @@
-// +build go1.11,!windows
+// +build go1.11
+// +build aix darwin dragonfly freebsd linux netbsd openbsd
package dns
diff --git a/vendor/github.com/miekg/dns/listen_go_not111.go b/vendor/github.com/miekg/dns/listen_go_not111.go
index f1fc652c4..b9201417a 100644
--- a/vendor/github.com/miekg/dns/listen_go_not111.go
+++ b/vendor/github.com/miekg/dns/listen_go_not111.go
@@ -1,4 +1,4 @@
-// +build !go1.11 windows
+// +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
package dns
diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go
index f8b847650..47ac6cf28 100644
--- a/vendor/github.com/miekg/dns/msg.go
+++ b/vendor/github.com/miekg/dns/msg.go
@@ -302,6 +302,12 @@ func packDomainName(s string, msg []byte, off int, compression map[string]int, c
}
// If we did compression and we find something add the pointer here
if pointer != -1 {
+ // Clear the msg buffer after the pointer location, otherwise
+ // packDataNsec writes the wrong data to msg.
+ tainted := msg[nameoffset:off]
+ for i := range tainted {
+ tainted[i] = 0
+ }
// We have two bytes (14 bits) to put the pointer in
// if msg == nil, we will never do compression
binary.BigEndian.PutUint16(msg[nameoffset:], uint16(pointer^0xC000))
@@ -367,12 +373,10 @@ Loop:
var buf [3]byte
bufs := strconv.AppendInt(buf[:0], int64(b), 10)
s = append(s, '\\')
- for i := 0; i < 3-len(bufs); i++ {
+ for i := len(bufs); i < 3; i++ {
s = append(s, '0')
}
- for _, r := range bufs {
- s = append(s, r)
- }
+ s = append(s, bufs...)
// presentation-format \DDD escapes add 3 extra bytes
maxLen += 3
} else {
@@ -512,7 +516,7 @@ func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) {
off = off0
var s string
for off < len(msg) && err == nil {
- s, off, err = unpackTxtString(msg, off)
+ s, off, err = unpackString(msg, off)
if err == nil {
ss = append(ss, s)
}
@@ -520,39 +524,6 @@ func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) {
return
}
-func unpackTxtString(msg []byte, offset int) (string, int, error) {
- if offset+1 > len(msg) {
- return "", offset, &Error{err: "overflow unpacking txt"}
- }
- l := int(msg[offset])
- if offset+l+1 > len(msg) {
- return "", offset, &Error{err: "overflow unpacking txt"}
- }
- s := make([]byte, 0, l)
- for _, b := range msg[offset+1 : offset+1+l] {
- switch b {
- case '"', '\\':
- s = append(s, '\\', b)
- default:
- if b < 32 || b > 127 { // unprintable
- var buf [3]byte
- bufs := strconv.AppendInt(buf[:0], int64(b), 10)
- s = append(s, '\\')
- for i := 0; i < 3-len(bufs); i++ {
- s = append(s, '0')
- }
- for _, r := range bufs {
- s = append(s, r)
- }
- } else {
- s = append(s, b)
- }
- }
- }
- offset += 1 + l
- return string(s), offset, nil
-}
-
// Helpers for dealing with escaped bytes
func isDigit(b byte) bool { return b >= '0' && b <= '9' }
@@ -560,6 +531,10 @@ func dddToByte(s []byte) byte {
return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0'))
}
+func dddStringToByte(s string) byte {
+ return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0'))
+}
+
// Helper function for packing and unpacking
func intToBytes(i *big.Int, length int) []byte {
buf := i.Bytes()
diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go
index ec8cd9a85..81fc2b1be 100644
--- a/vendor/github.com/miekg/dns/msg_helpers.go
+++ b/vendor/github.com/miekg/dns/msg_helpers.go
@@ -6,7 +6,7 @@ import (
"encoding/binary"
"encoding/hex"
"net"
- "strconv"
+ "strings"
)
// helper functions called from the generated zmsg.go
@@ -267,29 +267,21 @@ func unpackString(msg []byte, off int) (string, int, error) {
if off+l+1 > len(msg) {
return "", off, &Error{err: "overflow unpacking txt"}
}
- s := make([]byte, 0, l)
+ var s strings.Builder
+ s.Grow(l)
for _, b := range msg[off+1 : off+1+l] {
- switch b {
- case '"', '\\':
- s = append(s, '\\', b)
+ switch {
+ case b == '"' || b == '\\':
+ s.WriteByte('\\')
+ s.WriteByte(b)
+ case b < ' ' || b > '~': // unprintable
+ writeEscapedByte(&s, b)
default:
- if b < 32 || b > 127 { // unprintable
- var buf [3]byte
- bufs := strconv.AppendInt(buf[:0], int64(b), 10)
- s = append(s, '\\')
- for i := 0; i < 3-len(bufs); i++ {
- s = append(s, '0')
- }
- for _, r := range bufs {
- s = append(s, r)
- }
- } else {
- s = append(s, b)
- }
+ s.WriteByte(b)
}
}
off += 1 + l
- return string(s), off, nil
+ return s.String(), off, nil
}
func packString(s string, msg []byte, off int) (int, error) {
diff --git a/vendor/github.com/miekg/dns/privaterr.go b/vendor/github.com/miekg/dns/privaterr.go
index 41989e7ae..d931da7ef 100644
--- a/vendor/github.com/miekg/dns/privaterr.go
+++ b/vendor/github.com/miekg/dns/privaterr.go
@@ -134,7 +134,7 @@ func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata)
typeToparserFunc[rtype] = parserFunc{setPrivateRR, true}
}
-// PrivateHandleRemove removes defenitions required to support private RR type.
+// PrivateHandleRemove removes definitions required to support private RR type.
func PrivateHandleRemove(rtype uint16) {
rtypestr, ok := TypeToString[rtype]
if ok {
@@ -144,5 +144,4 @@ func PrivateHandleRemove(rtype uint16) {
delete(StringToType, rtypestr)
delete(typeToUnpack, rtype)
}
- return
}
diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go
index f9cd47401..a752dbd01 100644
--- a/vendor/github.com/miekg/dns/scan.go
+++ b/vendor/github.com/miekg/dns/scan.go
@@ -10,7 +10,6 @@ import (
)
const maxTok = 2048 // Largest token we can return.
-const maxUint16 = 1<<16 - 1
// Tokinize a RFC 1035 zone file. The tokenizer will normalize it:
// * Add ownernames if they are left blank;
@@ -80,9 +79,9 @@ type lex struct {
length int // length of the token
err bool // when true, token text has lexer error
value uint8 // value: zString, _BLANK, etc.
+ torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar
line int // line in the file
column int // column in the file
- torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar
comment string // any comment text seen
}
@@ -209,10 +208,9 @@ func parseZone(r io.Reader, origin, f string, defttl *ttlState, t chan *Token, i
var prevName string
for l := range c {
// Lexer spotted an error already
- if l.err == true {
+ if l.err {
t <- &Token{Error: &ParseError{f, l.token, l}}
return
-
}
switch st {
case zExpectOwnerDir:
@@ -639,7 +637,6 @@ func zlexer(s *scan, c chan lex) {
if quote {
str[stri] = x
stri++
- break
}
// discard if outside of quotes
case '\n':
diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go
index e9556282d..67f884b0d 100644
--- a/vendor/github.com/miekg/dns/scan_rr.go
+++ b/vendor/github.com/miekg/dns/scan_rr.go
@@ -1665,9 +1665,9 @@ func setTA(h RR_Header, c chan lex, o, f string) (RR, *ParseError, string) {
return nil, &ParseError{f, "bad TA DigestType", l}, ""
}
rr.DigestType = uint8(i)
- s, e, c1 := endingToString(c, "bad TA Digest", f)
- if e != nil {
- return nil, e.(*ParseError), c1
+ s, err, c1 := endingToString(c, "bad TA Digest", f)
+ if err != nil {
+ return nil, err, c1
}
rr.Digest = s
return rr, nil, c1
diff --git a/vendor/github.com/miekg/dns/scanner.go b/vendor/github.com/miekg/dns/scanner.go
index 424e5af9f..5b124ec59 100644
--- a/vendor/github.com/miekg/dns/scanner.go
+++ b/vendor/github.com/miekg/dns/scanner.go
@@ -42,7 +42,7 @@ func (s *scan) tokenText() (byte, error) {
// delay the newline handling until the next token is delivered,
// fixes off-by-one errors when reporting a parse error.
- if s.eof == true {
+ if s.eof {
s.position.Line++
s.position.Column = 0
s.eof = false
diff --git a/vendor/github.com/miekg/dns/serve_mux.go b/vendor/github.com/miekg/dns/serve_mux.go
new file mode 100644
index 000000000..ae304db53
--- /dev/null
+++ b/vendor/github.com/miekg/dns/serve_mux.go
@@ -0,0 +1,147 @@
+package dns
+
+import (
+ "strings"
+ "sync"
+)
+
+// ServeMux is a DNS request multiplexer. It matches the zone name of
+// each incoming request against a list of registered patterns and calls
+// the handler for the pattern that most closely matches the zone name.
+//
+// ServeMux is DNSSEC aware, meaning that queries for the DS record are
+// redirected to the parent zone (if that is also registered), otherwise
+// the child gets the query.
+//
+// ServeMux is also safe for concurrent access from multiple goroutines.
+//
+// The zero ServeMux is empty and ready for use.
+type ServeMux struct {
+ z map[string]Handler
+ m sync.RWMutex
+}
+
+// NewServeMux allocates and returns a new ServeMux.
+func NewServeMux() *ServeMux {
+ return new(ServeMux)
+}
+
+// DefaultServeMux is the default ServeMux used by Serve.
+var DefaultServeMux = NewServeMux()
+
+func (mux *ServeMux) match(q string, t uint16) Handler {
+ mux.m.RLock()
+ defer mux.m.RUnlock()
+ if mux.z == nil {
+ return nil
+ }
+
+ var handler Handler
+
+ // TODO(tmthrgd): Once https://go-review.googlesource.com/c/go/+/137575
+ // lands in a go release, replace the following with strings.ToLower.
+ var sb strings.Builder
+ for i := 0; i < len(q); i++ {
+ c := q[i]
+ if !(c >= 'A' && c <= 'Z') {
+ continue
+ }
+
+ sb.Grow(len(q))
+ sb.WriteString(q[:i])
+
+ for ; i < len(q); i++ {
+ c := q[i]
+ if c >= 'A' && c <= 'Z' {
+ c += 'a' - 'A'
+ }
+
+ sb.WriteByte(c)
+ }
+
+ q = sb.String()
+ break
+ }
+
+ for off, end := 0, false; !end; off, end = NextLabel(q, off) {
+ if h, ok := mux.z[q[off:]]; ok {
+ if t != TypeDS {
+ return h
+ }
+ // Continue for DS to see if we have a parent too, if so delegate to the parent
+ handler = h
+ }
+ }
+
+ // Wildcard match, if we have found nothing try the root zone as a last resort.
+ if h, ok := mux.z["."]; ok {
+ return h
+ }
+
+ return handler
+}
+
+// Handle adds a handler to the ServeMux for pattern.
+func (mux *ServeMux) Handle(pattern string, handler Handler) {
+ if pattern == "" {
+ panic("dns: invalid pattern " + pattern)
+ }
+ mux.m.Lock()
+ if mux.z == nil {
+ mux.z = make(map[string]Handler)
+ }
+ mux.z[Fqdn(pattern)] = handler
+ mux.m.Unlock()
+}
+
+// HandleFunc adds a handler function to the ServeMux for pattern.
+func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
+ mux.Handle(pattern, HandlerFunc(handler))
+}
+
+// HandleRemove deregisters the handler specific for pattern from the ServeMux.
+func (mux *ServeMux) HandleRemove(pattern string) {
+ if pattern == "" {
+ panic("dns: invalid pattern " + pattern)
+ }
+ mux.m.Lock()
+ delete(mux.z, Fqdn(pattern))
+ mux.m.Unlock()
+}
+
+// ServeDNS dispatches the request to the handler whose pattern most
+// closely matches the request message.
+//
+// ServeDNS is DNSSEC aware, meaning that queries for the DS record
+// are redirected to the parent zone (if that is also registered),
+// otherwise the child gets the query.
+//
+// If no handler is found, or there is no question, a standard SERVFAIL
+// message is returned.
+func (mux *ServeMux) ServeDNS(w ResponseWriter, req *Msg) {
+ var h Handler
+ if len(req.Question) >= 1 { // allow more than one question
+ h = mux.match(req.Question[0].Name, req.Question[0].Qtype)
+ }
+
+ if h != nil {
+ h.ServeDNS(w, req)
+ } else {
+ HandleFailed(w, req)
+ }
+}
+
+// Handle registers the handler with the given pattern
+// in the DefaultServeMux. The documentation for
+// ServeMux explains how patterns are matched.
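+//
+// A minimal illustrative registration (zone name and reply logic are placeholders):
+//
+//	dns.HandleFunc("example.org.", func(w dns.ResponseWriter, r *dns.Msg) {
+//		m := new(dns.Msg)
+//		m.SetReply(r)
+//		w.WriteMsg(m)
+//	})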
+func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
+
+// HandleRemove deregisters the handle with the given pattern
+// in the DefaultServeMux.
+func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) }
+
+// HandleFunc registers the handler function with the given pattern
+// in the DefaultServeMux.
+func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
+ DefaultServeMux.HandleFunc(pattern, handler)
+}
diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go
index 4fbf7db6f..4b4ec33c8 100644
--- a/vendor/github.com/miekg/dns/server.go
+++ b/vendor/github.com/miekg/dns/server.go
@@ -41,6 +41,17 @@ type Handler interface {
ServeDNS(w ResponseWriter, r *Msg)
}
+// The HandlerFunc type is an adapter to allow the use of
+// ordinary functions as DNS handlers. If f is a function
+// with the appropriate signature, HandlerFunc(f) is a
+// Handler object that calls f.
+type HandlerFunc func(ResponseWriter, *Msg)
+
+// ServeDNS calls f(w, r).
+func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) {
+ f(w, r)
+}
+
// A ResponseWriter interface is used by an DNS handler to
// construct an DNS response.
type ResponseWriter interface {
@@ -63,11 +74,17 @@ type ResponseWriter interface {
Hijack()
}
+// A ConnectionStater interface is used by a DNS Handler to access TLS connection state
+// when available.
+type ConnectionStater interface {
+ ConnectionState() *tls.ConnectionState
+}
+
type response struct {
msg []byte
hijacked bool // connection has been hijacked by handler
- tsigStatus error
tsigTimersOnly bool
+ tsigStatus error
tsigRequestMAC string
tsigSecret map[string]string // the tsig secrets
udp *net.UDPConn // i/o connection if UDP was used
@@ -77,35 +94,6 @@ type response struct {
wg *sync.WaitGroup // for gracefull shutdown
}
-// ServeMux is an DNS request multiplexer. It matches the
-// zone name of each incoming request against a list of
-// registered patterns add calls the handler for the pattern
-// that most closely matches the zone name. ServeMux is DNSSEC aware, meaning
-// that queries for the DS record are redirected to the parent zone (if that
-// is also registered), otherwise the child gets the query.
-// ServeMux is also safe for concurrent access from multiple goroutines.
-type ServeMux struct {
- z map[string]Handler
- m *sync.RWMutex
-}
-
-// NewServeMux allocates and returns a new ServeMux.
-func NewServeMux() *ServeMux { return &ServeMux{z: make(map[string]Handler), m: new(sync.RWMutex)} }
-
-// DefaultServeMux is the default ServeMux used by Serve.
-var DefaultServeMux = NewServeMux()
-
-// The HandlerFunc type is an adapter to allow the use of
-// ordinary functions as DNS handlers. If f is a function
-// with the appropriate signature, HandlerFunc(f) is a
-// Handler object that calls f.
-type HandlerFunc func(ResponseWriter, *Msg)
-
-// ServeDNS calls f(w, r).
-func (f HandlerFunc) ServeDNS(w ResponseWriter, r *Msg) {
- f(w, r)
-}
-
// HandleFailed returns a HandlerFunc that returns SERVFAIL for every request it gets.
func HandleFailed(w ResponseWriter, r *Msg) {
m := new(Msg)
@@ -114,8 +102,6 @@ func HandleFailed(w ResponseWriter, r *Msg) {
w.WriteMsg(m)
}
-func failedHandler() Handler { return HandlerFunc(HandleFailed) }
-
// ListenAndServe Starts a server on address and network specified Invoke handler
// for incoming queries.
func ListenAndServe(addr string, network string, handler Handler) error {
@@ -154,99 +140,6 @@ func ActivateAndServe(l net.Listener, p net.PacketConn, handler Handler) error {
return server.ActivateAndServe()
}
-func (mux *ServeMux) match(q string, t uint16) Handler {
- mux.m.RLock()
- defer mux.m.RUnlock()
- var handler Handler
- b := make([]byte, len(q)) // worst case, one label of length q
- off := 0
- end := false
- for {
- l := len(q[off:])
- for i := 0; i < l; i++ {
- b[i] = q[off+i]
- if b[i] >= 'A' && b[i] <= 'Z' {
- b[i] |= 'a' - 'A'
- }
- }
- if h, ok := mux.z[string(b[:l])]; ok { // causes garbage, might want to change the map key
- if t != TypeDS {
- return h
- }
- // Continue for DS to see if we have a parent too, if so delegeate to the parent
- handler = h
- }
- off, end = NextLabel(q, off)
- if end {
- break
- }
- }
- // Wildcard match, if we have found nothing try the root zone as a last resort.
- if h, ok := mux.z["."]; ok {
- return h
- }
- return handler
-}
-
-// Handle adds a handler to the ServeMux for pattern.
-func (mux *ServeMux) Handle(pattern string, handler Handler) {
- if pattern == "" {
- panic("dns: invalid pattern " + pattern)
- }
- mux.m.Lock()
- mux.z[Fqdn(pattern)] = handler
- mux.m.Unlock()
-}
-
-// HandleFunc adds a handler function to the ServeMux for pattern.
-func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
- mux.Handle(pattern, HandlerFunc(handler))
-}
-
-// HandleRemove deregistrars the handler specific for pattern from the ServeMux.
-func (mux *ServeMux) HandleRemove(pattern string) {
- if pattern == "" {
- panic("dns: invalid pattern " + pattern)
- }
- mux.m.Lock()
- delete(mux.z, Fqdn(pattern))
- mux.m.Unlock()
-}
-
-// ServeDNS dispatches the request to the handler whose
-// pattern most closely matches the request message. If DefaultServeMux
-// is used the correct thing for DS queries is done: a possible parent
-// is sought.
-// If no handler is found a standard SERVFAIL message is returned
-// If the request message does not have exactly one question in the
-// question section a SERVFAIL is returned, unlesss Unsafe is true.
-func (mux *ServeMux) ServeDNS(w ResponseWriter, request *Msg) {
- var h Handler
- if len(request.Question) < 1 { // allow more than one question
- h = failedHandler()
- } else {
- if h = mux.match(request.Question[0].Name, request.Question[0].Qtype); h == nil {
- h = failedHandler()
- }
- }
- h.ServeDNS(w, request)
-}
-
-// Handle registers the handler with the given pattern
-// in the DefaultServeMux. The documentation for
-// ServeMux explains how patterns are matched.
-func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) }
-
-// HandleRemove deregisters the handle with the given pattern
-// in the DefaultServeMux.
-func HandleRemove(pattern string) { DefaultServeMux.HandleRemove(pattern) }
-
-// HandleFunc registers the handler function with the given pattern
-// in the DefaultServeMux.
-func HandleFunc(pattern string, handler func(ResponseWriter, *Msg)) {
- DefaultServeMux.HandleFunc(pattern, handler)
-}
-
// Writer writes raw DNS messages; each call to Write should send an entire message.
type Writer interface {
io.Writer
@@ -523,14 +416,13 @@ func (srv *Server) Shutdown() error {
// to terminate.
func (srv *Server) ShutdownContext(ctx context.Context) error {
srv.lock.Lock()
- started := srv.started
- srv.started = false
- srv.lock.Unlock()
-
- if !started {
+ if !srv.started {
+ srv.lock.Unlock()
return &Error{err: "server not started"}
}
+ srv.started = false
+
if srv.PacketConn != nil {
srv.PacketConn.SetReadDeadline(aLongTimeAgo) // Unblock reads
}
@@ -539,10 +431,10 @@ func (srv *Server) ShutdownContext(ctx context.Context) error {
srv.Listener.Close()
}
- srv.lock.Lock()
for rw := range srv.conns {
rw.SetReadDeadline(aLongTimeAgo) // Unblock reads
}
+
srv.lock.Unlock()
if testShutdownNotify != nil {
@@ -729,20 +621,23 @@ func (srv *Server) serve(w *response) {
}
}
-func (srv *Server) serveDNS(w *response) {
- req := new(Msg)
- err := req.Unpack(w.msg)
+func (srv *Server) disposeBuffer(w *response) {
if w.udp != nil && cap(w.msg) == srv.UDPSize {
srv.udpPool.Put(w.msg[:srv.UDPSize])
}
w.msg = nil
+}
+
+func (srv *Server) serveDNS(w *response) {
+ req := new(Msg)
+ err := req.Unpack(w.msg)
if err != nil { // Send a FormatError back
x := new(Msg)
x.SetRcodeFormatError(req)
w.WriteMsg(x)
- return
}
- if !srv.Unsafe && req.Response {
+ if err != nil || !srv.Unsafe && req.Response {
+ srv.disposeBuffer(w)
return
}
@@ -759,6 +654,8 @@ func (srv *Server) serveDNS(w *response) {
}
}
+ srv.disposeBuffer(w)
+
handler := srv.Handler
if handler == nil {
handler = DefaultServeMux
@@ -768,7 +665,16 @@ func (srv *Server) serveDNS(w *response) {
}
func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
- conn.SetReadDeadline(time.Now().Add(timeout))
+ // If we race with ShutdownContext, the read deadline may
+ // have been set in the distant past to unblock the read
+ // below. We must not override it, otherwise we may block
+ // ShutdownContext.
+ srv.lock.RLock()
+ if srv.started {
+ conn.SetReadDeadline(time.Now().Add(timeout))
+ }
+ srv.lock.RUnlock()
+
l := make([]byte, 2)
n, err := conn.Read(l)
if err != nil || n != 2 {
@@ -803,7 +709,13 @@ func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error)
}
func (srv *Server) readUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
- conn.SetReadDeadline(time.Now().Add(timeout))
+ srv.lock.RLock()
+ if srv.started {
+ // See the comment in readTCP above.
+ conn.SetReadDeadline(time.Now().Add(timeout))
+ }
+ srv.lock.RUnlock()
+
m := srv.udpPool.Get().([]byte)
n, s, err := ReadFromSessionUDP(conn, m)
if err != nil {
@@ -855,24 +767,33 @@ func (w *response) Write(m []byte) (int, error) {
n, err := io.Copy(w.tcp, bytes.NewReader(m))
return int(n), err
+ default:
+ panic("dns: Write called after Close")
}
- panic("not reached")
}
// LocalAddr implements the ResponseWriter.LocalAddr method.
func (w *response) LocalAddr() net.Addr {
- if w.tcp != nil {
+ switch {
+ case w.udp != nil:
+ return w.udp.LocalAddr()
+ case w.tcp != nil:
return w.tcp.LocalAddr()
+ default:
+ panic("dns: LocalAddr called after Close")
}
- return w.udp.LocalAddr()
}
// RemoteAddr implements the ResponseWriter.RemoteAddr method.
func (w *response) RemoteAddr() net.Addr {
- if w.tcp != nil {
+ switch {
+ case w.udpSession != nil:
+ return w.udpSession.RemoteAddr()
+ case w.tcp != nil:
return w.tcp.RemoteAddr()
+ default:
+ panic("dns: RemoteAddr called after Close")
}
- return w.udpSession.RemoteAddr()
}
// TsigStatus implements the ResponseWriter.TsigStatus method.
@@ -894,3 +815,15 @@ func (w *response) Close() error {
}
return nil
}
+
+// ConnectionState() implements the ConnectionStater.ConnectionState() interface.
+func (w *response) ConnectionState() *tls.ConnectionState {
+ type tlsConnectionStater interface {
+ ConnectionState() tls.ConnectionState
+ }
+ if v, ok := w.tcp.(tlsConnectionStater); ok {
+ t := v.ConnectionState()
+ return &t
+ }
+ return nil
+}
diff --git a/vendor/github.com/miekg/dns/sig0.go b/vendor/github.com/miekg/dns/sig0.go
index f31e9e684..07c2acb19 100644
--- a/vendor/github.com/miekg/dns/sig0.go
+++ b/vendor/github.com/miekg/dns/sig0.go
@@ -127,8 +127,7 @@ func (rr *SIG) Verify(k *KEY, buf []byte) error {
if offset+1 >= buflen {
continue
}
- var rdlen uint16
- rdlen = binary.BigEndian.Uint16(buf[offset:])
+ rdlen := binary.BigEndian.Uint16(buf[offset:])
offset += 2
offset += int(rdlen)
}
diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go
index a64f4d7d8..115f2c7bd 100644
--- a/vendor/github.com/miekg/dns/types.go
+++ b/vendor/github.com/miekg/dns/types.go
@@ -419,128 +419,130 @@ type TXT struct {
func (rr *TXT) String() string { return rr.Hdr.String() + sprintTxt(rr.Txt) }
func sprintName(s string) string {
- src := []byte(s)
- dst := make([]byte, 0, len(src))
- for i := 0; i < len(src); {
- if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' {
- dst = append(dst, src[i:i+2]...)
+ var dst strings.Builder
+ dst.Grow(len(s))
+ for i := 0; i < len(s); {
+ if i+1 < len(s) && s[i] == '\\' && s[i+1] == '.' {
+ dst.WriteString(s[i : i+2])
i += 2
- } else {
- b, n := nextByte(src, i)
- if n == 0 {
- i++ // dangling back slash
- } else if b == '.' {
- dst = append(dst, b)
- } else {
- dst = appendDomainNameByte(dst, b)
- }
- i += n
+ continue
}
+
+ b, n := nextByte(s, i)
+ switch {
+ case n == 0:
+ i++ // dangling back slash
+ case b == '.':
+ dst.WriteByte('.')
+ default:
+ writeDomainNameByte(&dst, b)
+ }
+ i += n
}
- return string(dst)
+ return dst.String()
}
func sprintTxtOctet(s string) string {
- src := []byte(s)
- dst := make([]byte, 0, len(src))
- dst = append(dst, '"')
- for i := 0; i < len(src); {
- if i+1 < len(src) && src[i] == '\\' && src[i+1] == '.' {
- dst = append(dst, src[i:i+2]...)
+ var dst strings.Builder
+ dst.Grow(2 + len(s))
+ dst.WriteByte('"')
+ for i := 0; i < len(s); {
+ if i+1 < len(s) && s[i] == '\\' && s[i+1] == '.' {
+ dst.WriteString(s[i : i+2])
i += 2
- } else {
- b, n := nextByte(src, i)
- if n == 0 {
- i++ // dangling back slash
- } else if b == '.' {
- dst = append(dst, b)
- } else {
- if b < ' ' || b > '~' {
- dst = appendByte(dst, b)
- } else {
- dst = append(dst, b)
- }
- }
- i += n
+ continue
+ }
+
+ b, n := nextByte(s, i)
+ switch {
+ case n == 0:
+ i++ // dangling back slash
+ case b == '.':
+ dst.WriteByte('.')
+ case b < ' ' || b > '~':
+ writeEscapedByte(&dst, b)
+ default:
+ dst.WriteByte(b)
}
+ i += n
}
- dst = append(dst, '"')
- return string(dst)
+ dst.WriteByte('"')
+ return dst.String()
}
func sprintTxt(txt []string) string {
- var out []byte
+ var out strings.Builder
for i, s := range txt {
+ out.Grow(3 + len(s))
if i > 0 {
- out = append(out, ` "`...)
+ out.WriteString(` "`)
} else {
- out = append(out, '"')
+ out.WriteByte('"')
}
- bs := []byte(s)
- for j := 0; j < len(bs); {
- b, n := nextByte(bs, j)
+ for j := 0; j < len(s); {
+ b, n := nextByte(s, j)
if n == 0 {
break
}
- out = appendTXTStringByte(out, b)
+ writeTXTStringByte(&out, b)
j += n
}
- out = append(out, '"')
+ out.WriteByte('"')
}
- return string(out)
+ return out.String()
}
-func appendDomainNameByte(s []byte, b byte) []byte {
+func writeDomainNameByte(s *strings.Builder, b byte) {
switch b {
case '.', ' ', '\'', '@', ';', '(', ')': // additional chars to escape
- return append(s, '\\', b)
+ s.WriteByte('\\')
+ s.WriteByte(b)
+ default:
+ writeTXTStringByte(s, b)
}
- return appendTXTStringByte(s, b)
}
-func appendTXTStringByte(s []byte, b byte) []byte {
- switch b {
- case '"', '\\':
- return append(s, '\\', b)
+func writeTXTStringByte(s *strings.Builder, b byte) {
+ switch {
+ case b == '"' || b == '\\':
+ s.WriteByte('\\')
+ s.WriteByte(b)
+ case b < ' ' || b > '~':
+ writeEscapedByte(s, b)
+ default:
+ s.WriteByte(b)
}
- if b < ' ' || b > '~' {
- return appendByte(s, b)
- }
- return append(s, b)
}
-func appendByte(s []byte, b byte) []byte {
+func writeEscapedByte(s *strings.Builder, b byte) {
var buf [3]byte
bufs := strconv.AppendInt(buf[:0], int64(b), 10)
- s = append(s, '\\')
- for i := 0; i < 3-len(bufs); i++ {
- s = append(s, '0')
- }
- for _, r := range bufs {
- s = append(s, r)
+ s.WriteByte('\\')
+ for i := len(bufs); i < 3; i++ {
+ s.WriteByte('0')
}
- return s
+ s.Write(bufs)
}
-func nextByte(b []byte, offset int) (byte, int) {
- if offset >= len(b) {
+func nextByte(s string, offset int) (byte, int) {
+ if offset >= len(s) {
return 0, 0
}
- if b[offset] != '\\' {
+ if s[offset] != '\\' {
// not an escape sequence
- return b[offset], 1
+ return s[offset], 1
}
- switch len(b) - offset {
+ switch len(s) - offset {
case 1: // dangling escape
return 0, 0
case 2, 3: // too short to be \ddd
default: // maybe \ddd
- if isDigit(b[offset+1]) && isDigit(b[offset+2]) && isDigit(b[offset+3]) {
- return dddToByte(b[offset+1:]), 4
+ if isDigit(s[offset+1]) && isDigit(s[offset+2]) && isDigit(s[offset+3]) {
+ return dddStringToByte(s[offset+1:]), 4
}
}
// not \ddd, just an RFC 1035 "quoted" character
- return b[offset+1], 2
+ return s[offset+1], 2
}
// SPF RR. See RFC 4408, Section 3.1.1.
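
nextByte now delegates \ddd decoding to dddStringToByte, which sits outside this hunk. For readers of the diff, a stand-alone sketch of that decoding follows; it assumes the three digits were already validated with isDigit, and the function name here is illustrative.

package main

import "fmt"

// decodeDDD converts a validated RFC 1035 \DDD escape (three decimal digits)
// into the byte it encodes.
func decodeDDD(s string) byte {
    return (s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')
}

func main() {
    fmt.Println(decodeDDD("065")) // 65, i.e. 'A'
}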
diff --git a/vendor/github.com/miekg/dns/udp.go b/vendor/github.com/miekg/dns/udp.go
index 82ead6939..a4826ee2f 100644
--- a/vendor/github.com/miekg/dns/udp.go
+++ b/vendor/github.com/miekg/dns/udp.go
@@ -1,3 +1,5 @@
+// +build !windows
+
package dns
import (
diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_windows.go
new file mode 100644
index 000000000..6778c3c6c
--- /dev/null
+++ b/vendor/github.com/miekg/dns/udp_windows.go
@@ -0,0 +1,37 @@
+// +build windows
+
+package dns
+
+import "net"
+
+// SessionUDP holds the remote address
+type SessionUDP struct {
+ raddr *net.UDPAddr
+}
+
+// RemoteAddr returns the remote network address.
+func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr }
+
+// ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a
+// net.UDPAddr.
+// TODO(fastest963): Once go1.10 is released, use ReadMsgUDP.
+func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
+ n, raddr, err := conn.ReadFrom(b)
+ if err != nil {
+ return n, nil, err
+ }
+ session := &SessionUDP{raddr.(*net.UDPAddr)}
+ return n, session, err
+}
+
+// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
+// TODO(fastest963): Once go1.10 is released, use WriteMsgUDP.
+func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
+ n, err := conn.WriteTo(b, session.raddr)
+ return n, err
+}
+
+// TODO(fastest963): Once go1.10 is released and we can use *MsgUDP methods
+// use the standard method in udp.go for these.
+func setUDPSocketOptions(*net.UDPConn) error { return nil }
+func parseDstFromOOB([]byte, net.IP) net.IP { return nil }
diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go
index 18d1aa1f9..403b9ef97 100644
--- a/vendor/github.com/miekg/dns/version.go
+++ b/vendor/github.com/miekg/dns/version.go
@@ -3,7 +3,7 @@ package dns
import "fmt"
// Version is current version of this library.
-var Version = V{1, 0, 9}
+var Version = V{1, 0, 13}
// V holds the version of this library.
type V struct {
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
index 08491bef0..c0d70b2fa 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go
@@ -40,7 +40,8 @@ type Collector interface {
// Collector may yield any Metric it sees fit in its Collect method.
//
// This method idempotently sends the same descriptors throughout the
- // lifetime of the Collector.
+ // lifetime of the Collector. It may be called concurrently and
+ // therefore must be implemented in a concurrency safe way.
//
// If a Collector encounters an error while executing this method, it
// must send an invalid descriptor (created with NewInvalidDesc) to
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index 29dc8e348..4d7fa976e 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -187,6 +187,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
desc: desc,
upperBounds: opts.Buckets,
labelPairs: makeLabelPairs(desc, labelValues),
+ counts: [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}},
}
for i, upperBound := range h.upperBounds {
if i < len(h.upperBounds)-1 {
@@ -223,6 +224,21 @@ type histogramCounts struct {
}
type histogram struct {
+ // countAndHotIdx is a complicated one. For lock-free yet atomic
+ // observations, we need to save the total count of observations again,
+ // combined with the index of the currently-hot counts struct, so that
+ // we can perform the operation on both values atomically. The least
+ // significant bit defines the hot counts struct. The remaining 63 bits
+ // represent the total count of observations. This happens under the
+ // assumption that the 63bit count will never overflow. Rationale: An
+ // observations takes about 30ns. Let's assume it could happen in
+ // 10ns. Overflowing the counter will then take at least (2^63)*10ns,
+ // which is about 3000 years.
+ //
+ // This has to be first in the struct for 64bit alignment. See
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+ countAndHotIdx uint64
+
selfCollector
desc *Desc
writeMtx sync.Mutex // Only used in the Write method.
@@ -230,23 +246,12 @@ type histogram struct {
upperBounds []float64
// Two counts, one is "hot" for lock-free observations, the other is
- // "cold" for writing out a dto.Metric.
- counts [2]histogramCounts
-
+ // "cold" for writing out a dto.Metric. It has to be an array of
+ // pointers to guarantee 64bit alignment of the histogramCounts, see
+ // http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+ counts [2]*histogramCounts
hotIdx int // Index of currently-hot counts. Only used within Write.
- // This is a complicated one. For lock-free yet atomic observations, we
- // need to save the total count of observations again, combined with the
- // index of the currently-hot counts struct, so that we can perform the
- // operation on both values atomically. The least significant bit
- // defines the hot counts struct. The remaining 63 bits represent the
- // total count of observations. This happens under the assumption that
- // the 63bit count will never overflow. Rationale: An observations takes
- // about 30ns. Let's assume it could happen in 10ns. Overflowing the
- // counter will then take at least (2^63)*10ns, which is about 3000
- // years.
- countAndHotIdx uint64
-
labelPairs []*dto.LabelPair
}
@@ -270,7 +275,7 @@ func (h *histogram) Observe(v float64) {
// 63 bits gets incremented by 1. At the same time, we get the new value
// back, which we can use to find the currently-hot counts.
n := atomic.AddUint64(&h.countAndHotIdx, 2)
- hotCounts := &h.counts[n%2]
+ hotCounts := h.counts[n%2]
if i < len(h.upperBounds) {
atomic.AddUint64(&hotCounts.buckets[i], 1)
@@ -322,13 +327,13 @@ func (h *histogram) Write(out *dto.Metric) error {
if h.hotIdx == 0 {
count = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1
h.hotIdx = 1
- hotCounts = &h.counts[1]
- coldCounts = &h.counts[0]
+ hotCounts = h.counts[1]
+ coldCounts = h.counts[0]
} else {
count = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // Decrement.
h.hotIdx = 0
- hotCounts = &h.counts[0]
- coldCounts = &h.counts[1]
+ hotCounts = h.counts[0]
+ coldCounts = h.counts[1]
}
// Now we have to wait for the now-declared-cold counts to actually cool
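
The packed countAndHotIdx field described above can be illustrated in isolation. The sketch below (variable names are illustrative, not the library's internals) shows how a single atomic add both counts an observation and selects the hot counts slot, and how Write flips the hot slot:

package main

import (
    "fmt"
    "sync/atomic"
)

func main() {
    var countAndHotIdx uint64 // bit 0: hot index, bits 1..63: observation count

    // One observation: add 2, i.e. increment the upper 63 bits by one while
    // leaving the hot-index bit untouched.
    n := atomic.AddUint64(&countAndHotIdx, 2)
    fmt.Println("hot index:", n%2, "count:", n>>1) // hot index: 0 count: 1

    // Write flips the hot index by adding (or later subtracting) 1.
    n = atomic.AddUint64(&countAndHotIdx, 1)
    fmt.Println("hot index:", n%2, "count:", n>>1) // hot index: 1 count: 1
}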
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
index 2c0b90888..e422ef383 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -107,9 +107,6 @@ type Registerer interface {
// Collector, and for providing a Collector that will not cause
// inconsistent metrics on collection. (This would lead to scrape
// errors.)
- //
- // It is in general not safe to register the same Collector multiple
- // times concurrently.
Register(Collector) error
// MustRegister works like Register but registers any number of
// Collectors and panics upon the first registration that causes an
@@ -273,7 +270,12 @@ func (r *Registry) Register(c Collector) error {
close(descChan)
}()
r.mtx.Lock()
- defer r.mtx.Unlock()
+ defer func() {
+ // Drain channel in case of premature return to not leak a goroutine.
+ for range descChan {
+ }
+ r.mtx.Unlock()
+ }()
// Conduct various tests...
for desc := range descChan {
@@ -785,6 +787,8 @@ func checkMetricConsistency(
dtoMetric *dto.Metric,
metricHashes map[uint64]struct{},
) error {
+ name := metricFamily.GetName()
+
// Type consistency with metric family.
if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
@@ -793,33 +797,42 @@ func checkMetricConsistency(
metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
return fmt.Errorf(
"collected metric %q { %s} is not a %s",
- metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
+ name, dtoMetric, metricFamily.GetType(),
)
}
+ previousLabelName := ""
for _, labelPair := range dtoMetric.GetLabel() {
- if !checkLabelName(labelPair.GetName()) {
+ labelName := labelPair.GetName()
+ if labelName == previousLabelName {
+ return fmt.Errorf(
+ "collected metric %q { %s} has two or more labels with the same name: %s",
+ name, dtoMetric, labelName,
+ )
+ }
+ if !checkLabelName(labelName) {
return fmt.Errorf(
"collected metric %q { %s} has a label with an invalid name: %s",
- metricFamily.GetName(), dtoMetric, labelPair.GetName(),
+ name, dtoMetric, labelName,
)
}
- if dtoMetric.Summary != nil && labelPair.GetName() == quantileLabel {
+ if dtoMetric.Summary != nil && labelName == quantileLabel {
return fmt.Errorf(
"collected metric %q { %s} must not have an explicit %q label",
- metricFamily.GetName(), dtoMetric, quantileLabel,
+ name, dtoMetric, quantileLabel,
)
}
if !utf8.ValidString(labelPair.GetValue()) {
return fmt.Errorf(
"collected metric %q { %s} has a label named %q whose value is not utf8: %#v",
- metricFamily.GetName(), dtoMetric, labelPair.GetName(), labelPair.GetValue())
+ name, dtoMetric, labelName, labelPair.GetValue())
}
+ previousLabelName = labelName
}
// Is the metric unique (i.e. no other metric with the same name and the same labels)?
h := hashNew()
- h = hashAdd(h, metricFamily.GetName())
+ h = hashAdd(h, name)
h = hashAddByte(h, separatorByte)
// Make sure label pairs are sorted. We depend on it for the consistency
// check.
@@ -833,7 +846,7 @@ func checkMetricConsistency(
if _, exists := metricHashes[h]; exists {
return fmt.Errorf(
"collected metric %q { %s} was collected before with the same name and label values",
- metricFamily.GetName(), dtoMetric,
+ name, dtoMetric,
)
}
metricHashes[h] = struct{}{}
diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go
index f11321cd0..46b74364e 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_create.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -14,13 +14,44 @@
package expfmt
import (
+ "bytes"
"fmt"
"io"
"math"
- "strings"
+ "strconv"
+ "sync"
- dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/model"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// enhancedWriter has all the enhanced write functions needed here. bytes.Buffer
+// implements it.
+type enhancedWriter interface {
+ io.Writer
+ WriteRune(r rune) (n int, err error)
+ WriteString(s string) (n int, err error)
+ WriteByte(c byte) error
+}
+
+const (
+ initialBufSize = 512
+ initialNumBufSize = 24
+)
+
+var (
+ bufPool = sync.Pool{
+ New: func() interface{} {
+ return bytes.NewBuffer(make([]byte, 0, initialNumBufSize))
+ },
+ }
+ numBufPool = sync.Pool{
+ New: func() interface{} {
+ b := make([]byte, 0, initialNumBufSize)
+ return &b
+ },
+ }
)
// MetricFamilyToText converts a MetricFamily proto message into text format and
@@ -32,37 +63,92 @@ import (
// will result in invalid text format output.
//
// This method fulfills the type 'prometheus.encoder'.
-func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
- var written int
-
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
// Fail-fast checks.
if len(in.Metric) == 0 {
- return written, fmt.Errorf("MetricFamily has no metrics: %s", in)
+ return 0, fmt.Errorf("MetricFamily has no metrics: %s", in)
}
name := in.GetName()
if name == "" {
- return written, fmt.Errorf("MetricFamily has no name: %s", in)
+ return 0, fmt.Errorf("MetricFamily has no name: %s", in)
}
+ // Try the interface upgrade. If it doesn't work, we'll use a
+ // bytes.Buffer from the sync.Pool and write out its content to out in a
+ // single go in the end.
+ w, ok := out.(enhancedWriter)
+ if !ok {
+ b := bufPool.Get().(*bytes.Buffer)
+ b.Reset()
+ w = b
+ defer func() {
+ bWritten, bErr := out.Write(b.Bytes())
+ written = bWritten
+ if err == nil {
+ err = bErr
+ }
+ bufPool.Put(b)
+ }()
+ }
+
+ var n int
+
// Comments, first HELP, then TYPE.
if in.Help != nil {
- n, err := fmt.Fprintf(
- out, "# HELP %s %s\n",
- name, escapeString(*in.Help, false),
- )
+ n, err = w.WriteString("# HELP ")
written += n
if err != nil {
- return written, err
+ return
+ }
+ n, err = w.WriteString(name)
+ written += n
+ if err != nil {
+ return
+ }
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return
+ }
+ n, err = writeEscapedString(w, *in.Help, false)
+ written += n
+ if err != nil {
+ return
+ }
+ err = w.WriteByte('\n')
+ written++
+ if err != nil {
+ return
}
}
+ n, err = w.WriteString("# TYPE ")
+ written += n
+ if err != nil {
+ return
+ }
+ n, err = w.WriteString(name)
+ written += n
+ if err != nil {
+ return
+ }
metricType := in.GetType()
- n, err := fmt.Fprintf(
- out, "# TYPE %s %s\n",
- name, strings.ToLower(metricType.String()),
- )
+ switch metricType {
+ case dto.MetricType_COUNTER:
+ n, err = w.WriteString(" counter\n")
+ case dto.MetricType_GAUGE:
+ n, err = w.WriteString(" gauge\n")
+ case dto.MetricType_SUMMARY:
+ n, err = w.WriteString(" summary\n")
+ case dto.MetricType_UNTYPED:
+ n, err = w.WriteString(" untyped\n")
+ case dto.MetricType_HISTOGRAM:
+ n, err = w.WriteString(" histogram\n")
+ default:
+ return written, fmt.Errorf("unknown metric type %s", metricType.String())
+ }
written += n
if err != nil {
- return written, err
+ return
}
// Finally the samples, one line for each.
@@ -75,9 +161,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
)
}
n, err = writeSample(
- name, metric, "", "",
+ w, name, "", metric, "", 0,
metric.Counter.GetValue(),
- out,
)
case dto.MetricType_GAUGE:
if metric.Gauge == nil {
@@ -86,9 +171,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
)
}
n, err = writeSample(
- name, metric, "", "",
+ w, name, "", metric, "", 0,
metric.Gauge.GetValue(),
- out,
)
case dto.MetricType_UNTYPED:
if metric.Untyped == nil {
@@ -97,9 +181,8 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
)
}
n, err = writeSample(
- name, metric, "", "",
+ w, name, "", metric, "", 0,
metric.Untyped.GetValue(),
- out,
)
case dto.MetricType_SUMMARY:
if metric.Summary == nil {
@@ -109,29 +192,26 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
}
for _, q := range metric.Summary.Quantile {
n, err = writeSample(
- name, metric,
- model.QuantileLabel, fmt.Sprint(q.GetQuantile()),
+ w, name, "", metric,
+ model.QuantileLabel, q.GetQuantile(),
q.GetValue(),
- out,
)
written += n
if err != nil {
- return written, err
+ return
}
}
n, err = writeSample(
- name+"_sum", metric, "", "",
+ w, name, "_sum", metric, "", 0,
metric.Summary.GetSampleSum(),
- out,
)
+ written += n
if err != nil {
- return written, err
+ return
}
- written += n
n, err = writeSample(
- name+"_count", metric, "", "",
+ w, name, "_count", metric, "", 0,
float64(metric.Summary.GetSampleCount()),
- out,
)
case dto.MetricType_HISTOGRAM:
if metric.Histogram == nil {
@@ -140,46 +220,42 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
)
}
infSeen := false
- for _, q := range metric.Histogram.Bucket {
+ for _, b := range metric.Histogram.Bucket {
n, err = writeSample(
- name+"_bucket", metric,
- model.BucketLabel, fmt.Sprint(q.GetUpperBound()),
- float64(q.GetCumulativeCount()),
- out,
+ w, name, "_bucket", metric,
+ model.BucketLabel, b.GetUpperBound(),
+ float64(b.GetCumulativeCount()),
)
written += n
if err != nil {
- return written, err
+ return
}
- if math.IsInf(q.GetUpperBound(), +1) {
+ if math.IsInf(b.GetUpperBound(), +1) {
infSeen = true
}
}
if !infSeen {
n, err = writeSample(
- name+"_bucket", metric,
- model.BucketLabel, "+Inf",
+ w, name, "_bucket", metric,
+ model.BucketLabel, math.Inf(+1),
float64(metric.Histogram.GetSampleCount()),
- out,
)
+ written += n
if err != nil {
- return written, err
+ return
}
- written += n
}
n, err = writeSample(
- name+"_sum", metric, "", "",
+ w, name, "_sum", metric, "", 0,
metric.Histogram.GetSampleSum(),
- out,
)
+ written += n
if err != nil {
- return written, err
+ return
}
- written += n
n, err = writeSample(
- name+"_count", metric, "", "",
+ w, name, "_count", metric, "", 0,
float64(metric.Histogram.GetSampleCount()),
- out,
)
default:
return written, fmt.Errorf(
@@ -188,116 +264,219 @@ func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {
}
written += n
if err != nil {
- return written, err
+ return
}
}
- return written, nil
+ return
}
-// writeSample writes a single sample in text format to out, given the metric
+// writeSample writes a single sample in text format to w, given the metric
// name, the metric proto message itself, optionally an additional label name
-// and value (use empty strings if not required), and the value. The function
-// returns the number of bytes written and any error encountered.
+// with a float64 value (use empty string as label name if not required), and
+// the value. The function returns the number of bytes written and any error
+// encountered.
func writeSample(
- name string,
+ w enhancedWriter,
+ name, suffix string,
metric *dto.Metric,
- additionalLabelName, additionalLabelValue string,
+ additionalLabelName string, additionalLabelValue float64,
value float64,
- out io.Writer,
) (int, error) {
var written int
- n, err := fmt.Fprint(out, name)
+ n, err := w.WriteString(name)
written += n
if err != nil {
return written, err
}
- n, err = labelPairsToText(
- metric.Label,
- additionalLabelName, additionalLabelValue,
- out,
+ if suffix != "" {
+ n, err = w.WriteString(suffix)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ }
+ n, err = writeLabelPairs(
+ w, metric.Label, additionalLabelName, additionalLabelValue,
)
written += n
if err != nil {
return written, err
}
- n, err = fmt.Fprintf(out, " %v", value)
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err = writeFloat(w, value)
written += n
if err != nil {
return written, err
}
if metric.TimestampMs != nil {
- n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs)
+ err = w.WriteByte(' ')
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err = writeInt(w, *metric.TimestampMs)
written += n
if err != nil {
return written, err
}
}
- n, err = out.Write([]byte{'\n'})
- written += n
+ err = w.WriteByte('\n')
+ written++
if err != nil {
return written, err
}
return written, nil
}
-// labelPairsToText converts a slice of LabelPair proto messages plus the
+// writeLabelPairs converts a slice of LabelPair proto messages plus the
// explicitly given additional label pair into text formatted as required by the
-// text format and writes it to 'out'. An empty slice in combination with an
-// empty string 'additionalLabelName' results in nothing being
-// written. Otherwise, the label pairs are written, escaped as required by the
-// text format, and enclosed in '{...}'. The function returns the number of
-// bytes written and any error encountered.
-func labelPairsToText(
+// text format and writes it to 'w'. An empty slice in combination with an empty
+// string 'additionalLabelName' results in nothing being written. Otherwise, the
+// label pairs are written, escaped as required by the text format, and enclosed
+// in '{...}'. The function returns the number of bytes written and any error
+// encountered.
+func writeLabelPairs(
+ w enhancedWriter,
in []*dto.LabelPair,
- additionalLabelName, additionalLabelValue string,
- out io.Writer,
+ additionalLabelName string, additionalLabelValue float64,
) (int, error) {
if len(in) == 0 && additionalLabelName == "" {
return 0, nil
}
- var written int
- separator := '{'
+ var (
+ written int
+ separator byte = '{'
+ )
for _, lp := range in {
- n, err := fmt.Fprintf(
- out, `%c%s="%s"`,
- separator, lp.GetName(), escapeString(lp.GetValue(), true),
- )
+ err := w.WriteByte(separator)
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err := w.WriteString(lp.GetName())
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = w.WriteString(`="`)
written += n
if err != nil {
return written, err
}
+ n, err = writeEscapedString(w, lp.GetValue(), true)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ err = w.WriteByte('"')
+ written++
+ if err != nil {
+ return written, err
+ }
separator = ','
}
if additionalLabelName != "" {
- n, err := fmt.Fprintf(
- out, `%c%s="%s"`,
- separator, additionalLabelName,
- escapeString(additionalLabelValue, true),
- )
+ err := w.WriteByte(separator)
+ written++
+ if err != nil {
+ return written, err
+ }
+ n, err := w.WriteString(additionalLabelName)
written += n
if err != nil {
return written, err
}
+ n, err = w.WriteString(`="`)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ n, err = writeFloat(w, additionalLabelValue)
+ written += n
+ if err != nil {
+ return written, err
+ }
+ err = w.WriteByte('"')
+ written++
+ if err != nil {
+ return written, err
+ }
}
- n, err := out.Write([]byte{'}'})
- written += n
+ err := w.WriteByte('}')
+ written++
if err != nil {
return written, err
}
return written, nil
}
-var (
- escape = strings.NewReplacer("\\", `\\`, "\n", `\n`)
- escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`)
-)
-
-// escapeString replaces '\' by '\\', new line character by '\n', and - if
+// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if
// includeDoubleQuote is true - '"' by '\"'.
-func escapeString(v string, includeDoubleQuote bool) string {
- if includeDoubleQuote {
- return escapeWithDoubleQuote.Replace(v)
+func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) {
+ var (
+ written, n int
+ err error
+ )
+ for _, r := range v {
+ switch r {
+ case '\\':
+ n, err = w.WriteString(`\\`)
+ case '\n':
+ n, err = w.WriteString(`\n`)
+ case '"':
+ if includeDoubleQuote {
+ n, err = w.WriteString(`\"`)
+ } else {
+ n, err = w.WriteRune(r)
+ }
+ default:
+ n, err = w.WriteRune(r)
+ }
+ written += n
+ if err != nil {
+ return written, err
+ }
}
+ return written, nil
+}
+
+// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes
+// a few common cases for increased efficiency. For non-hardcoded cases, it uses
+// strconv.AppendFloat to avoid allocations, similar to writeInt.
+func writeFloat(w enhancedWriter, f float64) (int, error) {
+ switch {
+ case f == 1:
+ return 1, w.WriteByte('1')
+ case f == 0:
+ return 1, w.WriteByte('0')
+ case f == -1:
+ return w.WriteString("-1")
+ case math.IsNaN(f):
+ return w.WriteString("NaN")
+ case math.IsInf(f, +1):
+ return w.WriteString("+Inf")
+ case math.IsInf(f, -1):
+ return w.WriteString("-Inf")
+ default:
+ bp := numBufPool.Get().(*[]byte)
+ *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64)
+ written, err := w.Write(*bp)
+ numBufPool.Put(bp)
+ return written, err
+ }
+}
- return escape.Replace(v)
+// writeInt is equivalent to fmt.Fprint with an int64 argument but uses
+// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid
+// allocations.
+func writeInt(w enhancedWriter, i int64) (int, error) {
+ bp := numBufPool.Get().(*[]byte)
+ *bp = strconv.AppendInt((*bp)[:0], i, 10)
+ written, err := w.Write(*bp)
+ numBufPool.Put(bp)
+ return written, err
}
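
To see both output paths of the rewritten encoder: a *bytes.Buffer satisfies enhancedWriter and is written to directly, while a writer without WriteByte/WriteRune (os.Stdout below) goes through the pooled buffer. A small usage sketch, with an illustrative metric family:

package main

import (
    "bytes"
    "fmt"
    "os"

    "github.com/golang/protobuf/proto"
    dto "github.com/prometheus/client_model/go"
    "github.com/prometheus/common/expfmt"
)

func main() {
    mf := &dto.MetricFamily{
        Name: proto.String("demo_requests_total"), // illustrative metric name
        Type: dto.MetricType_COUNTER.Enum(),
        Metric: []*dto.Metric{
            {Counter: &dto.Counter{Value: proto.Float64(42)}},
        },
    }

    // bytes.Buffer implements enhancedWriter, so it is written to directly.
    var buf bytes.Buffer
    if _, err := expfmt.MetricFamilyToText(&buf, mf); err != nil {
        panic(err)
    }
    fmt.Print(buf.String()) // "# TYPE demo_requests_total counter" plus the sample line

    // os.Stdout lacks WriteByte/WriteRune, so the pooled-buffer path is used.
    if _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {
        panic(err)
    }
}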
diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go
index 1ad21c91a..2ff228e9d 100644
--- a/vendor/github.com/prometheus/procfs/internal/util/parse.go
+++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go
@@ -13,7 +13,11 @@
package util
-import "strconv"
+import (
+ "io/ioutil"
+ "strconv"
+ "strings"
+)
// ParseUint32s parses a slice of strings into a slice of uint32s.
func ParseUint32s(ss []string) ([]uint32, error) {
@@ -44,3 +48,12 @@ func ParseUint64s(ss []string) ([]uint64, error) {
return us, nil
}
+
+// ReadUintFromFile reads a file and attempts to parse a uint64 from it.
+func ReadUintFromFile(path string) (uint64, error) {
+ data, err := ioutil.ReadFile(path)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
+}
diff --git a/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go
new file mode 100644
index 000000000..df0d567b7
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/internal/util/sysreadfile_linux.go
@@ -0,0 +1,45 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+package util
+
+import (
+ "bytes"
+ "os"
+ "syscall"
+)
+
+// SysReadFile is a simplified ioutil.ReadFile that invokes syscall.Read directly.
+// https://github.com/prometheus/node_exporter/pull/728/files
+func SysReadFile(file string) (string, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+
+ // On some machines, hwmon drivers are broken and return EAGAIN. This causes
+ // Go's ioutil.ReadFile implementation to poll forever.
+ //
+ // Since we either want to read data or bail immediately, do the simplest
+ // possible read using syscall directly.
+ b := make([]byte, 128)
+ n, err := syscall.Read(int(f.Fd()), b)
+ if err != nil {
+ return "", err
+ }
+
+ return string(bytes.TrimSpace(b[:n])), nil
+}
diff --git a/vendor/github.com/siddontang/go-log/LICENSE b/vendor/github.com/siddontang/go-log/LICENSE
new file mode 100644
index 000000000..7ece9fdf5
--- /dev/null
+++ b/vendor/github.com/siddontang/go-log/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 siddontang
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/siddontang/go-log/log/doc.go b/vendor/github.com/siddontang/go-log/log/doc.go
new file mode 100644
index 000000000..7477f3af4
--- /dev/null
+++ b/vendor/github.com/siddontang/go-log/log/doc.go
@@ -0,0 +1,20 @@
+// Package log supplies more advanced features than the standard Go log package.
+//
+// It supports different log levels: trace, debug, info, warn, error, fatal.
+//
+// It also supports different log handlers so you can log to stdout, a file, a socket, etc.
+//
+// Use
+//
+// import "github.com/siddontang/go-log/log"
+//
+// //log with different level
+// log.Info("hello world")
+// log.Error("hello world")
+//
+// //create a logger with specified handler
+// h := NewStreamHandler(os.Stdout)
+// l := log.NewDefault(h)
+// l.Info("hello world")
+//
+package log
diff --git a/vendor/github.com/siddontang/go-log/log/filehandler.go b/vendor/github.com/siddontang/go-log/log/filehandler.go
new file mode 100644
index 000000000..454865d81
--- /dev/null
+++ b/vendor/github.com/siddontang/go-log/log/filehandler.go
@@ -0,0 +1,230 @@
+package log
+
+import (
+ "fmt"
+ "os"
+ "path"
+ "time"
+)
+
+// FileHandler writes log to a file.
+type FileHandler struct {
+ fd *os.File
+}
+
+// NewFileHandler creates a FileHandler
+func NewFileHandler(fileName string, flag int) (*FileHandler, error) {
+ dir := path.Dir(fileName)
+ os.Mkdir(dir, 0777)
+
+ f, err := os.OpenFile(fileName, flag, 0755)
+ if err != nil {
+ return nil, err
+ }
+
+ h := new(FileHandler)
+
+ h.fd = f
+
+ return h, nil
+}
+
+// Write implements Handler interface
+func (h *FileHandler) Write(b []byte) (n int, err error) {
+ return h.fd.Write(b)
+}
+
+// Close implements Handler interface
+func (h *FileHandler) Close() error {
+ return h.fd.Close()
+}
+
+// RotatingFileHandler writes log to a file; if the file size exceeds maxBytes,
+// it backs up the current file and opens a new one.
+//
+// The maximum number of backups is set by backupCount; the oldest backup is deleted once there are too many.
+type RotatingFileHandler struct {
+ fd *os.File
+
+ fileName string
+ maxBytes int
+ curBytes int
+ backupCount int
+}
+
+// NewRotatingFileHandler creates a RotatingFileHandler
+func NewRotatingFileHandler(fileName string, maxBytes int, backupCount int) (*RotatingFileHandler, error) {
+ dir := path.Dir(fileName)
+ os.MkdirAll(dir, 0777)
+
+ h := new(RotatingFileHandler)
+
+ if maxBytes <= 0 {
+ return nil, fmt.Errorf("invalid max bytes")
+ }
+
+ h.fileName = fileName
+ h.maxBytes = maxBytes
+ h.backupCount = backupCount
+
+ var err error
+ h.fd, err = os.OpenFile(fileName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
+ if err != nil {
+ return nil, err
+ }
+
+ f, err := h.fd.Stat()
+ if err != nil {
+ return nil, err
+ }
+ h.curBytes = int(f.Size())
+
+ return h, nil
+}
+
+// Write implements Handler interface
+func (h *RotatingFileHandler) Write(p []byte) (n int, err error) {
+ h.doRollover()
+ n, err = h.fd.Write(p)
+ h.curBytes += n
+ return
+}
+
+// Close implements Handler interface
+func (h *RotatingFileHandler) Close() error {
+ if h.fd != nil {
+ return h.fd.Close()
+ }
+ return nil
+}
+
+func (h *RotatingFileHandler) doRollover() {
+ if h.curBytes < h.maxBytes {
+ return
+ }
+
+ f, err := h.fd.Stat()
+ if err != nil {
+ return
+ }
+
+ if h.maxBytes <= 0 {
+ return
+ } else if f.Size() < int64(h.maxBytes) {
+ h.curBytes = int(f.Size())
+ return
+ }
+
+ if h.backupCount > 0 {
+ h.fd.Close()
+
+ for i := h.backupCount - 1; i > 0; i-- {
+ sfn := fmt.Sprintf("%s.%d", h.fileName, i)
+ dfn := fmt.Sprintf("%s.%d", h.fileName, i+1)
+
+ os.Rename(sfn, dfn)
+ }
+
+ dfn := fmt.Sprintf("%s.1", h.fileName)
+ os.Rename(h.fileName, dfn)
+
+ h.fd, _ = os.OpenFile(h.fileName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
+ h.curBytes = 0
+ f, err := h.fd.Stat()
+ if err != nil {
+ return
+ }
+ h.curBytes = int(f.Size())
+ }
+}
+
+// TimeRotatingFileHandler writes log to a file and backs up the current
+// file, opening a new one, at the interval you specify.
+//
+// Refer to http://docs.python.org/2/library/logging.handlers.html;
+// it works like Python's TimedRotatingFileHandler.
+type TimeRotatingFileHandler struct {
+ fd *os.File
+
+ baseName string
+ interval int64
+ suffix string
+ rolloverAt int64
+}
+
+// Rotation periods for TimeRotatingFileHandler
+const (
+ WhenSecond = iota
+ WhenMinute
+ WhenHour
+ WhenDay
+)
+
+// NewTimeRotatingFileHandler creates a TimeRotatingFileHandler
+func NewTimeRotatingFileHandler(baseName string, when int8, interval int) (*TimeRotatingFileHandler, error) {
+ dir := path.Dir(baseName)
+ os.MkdirAll(dir, 0777)
+
+ h := new(TimeRotatingFileHandler)
+
+ h.baseName = baseName
+
+ switch when {
+ case WhenSecond:
+ h.interval = 1
+ h.suffix = "2006-01-02_15-04-05"
+ case WhenMinute:
+ h.interval = 60
+ h.suffix = "2006-01-02_15-04"
+ case WhenHour:
+ h.interval = 3600
+ h.suffix = "2006-01-02_15"
+ case WhenDay:
+ h.interval = 3600 * 24
+ h.suffix = "2006-01-02"
+ default:
+ return nil, fmt.Errorf("invalid when_rotate: %d", when)
+ }
+
+ h.interval = h.interval * int64(interval)
+
+ var err error
+ h.fd, err = os.OpenFile(h.baseName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
+ if err != nil {
+ return nil, err
+ }
+
+ fInfo, _ := h.fd.Stat()
+ h.rolloverAt = fInfo.ModTime().Unix() + h.interval
+
+ return h, nil
+}
+
+func (h *TimeRotatingFileHandler) doRollover() {
+ // refer to http://hg.python.org/cpython/file/2.7/Lib/logging/handlers.py
+ now := time.Now()
+
+ if h.rolloverAt <= now.Unix() {
+ fName := h.baseName + now.Format(h.suffix)
+ h.fd.Close()
+ e := os.Rename(h.baseName, fName)
+ if e != nil {
+ panic(e)
+ }
+
+ h.fd, _ = os.OpenFile(h.baseName, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
+
+ h.rolloverAt = time.Now().Unix() + h.interval
+ }
+}
+
+// Write implements Handler interface
+func (h *TimeRotatingFileHandler) Write(b []byte) (n int, err error) {
+ h.doRollover()
+ return h.fd.Write(b)
+}
+
+// Close implements Handler interface
+func (h *TimeRotatingFileHandler) Close() error {
+ return h.fd.Close()
+}
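
A short usage sketch for the time-based handler added above; the base name, rotation period, and payload are illustrative:

package main

import "github.com/siddontang/go-log/log"

func main() {
    // Rotate once per hour; backups get a time-stamped suffix appended to
    // the base name.
    h, err := log.NewTimeRotatingFileHandler("/tmp/app.log", log.WhenHour, 1)
    if err != nil {
        panic(err)
    }
    defer h.Close()

    if _, err := h.Write([]byte("rotated by time\n")); err != nil {
        panic(err)
    }
}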
diff --git a/vendor/github.com/siddontang/go-log/log/handler.go b/vendor/github.com/siddontang/go-log/log/handler.go
new file mode 100644
index 000000000..30a6c4f45
--- /dev/null
+++ b/vendor/github.com/siddontang/go-log/log/handler.go
@@ -0,0 +1,54 @@
+package log
+
+import (
+ "io"
+)
+
+// Handler writes logs to a destination.
+type Handler interface {
+ Write(p []byte) (n int, err error)
+ Close() error
+}
+
+// StreamHandler writes logs to a specified io.Writer, e.g. stdout, stderr, etc.
+type StreamHandler struct {
+ w io.Writer
+}
+
+// NewStreamHandler creates a StreamHandler
+func NewStreamHandler(w io.Writer) (*StreamHandler, error) {
+ h := new(StreamHandler)
+
+ h.w = w
+
+ return h, nil
+}
+
+// Write implements Handler interface
+func (h *StreamHandler) Write(b []byte) (n int, err error) {
+ return h.w.Write(b)
+}
+
+// Close implements Handler interface
+func (h *StreamHandler) Close() error {
+ return nil
+}
+
+// NullHandler does nothing; it discards everything written to it.
+type NullHandler struct {
+}
+
+// NewNullHandler creates a NullHandler
+func NewNullHandler() (*NullHandler, error) {
+ return new(NullHandler), nil
+}
+
+// Write implements Handler interface
+func (h *NullHandler) Write(b []byte) (n int, err error) {
+ return len(b), nil
+}
+
+// Close implements Handler interface
+func (h *NullHandler) Close() error {
+ return nil
+}
diff --git a/vendor/github.com/siddontang/go-log/log/log.go b/vendor/github.com/siddontang/go-log/log/log.go
new file mode 100644
index 000000000..956186d9c
--- /dev/null
+++ b/vendor/github.com/siddontang/go-log/log/log.go
@@ -0,0 +1,137 @@
+package log
+
+import (
+ "fmt"
+ "os"
+)
+
+var logger = NewDefault(newStdHandler())
+
+// SetDefaultLogger changes the global logger
+func SetDefaultLogger(l *Logger) {
+ logger = l
+}
+
+// SetLevel changes the logger level
+func SetLevel(level Level) {
+ logger.SetLevel(level)
+}
+
+// SetLevelByName changes the logger level by name
+func SetLevelByName(name string) {
+ logger.SetLevelByName(name)
+}
+
+// Fatal records the log with fatal level and exits
+func Fatal(args ...interface{}) {
+ logger.Output(2, LevelFatal, fmt.Sprint(args...))
+ os.Exit(1)
+}
+
+// Fatalf records the log with fatal level and exits
+func Fatalf(format string, args ...interface{}) {
+ logger.Output(2, LevelFatal, fmt.Sprintf(format, args...))
+ os.Exit(1)
+}
+
+// Fatalln records the log with fatal level and exits
+func Fatalln(args ...interface{}) {
+ logger.Output(2, LevelFatal, fmt.Sprintln(args...))
+ os.Exit(1)
+}
+
+// Panic records the log with fatal level and panics
+func Panic(args ...interface{}) {
+ msg := fmt.Sprint(args...)
+ logger.Output(2, LevelError, msg)
+ panic(msg)
+}
+
+// Panicf records the log with fatal level and panics
+func Panicf(format string, args ...interface{}) {
+ msg := fmt.Sprintf(format, args...)
+ logger.Output(2, LevelError, msg)
+ panic(msg)
+}
+
+// Panicln records the log with fatal level and panics
+func Panicln(args ...interface{}) {
+ msg := fmt.Sprintln(args...)
+ logger.Output(2, LevelError, msg)
+ panic(msg)
+}
+
+// Print records the log with trace level
+func Print(args ...interface{}) {
+ logger.Output(2, LevelTrace, fmt.Sprint(args...))
+}
+
+// Printf records the log with trace level
+func Printf(format string, args ...interface{}) {
+ logger.Output(2, LevelTrace, fmt.Sprintf(format, args...))
+}
+
+// Println records the log with trace level
+func Println(args ...interface{}) {
+ logger.Output(2, LevelTrace, fmt.Sprintln(args...))
+}
+
+// Debug records the log with debug level
+func Debug(args ...interface{}) {
+ logger.Output(2, LevelDebug, fmt.Sprint(args...))
+}
+
+// Debugf records the log with debug level
+func Debugf(format string, args ...interface{}) {
+ logger.Output(2, LevelDebug, fmt.Sprintf(format, args...))
+}
+
+// Debugln records the log with debug level
+func Debugln(args ...interface{}) {
+ logger.Output(2, LevelDebug, fmt.Sprintln(args...))
+}
+
+// Error records the log with error level
+func Error(args ...interface{}) {
+ logger.Output(2, LevelError, fmt.Sprint(args...))
+}
+
+// Errorf records the log with error level
+func Errorf(format string, args ...interface{}) {
+ logger.Output(2, LevelError, fmt.Sprintf(format, args...))
+}
+
+// Errorln records the log with error level
+func Errorln(args ...interface{}) {
+ logger.Output(2, LevelError, fmt.Sprintln(args...))
+}
+
+// Info records the log with info level
+func Info(args ...interface{}) {
+ logger.Output(2, LevelInfo, fmt.Sprint(args...))
+}
+
+// Infof records the log with info level
+func Infof(format string, args ...interface{}) {
+ logger.Output(2, LevelInfo, fmt.Sprintf(format, args...))
+}
+
+// Infoln records the log with info level
+func Infoln(args ...interface{}) {
+ logger.Output(2, LevelInfo, fmt.Sprintln(args...))
+}
+
+// Warn records the log with warn level
+func Warn(args ...interface{}) {
+ logger.Output(2, LevelWarn, fmt.Sprint(args...))
+}
+
+// Warnf records the log with warn level
+func Warnf(format string, args ...interface{}) {
+ logger.Output(2, LevelWarn, fmt.Sprintf(format, args...))
+}
+
+// Warnln records the log with warn level
+func Warnln(args ...interface{}) {
+ logger.Output(2, LevelWarn, fmt.Sprintln(args...))
+}
diff --git a/vendor/github.com/siddontang/go-log/log/logger.go b/vendor/github.com/siddontang/go-log/log/logger.go
new file mode 100644
index 000000000..4f44c3e0a
--- /dev/null
+++ b/vendor/github.com/siddontang/go-log/log/logger.go
@@ -0,0 +1,301 @@
+package log
+
+import (
+ "fmt"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/siddontang/go-log/loggers"
+)
+
+const (
+ timeFormat = "2006/01/02 15:04:05"
+ maxBufPoolSize = 16
+)
+
+// Logger flag
+const (
+ Ltime = 1 << iota // time format "2006/01/02 15:04:05"
+ Lfile // file.go:123
+ Llevel // [Trace|Debug|Info...]
+)
+
+// Level type
+type Level int
+
+// Log levels, from low to high; a higher level means more serious.
+const (
+ LevelTrace Level = iota
+ LevelDebug
+ LevelInfo
+ LevelWarn
+ LevelError
+ LevelFatal
+)
+
+// String returns level String
+func (l Level) String() string {
+ switch l {
+ case LevelTrace:
+ return "trace"
+ case LevelDebug:
+ return "debug"
+ case LevelInfo:
+ return "info"
+ case LevelWarn:
+ return "warn"
+ case LevelError:
+ return "error"
+ case LevelFatal:
+ return "fatal"
+ }
+ // return default info
+ return "info"
+}
+
+// Logger is the logger to record log
+type Logger struct {
+ // TODO: support logger.Contextual
+ loggers.Advanced
+
+ level Level
+ flag int
+
+ hLock sync.Mutex
+ handler Handler
+
+ bufs sync.Pool
+}
+
+// New creates a logger with specified handler and flag
+func New(handler Handler, flag int) *Logger {
+ var l = new(Logger)
+
+ l.level = LevelInfo
+ l.handler = handler
+
+ l.flag = flag
+
+ l.bufs = sync.Pool{
+ New: func() interface{} {
+ return make([]byte, 0, 1024)
+ },
+ }
+
+ return l
+}
+
+// NewDefault creates default logger with specified handler and flag: Ltime|Lfile|Llevel
+func NewDefault(handler Handler) *Logger {
+ return New(handler, Ltime|Lfile|Llevel)
+}
+
+func newStdHandler() *StreamHandler {
+ h, _ := NewStreamHandler(os.Stdout)
+ return h
+}
+
+// Close closes the logger
+func (l *Logger) Close() {
+ l.hLock.Lock()
+ defer l.hLock.Unlock()
+ l.handler.Close()
+}
+
+// SetLevel sets the log level; messages below this level are not logged.
+func (l *Logger) SetLevel(level Level) {
+ l.level = level
+}
+
+// SetLevelByName sets log level by name
+func (l *Logger) SetLevelByName(name string) {
+ level := LevelInfo
+ switch strings.ToLower(name) {
+ case "trace":
+ level = LevelTrace
+ case "debug":
+ level = LevelDebug
+ case "warn", "warning":
+ level = LevelWarn
+ case "error":
+ level = LevelError
+ case "fatal":
+ level = LevelFatal
+ default:
+ level = LevelInfo
+ }
+
+ l.SetLevel(level)
+}
+
+// Output records the log with the specified call stack depth and log level.
+func (l *Logger) Output(callDepth int, level Level, msg string) {
+ if l.level > level {
+ return
+ }
+
+ buf := l.bufs.Get().([]byte)
+ buf = buf[0:0]
+ defer l.bufs.Put(buf)
+
+ if l.flag&Ltime > 0 {
+ now := time.Now().Format(timeFormat)
+ buf = append(buf, '[')
+ buf = append(buf, now...)
+ buf = append(buf, "] "...)
+ }
+
+ if l.flag&Llevel > 0 {
+ buf = append(buf, '[')
+ buf = append(buf, level.String()...)
+ buf = append(buf, "] "...)
+ }
+
+ if l.flag&Lfile > 0 {
+ _, file, line, ok := runtime.Caller(callDepth)
+ if !ok {
+ file = "???"
+ line = 0
+ } else {
+ for i := len(file) - 1; i > 0; i-- {
+ if file[i] == '/' {
+ file = file[i+1:]
+ break
+ }
+ }
+ }
+
+ buf = append(buf, file...)
+ buf = append(buf, ':')
+
+ buf = strconv.AppendInt(buf, int64(line), 10)
+ buf = append(buf, ' ')
+ }
+
+ buf = append(buf, msg...)
+ if len(msg) == 0 || msg[len(msg)-1] != '\n' {
+ buf = append(buf, '\n')
+ }
+
+ l.hLock.Lock()
+ l.handler.Write(buf)
+ l.hLock.Unlock()
+}
+
+// Fatal records the log with fatal level and exits
+func (l *Logger) Fatal(args ...interface{}) {
+ l.Output(2, LevelFatal, fmt.Sprint(args...))
+ os.Exit(1)
+}
+
+// Fatalf records the log with fatal level and exits
+func (l *Logger) Fatalf(format string, args ...interface{}) {
+ l.Output(2, LevelFatal, fmt.Sprintf(format, args...))
+ os.Exit(1)
+}
+
+// Fatalln records the log with fatal level and exits
+func (l *Logger) Fatalln(args ...interface{}) {
+ l.Output(2, LevelFatal, fmt.Sprintln(args...))
+ os.Exit(1)
+}
+
+// Panic records the log with fatal level and panics
+func (l *Logger) Panic(args ...interface{}) {
+ msg := fmt.Sprint(args...)
+ l.Output(2, LevelError, msg)
+ panic(msg)
+}
+
+// Panicf records the log with fatal level and panics
+func (l *Logger) Panicf(format string, args ...interface{}) {
+ msg := fmt.Sprintf(format, args...)
+ l.Output(2, LevelError, msg)
+ panic(msg)
+}
+
+// Panicln records the log with fatal level and panics
+func (l *Logger) Panicln(args ...interface{}) {
+ msg := fmt.Sprintln(args...)
+ l.Output(2, LevelError, msg)
+ panic(msg)
+}
+
+// Print records the log with trace level
+func (l *Logger) Print(args ...interface{}) {
+ l.Output(2, LevelTrace, fmt.Sprint(args...))
+}
+
+// Printf records the log with trace level
+func (l *Logger) Printf(format string, args ...interface{}) {
+ l.Output(2, LevelTrace, fmt.Sprintf(format, args...))
+}
+
+// Println records the log with trace level
+func (l *Logger) Println(args ...interface{}) {
+ l.Output(2, LevelTrace, fmt.Sprintln(args...))
+}
+
+// Debug records the log with debug level
+func (l *Logger) Debug(args ...interface{}) {
+ l.Output(2, LevelDebug, fmt.Sprint(args...))
+}
+
+// Debugf records the log with debug level
+func (l *Logger) Debugf(format string, args ...interface{}) {
+ l.Output(2, LevelDebug, fmt.Sprintf(format, args...))
+}
+
+// Debugln records the log with debug level
+func (l *Logger) Debugln(args ...interface{}) {
+ l.Output(2, LevelDebug, fmt.Sprintln(args...))
+}
+
+// Error records the log with error level
+func (l *Logger) Error(args ...interface{}) {
+ l.Output(2, LevelError, fmt.Sprint(args...))
+}
+
+// Errorf records the log with error level
+func (l *Logger) Errorf(format string, args ...interface{}) {
+ l.Output(2, LevelError, fmt.Sprintf(format, args...))
+}
+
+// Errorln records the log with error level
+func (l *Logger) Errorln(args ...interface{}) {
+ l.Output(2, LevelError, fmt.Sprintln(args...))
+}
+
+// Info records the log with info level
+func (l *Logger) Info(args ...interface{}) {
+ l.Output(2, LevelInfo, fmt.Sprint(args...))
+}
+
+// Infof records the log with info level
+func (l *Logger) Infof(format string, args ...interface{}) {
+ l.Output(2, LevelInfo, fmt.Sprintf(format, args...))
+}
+
+// Infoln records the log with info level
+func (l *Logger) Infoln(args ...interface{}) {
+ l.Output(2, LevelInfo, fmt.Sprintln(args...))
+}
+
+// Warn records the log with warn level
+func (l *Logger) Warn(args ...interface{}) {
+ l.Output(2, LevelWarn, fmt.Sprint(args...))
+}
+
+// Warnf records the log with warn level
+func (l *Logger) Warnf(format string, args ...interface{}) {
+ l.Output(2, LevelWarn, fmt.Sprintf(format, args...))
+}
+
+// Warnln records the log with warn level
+func (l *Logger) Warnln(args ...interface{}) {
+ l.Output(2, LevelWarn, fmt.Sprintln(args...))
+}
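
Putting the Logger together with a handler and a level filter; the level name and the messages below are illustrative:

package main

import (
    "os"

    "github.com/siddontang/go-log/log"
)

func main() {
    h, err := log.NewStreamHandler(os.Stderr)
    if err != nil {
        panic(err)
    }
    // Only prefix entries with the level; skip time and file:line.
    l := log.New(h, log.Llevel)
    defer l.Close()

    l.SetLevelByName("warn")
    l.Debug("this is filtered out") // below the warn level
    l.Warnf("replication delayed by %ds", 3)
}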
diff --git a/vendor/github.com/siddontang/go-log/loggers/loggers.go b/vendor/github.com/siddontang/go-log/loggers/loggers.go
new file mode 100644
index 000000000..2723b24a7
--- /dev/null
+++ b/vendor/github.com/siddontang/go-log/loggers/loggers.go
@@ -0,0 +1,68 @@
+// MIT License
+
+// Copyright (c) 2017 Birkir A. Barkarson
+
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+package loggers
+
+// Standard is the interface used by Go's standard library's log package.
+type Standard interface {
+ Fatal(args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Fatalln(args ...interface{})
+
+ Panic(args ...interface{})
+ Panicf(format string, args ...interface{})
+ Panicln(args ...interface{})
+
+ Print(args ...interface{})
+ Printf(format string, args ...interface{})
+ Println(args ...interface{})
+}
+
+// Advanced is an interface with commonly used log level methods.
+type Advanced interface {
+ Standard
+
+ Debug(args ...interface{})
+ Debugf(format string, args ...interface{})
+ Debugln(args ...interface{})
+
+ Error(args ...interface{})
+ Errorf(format string, args ...interface{})
+ Errorln(args ...interface{})
+
+ Info(args ...interface{})
+ Infof(format string, args ...interface{})
+ Infoln(args ...interface{})
+
+ Warn(args ...interface{})
+ Warnf(format string, args ...interface{})
+ Warnln(args ...interface{})
+}
+
+// Contextual is an interface that allows context addition to a log statement before
+// calling the final print (message/level) method.
+type Contextual interface {
+ Advanced
+
+ WithField(key string, value interface{}) Advanced
+ WithFields(fields ...interface{}) Advanced
+}
diff --git a/vendor/github.com/siddontang/go-mysql/LICENSE b/vendor/github.com/siddontang/go-mysql/LICENSE
new file mode 100644
index 000000000..80511a0a7
--- /dev/null
+++ b/vendor/github.com/siddontang/go-mysql/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 siddontang
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/const.go b/vendor/github.com/siddontang/go-mysql/mysql/const.go
new file mode 100644
index 000000000..a4862ea73
--- /dev/null
+++ b/vendor/github.com/siddontang/go-mysql/mysql/const.go
@@ -0,0 +1,164 @@
+package mysql
+
+const (
+ MinProtocolVersion byte = 10
+ MaxPayloadLen int = 1<<24 - 1
+ TimeFormat string = "2006-01-02 15:04:05"
+)
+
+var (
+ // you may change this to report your own server version
+ ServerVersion string = "5.7.0"
+)
+
+const (
+ OK_HEADER byte = 0x00
+ ERR_HEADER byte = 0xff
+ EOF_HEADER byte = 0xfe
+ LocalInFile_HEADER byte = 0xfb
+)
+
+const (
+ SERVER_STATUS_IN_TRANS uint16 = 0x0001
+ SERVER_STATUS_AUTOCOMMIT uint16 = 0x0002
+ SERVER_MORE_RESULTS_EXISTS uint16 = 0x0008
+ SERVER_STATUS_NO_GOOD_INDEX_USED uint16 = 0x0010
+ SERVER_STATUS_NO_INDEX_USED uint16 = 0x0020
+ SERVER_STATUS_CURSOR_EXISTS uint16 = 0x0040
+ SERVER_STATUS_LAST_ROW_SEND uint16 = 0x0080
+ SERVER_STATUS_DB_DROPPED uint16 = 0x0100
+ SERVER_STATUS_NO_BACKSLASH_ESCAPED uint16 = 0x0200
+ SERVER_STATUS_METADATA_CHANGED uint16 = 0x0400
+ SERVER_QUERY_WAS_SLOW uint16 = 0x0800
+ SERVER_PS_OUT_PARAMS uint16 = 0x1000
+)
+
+const (
+ COM_SLEEP byte = iota
+ COM_QUIT
+ COM_INIT_DB
+ COM_QUERY
+ COM_FIELD_LIST
+ COM_CREATE_DB
+ COM_DROP_DB
+ COM_REFRESH
+ COM_SHUTDOWN
+ COM_STATISTICS
+ COM_PROCESS_INFO
+ COM_CONNECT
+ COM_PROCESS_KILL
+ COM_DEBUG
+ COM_PING
+ COM_TIME
+ COM_DELAYED_INSERT
+ COM_CHANGE_USER
+ COM_BINLOG_DUMP
+ COM_TABLE_DUMP
+ COM_CONNECT_OUT
+ COM_REGISTER_SLAVE
+ COM_STMT_PREPARE
+ COM_STMT_EXECUTE
+ COM_STMT_SEND_LONG_DATA
+ COM_STMT_CLOSE
+ COM_STMT_RESET
+ COM_SET_OPTION
+ COM_STMT_FETCH
+ COM_DAEMON
+ COM_BINLOG_DUMP_GTID
+ COM_RESET_CONNECTION
+)
+
+const (
+ CLIENT_LONG_PASSWORD uint32 = 1 << iota
+ CLIENT_FOUND_ROWS
+ CLIENT_LONG_FLAG
+ CLIENT_CONNECT_WITH_DB
+ CLIENT_NO_SCHEMA
+ CLIENT_COMPRESS
+ CLIENT_ODBC
+ CLIENT_LOCAL_FILES
+ CLIENT_IGNORE_SPACE
+ CLIENT_PROTOCOL_41
+ CLIENT_INTERACTIVE
+ CLIENT_SSL
+ CLIENT_IGNORE_SIGPIPE
+ CLIENT_TRANSACTIONS
+ CLIENT_RESERVED
+ CLIENT_SECURE_CONNECTION
+ CLIENT_MULTI_STATEMENTS
+ CLIENT_MULTI_RESULTS
+ CLIENT_PS_MULTI_RESULTS
+ CLIENT_PLUGIN_AUTH
+ CLIENT_CONNECT_ATTRS
+ CLIENT_PLUGIN_AUTH_LENENC_CLIENT_DATA
+)
+
+const (
+ MYSQL_TYPE_DECIMAL byte = iota
+ MYSQL_TYPE_TINY
+ MYSQL_TYPE_SHORT
+ MYSQL_TYPE_LONG
+ MYSQL_TYPE_FLOAT
+ MYSQL_TYPE_DOUBLE
+ MYSQL_TYPE_NULL
+ MYSQL_TYPE_TIMESTAMP
+ MYSQL_TYPE_LONGLONG
+ MYSQL_TYPE_INT24
+ MYSQL_TYPE_DATE
+ MYSQL_TYPE_TIME
+ MYSQL_TYPE_DATETIME
+ MYSQL_TYPE_YEAR
+ MYSQL_TYPE_NEWDATE
+ MYSQL_TYPE_VARCHAR
+ MYSQL_TYPE_BIT
+
+ // MySQL 5.6
+ MYSQL_TYPE_TIMESTAMP2
+ MYSQL_TYPE_DATETIME2
+ MYSQL_TYPE_TIME2
+)
+
+const (
+ MYSQL_TYPE_JSON byte = iota + 0xf5
+ MYSQL_TYPE_NEWDECIMAL
+ MYSQL_TYPE_ENUM
+ MYSQL_TYPE_SET
+ MYSQL_TYPE_TINY_BLOB
+ MYSQL_TYPE_MEDIUM_BLOB
+ MYSQL_TYPE_LONG_BLOB
+ MYSQL_TYPE_BLOB
+ MYSQL_TYPE_VAR_STRING
+ MYSQL_TYPE_STRING
+ MYSQL_TYPE_GEOMETRY
+)
+
+const (
+ NOT_NULL_FLAG = 1
+ PRI_KEY_FLAG = 2
+ UNIQUE_KEY_FLAG = 4
+ BLOB_FLAG = 16
+ UNSIGNED_FLAG = 32
+ ZEROFILL_FLAG = 64
+ BINARY_FLAG = 128
+ ENUM_FLAG = 256
+ AUTO_INCREMENT_FLAG = 512
+ TIMESTAMP_FLAG = 1024
+ SET_FLAG = 2048
+ NUM_FLAG = 32768
+ PART_KEY_FLAG = 16384
+ GROUP_FLAG = 32768
+ UNIQUE_FLAG = 65536
+)
+
+const (
+ AUTH_NAME = "mysql_native_password"
+ DEFAULT_CHARSET = "utf8"
+ DEFAULT_COLLATION_ID uint8 = 33
+ DEFAULT_COLLATION_NAME string = "utf8_general_ci"
+)
+
+// Like Vitess, use a flavor for different MySQL variants (MySQL and MariaDB).
+const (
+ MySQLFlavor = "mysql"
+ MariaDBFlavor = "mariadb"
+)
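
The CLIENT_* capability constants above are single-bit flags built with 1 << iota, so the capability mask negotiated during the handshake can be checked with a bitwise AND. A minimal sketch, assuming this vendored package is importable as github.com/siddontang/go-mysql/mysql; the hasCapability helper is illustrative and not part of the package:

package main

import (
	"fmt"

	"github.com/siddontang/go-mysql/mysql"
)

// hasCapability reports whether a handshake capability mask contains the
// given CLIENT_* flag; each flag occupies a distinct bit, so AND is enough.
func hasCapability(mask, flag uint32) bool {
	return mask&flag != 0
}

func main() {
	// A mask a server might advertise (illustrative values only).
	mask := mysql.CLIENT_PROTOCOL_41 | mysql.CLIENT_SECURE_CONNECTION | mysql.CLIENT_PLUGIN_AUTH

	fmt.Println(hasCapability(mask, mysql.CLIENT_PROTOCOL_41)) // true
	fmt.Println(hasCapability(mask, mysql.CLIENT_SSL))         // false: TLS not offered
}
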
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/errcode.go b/vendor/github.com/siddontang/go-mysql/mysql/errcode.go
new file mode 100644
index 000000000..8acff1ae9
--- /dev/null
+++ b/vendor/github.com/siddontang/go-mysql/mysql/errcode.go
@@ -0,0 +1,870 @@
+package mysql
+
+const (
+ ER_ERROR_FIRST uint16 = 1000
+ ER_HASHCHK = 1000
+ ER_NISAMCHK = 1001
+ ER_NO = 1002
+ ER_YES = 1003
+ ER_CANT_CREATE_FILE = 1004
+ ER_CANT_CREATE_TABLE = 1005
+ ER_CANT_CREATE_DB = 1006
+ ER_DB_CREATE_EXISTS = 1007
+ ER_DB_DROP_EXISTS = 1008
+ ER_DB_DROP_DELETE = 1009
+ ER_DB_DROP_RMDIR = 1010
+ ER_CANT_DELETE_FILE = 1011
+ ER_CANT_FIND_SYSTEM_REC = 1012
+ ER_CANT_GET_STAT = 1013
+ ER_CANT_GET_WD = 1014
+ ER_CANT_LOCK = 1015
+ ER_CANT_OPEN_FILE = 1016
+ ER_FILE_NOT_FOUND = 1017
+ ER_CANT_READ_DIR = 1018
+ ER_CANT_SET_WD = 1019
+ ER_CHECKREAD = 1020
+ ER_DISK_FULL = 1021
+ ER_DUP_KEY = 1022
+ ER_ERROR_ON_CLOSE = 1023
+ ER_ERROR_ON_READ = 1024
+ ER_ERROR_ON_RENAME = 1025
+ ER_ERROR_ON_WRITE = 1026
+ ER_FILE_USED = 1027
+ ER_FILSORT_ABORT = 1028
+ ER_FORM_NOT_FOUND = 1029
+ ER_GET_ERRNO = 1030
+ ER_ILLEGAL_HA = 1031
+ ER_KEY_NOT_FOUND = 1032
+ ER_NOT_FORM_FILE = 1033
+ ER_NOT_KEYFILE = 1034
+ ER_OLD_KEYFILE = 1035
+ ER_OPEN_AS_READONLY = 1036
+ ER_OUTOFMEMORY = 1037
+ ER_OUT_OF_SORTMEMORY = 1038
+ ER_UNEXPECTED_EOF = 1039
+ ER_CON_COUNT_ERROR = 1040
+ ER_OUT_OF_RESOURCES = 1041
+ ER_BAD_HOST_ERROR = 1042
+ ER_HANDSHAKE_ERROR = 1043
+ ER_DBACCESS_DENIED_ERROR = 1044
+ ER_ACCESS_DENIED_ERROR = 1045
+ ER_NO_DB_ERROR = 1046
+ ER_UNKNOWN_COM_ERROR = 1047
+ ER_BAD_NULL_ERROR = 1048
+ ER_BAD_DB_ERROR = 1049
+ ER_TABLE_EXISTS_ERROR = 1050
+ ER_BAD_TABLE_ERROR = 1051
+ ER_NON_UNIQ_ERROR = 1052
+ ER_SERVER_SHUTDOWN = 1053
+ ER_BAD_FIELD_ERROR = 1054
+ ER_WRONG_FIELD_WITH_GROUP = 1055
+ ER_WRONG_GROUP_FIELD = 1056
+ ER_WRONG_SUM_SELECT = 1057
+ ER_WRONG_VALUE_COUNT = 1058
+ ER_TOO_LONG_IDENT = 1059
+ ER_DUP_FIELDNAME = 1060
+ ER_DUP_KEYNAME = 1061
+ ER_DUP_ENTRY = 1062
+ ER_WRONG_FIELD_SPEC = 1063
+ ER_PARSE_ERROR = 1064
+ ER_EMPTY_QUERY = 1065
+ ER_NONUNIQ_TABLE = 1066
+ ER_INVALID_DEFAULT = 1067
+ ER_MULTIPLE_PRI_KEY = 1068
+ ER_TOO_MANY_KEYS = 1069
+ ER_TOO_MANY_KEY_PARTS = 1070
+ ER_TOO_LONG_KEY = 1071
+ ER_KEY_COLUMN_DOES_NOT_EXITS = 1072
+ ER_BLOB_USED_AS_KEY = 1073
+ ER_TOO_BIG_FIELDLENGTH = 1074
+ ER_WRONG_AUTO_KEY = 1075
+ ER_READY = 1076
+ ER_NORMAL_SHUTDOWN = 1077
+ ER_GOT_SIGNAL = 1078
+ ER_SHUTDOWN_COMPLETE = 1079
+ ER_FORCING_CLOSE = 1080
+ ER_IPSOCK_ERROR = 1081
+ ER_NO_SUCH_INDEX = 1082
+ ER_WRONG_FIELD_TERMINATORS = 1083
+ ER_BLOBS_AND_NO_TERMINATED = 1084
+ ER_TEXTFILE_NOT_READABLE = 1085
+ ER_FILE_EXISTS_ERROR = 1086
+ ER_LOAD_INFO = 1087
+ ER_ALTER_INFO = 1088
+ ER_WRONG_SUB_KEY = 1089
+ ER_CANT_REMOVE_ALL_FIELDS = 1090
+ ER_CANT_DROP_FIELD_OR_KEY = 1091
+ ER_INSERT_INFO = 1092
+ ER_UPDATE_TABLE_USED = 1093
+ ER_NO_SUCH_THREAD = 1094
+ ER_KILL_DENIED_ERROR = 1095
+ ER_NO_TABLES_USED = 1096
+ ER_TOO_BIG_SET = 1097
+ ER_NO_UNIQUE_LOGFILE = 1098
+ ER_TABLE_NOT_LOCKED_FOR_WRITE = 1099
+ ER_TABLE_NOT_LOCKED = 1100
+ ER_BLOB_CANT_HAVE_DEFAULT = 1101
+ ER_WRONG_DB_NAME = 1102
+ ER_WRONG_TABLE_NAME = 1103
+ ER_TOO_BIG_SELECT = 1104
+ ER_UNKNOWN_ERROR = 1105
+ ER_UNKNOWN_PROCEDURE = 1106
+ ER_WRONG_PARAMCOUNT_TO_PROCEDURE = 1107
+ ER_WRONG_PARAMETERS_TO_PROCEDURE = 1108
+ ER_UNKNOWN_TABLE = 1109
+ ER_FIELD_SPECIFIED_TWICE = 1110
+ ER_INVALID_GROUP_FUNC_USE = 1111
+ ER_UNSUPPORTED_EXTENSION = 1112
+ ER_TABLE_MUST_HAVE_COLUMNS = 1113
+ ER_RECORD_FILE_FULL = 1114
+ ER_UNKNOWN_CHARACTER_SET = 1115
+ ER_TOO_MANY_TABLES = 1116
+ ER_TOO_MANY_FIELDS = 1117
+ ER_TOO_BIG_ROWSIZE = 1118
+ ER_STACK_OVERRUN = 1119
+ ER_WRONG_OUTER_JOIN = 1120
+ ER_NULL_COLUMN_IN_INDEX = 1121
+ ER_CANT_FIND_UDF = 1122
+ ER_CANT_INITIALIZE_UDF = 1123
+ ER_UDF_NO_PATHS = 1124
+ ER_UDF_EXISTS = 1125
+ ER_CANT_OPEN_LIBRARY = 1126
+ ER_CANT_FIND_DL_ENTRY = 1127
+ ER_FUNCTION_NOT_DEFINED = 1128
+ ER_HOST_IS_BLOCKED = 1129
+ ER_HOST_NOT_PRIVILEGED = 1130
+ ER_PASSWORD_ANONYMOUS_USER = 1131
+ ER_PASSWORD_NOT_ALLOWED = 1132
+ ER_PASSWORD_NO_MATCH = 1133
+ ER_UPDATE_INFO = 1134
+ ER_CANT_CREATE_THREAD = 1135
+ ER_WRONG_VALUE_COUNT_ON_ROW = 1136
+ ER_CANT_REOPEN_TABLE = 1137
+ ER_INVALID_USE_OF_NULL = 1138
+ ER_REGEXP_ERROR = 1139
+ ER_MIX_OF_GROUP_FUNC_AND_FIELDS = 1140
+ ER_NONEXISTING_GRANT = 1141
+ ER_TABLEACCESS_DENIED_ERROR = 1142
+ ER_COLUMNACCESS_DENIED_ERROR = 1143
+ ER_ILLEGAL_GRANT_FOR_TABLE = 1144
+ ER_GRANT_WRONG_HOST_OR_USER = 1145
+ ER_NO_SUCH_TABLE = 1146
+ ER_NONEXISTING_TABLE_GRANT = 1147
+ ER_NOT_ALLOWED_COMMAND = 1148
+ ER_SYNTAX_ERROR = 1149
+ ER_DELAYED_CANT_CHANGE_LOCK = 1150
+ ER_TOO_MANY_DELAYED_THREADS = 1151
+ ER_ABORTING_CONNECTION = 1152
+ ER_NET_PACKET_TOO_LARGE = 1153
+ ER_NET_READ_ERROR_FROM_PIPE = 1154
+ ER_NET_FCNTL_ERROR = 1155
+ ER_NET_PACKETS_OUT_OF_ORDER = 1156
+ ER_NET_UNCOMPRESS_ERROR = 1157
+ ER_NET_READ_ERROR = 1158
+ ER_NET_READ_INTERRUPTED = 1159
+ ER_NET_ERROR_ON_WRITE = 1160
+ ER_NET_WRITE_INTERRUPTED = 1161
+ ER_TOO_LONG_STRING = 1162
+ ER_TABLE_CANT_HANDLE_BLOB = 1163
+ ER_TABLE_CANT_HANDLE_AUTO_INCREMENT = 1164
+ ER_DELAYED_INSERT_TABLE_LOCKED = 1165
+ ER_WRONG_COLUMN_NAME = 1166
+ ER_WRONG_KEY_COLUMN = 1167
+ ER_WRONG_MRG_TABLE = 1168
+ ER_DUP_UNIQUE = 1169
+ ER_BLOB_KEY_WITHOUT_LENGTH = 1170
+ ER_PRIMARY_CANT_HAVE_NULL = 1171
+ ER_TOO_MANY_ROWS = 1172
+ ER_REQUIRES_PRIMARY_KEY = 1173
+ ER_NO_RAID_COMPILED = 1174
+ ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE = 1175
+ ER_KEY_DOES_NOT_EXITS = 1176
+ ER_CHECK_NO_SUCH_TABLE = 1177
+ ER_CHECK_NOT_IMPLEMENTED = 1178
+ ER_CANT_DO_THIS_DURING_AN_TRANSACTION = 1179
+ ER_ERROR_DURING_COMMIT = 1180
+ ER_ERROR_DURING_ROLLBACK = 1181
+ ER_ERROR_DURING_FLUSH_LOGS = 1182
+ ER_ERROR_DURING_CHECKPOINT = 1183
+ ER_NEW_ABORTING_CONNECTION = 1184
+ ER_DUMP_NOT_IMPLEMENTED = 1185
+ ER_FLUSH_MASTER_BINLOG_CLOSED = 1186
+ ER_INDEX_REBUILD = 1187
+ ER_MASTER = 1188
+ ER_MASTER_NET_READ = 1189
+ ER_MASTER_NET_WRITE = 1190
+ ER_FT_MATCHING_KEY_NOT_FOUND = 1191
+ ER_LOCK_OR_ACTIVE_TRANSACTION = 1192
+ ER_UNKNOWN_SYSTEM_VARIABLE = 1193
+ ER_CRASHED_ON_USAGE = 1194
+ ER_CRASHED_ON_REPAIR = 1195
+ ER_WARNING_NOT_COMPLETE_ROLLBACK = 1196
+ ER_TRANS_CACHE_FULL = 1197
+ ER_SLAVE_MUST_STOP = 1198
+ ER_SLAVE_NOT_RUNNING = 1199
+ ER_BAD_SLAVE = 1200
+ ER_MASTER_INFO = 1201
+ ER_SLAVE_THREAD = 1202
+ ER_TOO_MANY_USER_CONNECTIONS = 1203
+ ER_SET_CONSTANTS_ONLY = 1204
+ ER_LOCK_WAIT_TIMEOUT = 1205
+ ER_LOCK_TABLE_FULL = 1206
+ ER_READ_ONLY_TRANSACTION = 1207
+ ER_DROP_DB_WITH_READ_LOCK = 1208
+ ER_CREATE_DB_WITH_READ_LOCK = 1209
+ ER_WRONG_ARGUMENTS = 1210
+ ER_NO_PERMISSION_TO_CREATE_USER = 1211
+ ER_UNION_TABLES_IN_DIFFERENT_DIR = 1212
+ ER_LOCK_DEADLOCK = 1213
+ ER_TABLE_CANT_HANDLE_FT = 1214
+ ER_CANNOT_ADD_FOREIGN = 1215
+ ER_NO_REFERENCED_ROW = 1216
+ ER_ROW_IS_REFERENCED = 1217
+ ER_CONNECT_TO_MASTER = 1218
+ ER_QUERY_ON_MASTER = 1219
+ ER_ERROR_WHEN_EXECUTING_COMMAND = 1220
+ ER_WRONG_USAGE = 1221
+ ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT = 1222
+ ER_CANT_UPDATE_WITH_READLOCK = 1223
+ ER_MIXING_NOT_ALLOWED = 1224
+ ER_DUP_ARGUMENT = 1225
+ ER_USER_LIMIT_REACHED = 1226
+ ER_SPECIFIC_ACCESS_DENIED_ERROR = 1227
+ ER_LOCAL_VARIABLE = 1228
+ ER_GLOBAL_VARIABLE = 1229
+ ER_NO_DEFAULT = 1230
+ ER_WRONG_VALUE_FOR_VAR = 1231
+ ER_WRONG_TYPE_FOR_VAR = 1232
+ ER_VAR_CANT_BE_READ = 1233
+ ER_CANT_USE_OPTION_HERE = 1234
+ ER_NOT_SUPPORTED_YET = 1235
+ ER_MASTER_FATAL_ERROR_READING_BINLOG = 1236
+ ER_SLAVE_IGNORED_TABLE = 1237
+ ER_INCORRECT_GLOBAL_LOCAL_VAR = 1238
+ ER_WRONG_FK_DEF = 1239
+ ER_KEY_REF_DO_NOT_MATCH_TABLE_REF = 1240
+ ER_OPERAND_COLUMNS = 1241
+ ER_SUBQUERY_NO_1_ROW = 1242
+ ER_UNKNOWN_STMT_HANDLER = 1243
+ ER_CORRUPT_HELP_DB = 1244
+ ER_CYCLIC_REFERENCE = 1245
+ ER_AUTO_CONVERT = 1246
+ ER_ILLEGAL_REFERENCE = 1247
+ ER_DERIVED_MUST_HAVE_ALIAS = 1248
+ ER_SELECT_REDUCED = 1249
+ ER_TABLENAME_NOT_ALLOWED_HERE = 1250
+ ER_NOT_SUPPORTED_AUTH_MODE = 1251
+ ER_SPATIAL_CANT_HAVE_NULL = 1252
+ ER_COLLATION_CHARSET_MISMATCH = 1253
+ ER_SLAVE_WAS_RUNNING = 1254
+ ER_SLAVE_WAS_NOT_RUNNING = 1255
+ ER_TOO_BIG_FOR_UNCOMPRESS = 1256
+ ER_ZLIB_Z_MEM_ERROR = 1257
+ ER_ZLIB_Z_BUF_ERROR = 1258
+ ER_ZLIB_Z_DATA_ERROR = 1259
+ ER_CUT_VALUE_GROUP_CONCAT = 1260
+ ER_WARN_TOO_FEW_RECORDS = 1261
+ ER_WARN_TOO_MANY_RECORDS = 1262
+ ER_WARN_NULL_TO_NOTNULL = 1263
+ ER_WARN_DATA_OUT_OF_RANGE = 1264
+ WARN_DATA_TRUNCATED = 1265
+ ER_WARN_USING_OTHER_HANDLER = 1266
+ ER_CANT_AGGREGATE_2COLLATIONS = 1267
+ ER_DROP_USER = 1268
+ ER_REVOKE_GRANTS = 1269
+ ER_CANT_AGGREGATE_3COLLATIONS = 1270
+ ER_CANT_AGGREGATE_NCOLLATIONS = 1271
+ ER_VARIABLE_IS_NOT_STRUCT = 1272
+ ER_UNKNOWN_COLLATION = 1273
+ ER_SLAVE_IGNORED_SSL_PARAMS = 1274
+ ER_SERVER_IS_IN_SECURE_AUTH_MODE = 1275
+ ER_WARN_FIELD_RESOLVED = 1276
+ ER_BAD_SLAVE_UNTIL_COND = 1277
+ ER_MISSING_SKIP_SLAVE = 1278
+ ER_UNTIL_COND_IGNORED = 1279
+ ER_WRONG_NAME_FOR_INDEX = 1280
+ ER_WRONG_NAME_FOR_CATALOG = 1281
+ ER_WARN_QC_RESIZE = 1282
+ ER_BAD_FT_COLUMN = 1283
+ ER_UNKNOWN_KEY_CACHE = 1284
+ ER_WARN_HOSTNAME_WONT_WORK = 1285
+ ER_UNKNOWN_STORAGE_ENGINE = 1286
+ ER_WARN_DEPRECATED_SYNTAX = 1287
+ ER_NON_UPDATABLE_TABLE = 1288
+ ER_FEATURE_DISABLED = 1289
+ ER_OPTION_PREVENTS_STATEMENT = 1290
+ ER_DUPLICATED_VALUE_IN_TYPE = 1291
+ ER_TRUNCATED_WRONG_VALUE = 1292
+ ER_TOO_MUCH_AUTO_TIMESTAMP_COLS = 1293
+ ER_INVALID_ON_UPDATE = 1294
+ ER_UNSUPPORTED_PS = 1295
+ ER_GET_ERRMSG = 1296
+ ER_GET_TEMPORARY_ERRMSG = 1297
+ ER_UNKNOWN_TIME_ZONE = 1298
+ ER_WARN_INVALID_TIMESTAMP = 1299
+ ER_INVALID_CHARACTER_STRING = 1300
+ ER_WARN_ALLOWED_PACKET_OVERFLOWED = 1301
+ ER_CONFLICTING_DECLARATIONS = 1302
+ ER_SP_NO_RECURSIVE_CREATE = 1303
+ ER_SP_ALREADY_EXISTS = 1304
+ ER_SP_DOES_NOT_EXIST = 1305
+ ER_SP_DROP_FAILED = 1306
+ ER_SP_STORE_FAILED = 1307
+ ER_SP_LILABEL_MISMATCH = 1308
+ ER_SP_LABEL_REDEFINE = 1309
+ ER_SP_LABEL_MISMATCH = 1310
+ ER_SP_UNINIT_VAR = 1311
+ ER_SP_BADSELECT = 1312
+ ER_SP_BADRETURN = 1313
+ ER_SP_BADSTATEMENT = 1314
+ ER_UPDATE_LOG_DEPRECATED_IGNORED = 1315
+ ER_UPDATE_LOG_DEPRECATED_TRANSLATED = 1316
+ ER_QUERY_INTERRUPTED = 1317
+ ER_SP_WRONG_NO_OF_ARGS = 1318
+ ER_SP_COND_MISMATCH = 1319
+ ER_SP_NORETURN = 1320
+ ER_SP_NORETURNEND = 1321
+ ER_SP_BAD_CURSOR_QUERY = 1322
+ ER_SP_BAD_CURSOR_SELECT = 1323
+ ER_SP_CURSOR_MISMATCH = 1324
+ ER_SP_CURSOR_ALREADY_OPEN = 1325
+ ER_SP_CURSOR_NOT_OPEN = 1326
+ ER_SP_UNDECLARED_VAR = 1327
+ ER_SP_WRONG_NO_OF_FETCH_ARGS = 1328
+ ER_SP_FETCH_NO_DATA = 1329
+ ER_SP_DUP_PARAM = 1330
+ ER_SP_DUP_VAR = 1331
+ ER_SP_DUP_COND = 1332
+ ER_SP_DUP_CURS = 1333
+ ER_SP_CANT_ALTER = 1334
+ ER_SP_SUBSELECT_NYI = 1335
+ ER_STMT_NOT_ALLOWED_IN_SF_OR_TRG = 1336
+ ER_SP_VARCOND_AFTER_CURSHNDLR = 1337
+ ER_SP_CURSOR_AFTER_HANDLER = 1338
+ ER_SP_CASE_NOT_FOUND = 1339
+ ER_FPARSER_TOO_BIG_FILE = 1340
+ ER_FPARSER_BAD_HEADER = 1341
+ ER_FPARSER_EOF_IN_COMMENT = 1342
+ ER_FPARSER_ERROR_IN_PARAMETER = 1343
+ ER_FPARSER_EOF_IN_UNKNOWN_PARAMETER = 1344
+ ER_VIEW_NO_EXPLAIN = 1345
+ ER_FRM_UNKNOWN_TYPE = 1346
+ ER_WRONG_OBJECT = 1347
+ ER_NONUPDATEABLE_COLUMN = 1348
+ ER_VIEW_SELECT_DERIVED = 1349
+ ER_VIEW_SELECT_CLAUSE = 1350
+ ER_VIEW_SELECT_VARIABLE = 1351
+ ER_VIEW_SELECT_TMPTABLE = 1352
+ ER_VIEW_WRONG_LIST = 1353
+ ER_WARN_VIEW_MERGE = 1354
+ ER_WARN_VIEW_WITHOUT_KEY = 1355
+ ER_VIEW_INVALID = 1356
+ ER_SP_NO_DROP_SP = 1357
+ ER_SP_GOTO_IN_HNDLR = 1358
+ ER_TRG_ALREADY_EXISTS = 1359
+ ER_TRG_DOES_NOT_EXIST = 1360
+ ER_TRG_ON_VIEW_OR_TEMP_TABLE = 1361
+ ER_TRG_CANT_CHANGE_ROW = 1362
+ ER_TRG_NO_SUCH_ROW_IN_TRG = 1363
+ ER_NO_DEFAULT_FOR_FIELD = 1364
+ ER_DIVISION_BY_ZERO = 1365
+ ER_TRUNCATED_WRONG_VALUE_FOR_FIELD = 1366
+ ER_ILLEGAL_VALUE_FOR_TYPE = 1367
+ ER_VIEW_NONUPD_CHECK = 1368
+ ER_VIEW_CHECK_FAILED = 1369
+ ER_PROCACCESS_DENIED_ERROR = 1370
+ ER_RELAY_LOG_FAIL = 1371
+ ER_PASSWD_LENGTH = 1372
+ ER_UNKNOWN_TARGET_BINLOG = 1373
+ ER_IO_ERR_LOG_INDEX_READ = 1374
+ ER_BINLOG_PURGE_PROHIBITED = 1375
+ ER_FSEEK_FAIL = 1376
+ ER_BINLOG_PURGE_FATAL_ERR = 1377
+ ER_LOG_IN_USE = 1378
+ ER_LOG_PURGE_UNKNOWN_ERR = 1379
+ ER_RELAY_LOG_INIT = 1380
+ ER_NO_BINARY_LOGGING = 1381
+ ER_RESERVED_SYNTAX = 1382
+ ER_WSAS_FAILED = 1383
+ ER_DIFF_GROUPS_PROC = 1384
+ ER_NO_GROUP_FOR_PROC = 1385
+ ER_ORDER_WITH_PROC = 1386
+ ER_LOGGING_PROHIBIT_CHANGING_OF = 1387
+ ER_NO_FILE_MAPPING = 1388
+ ER_WRONG_MAGIC = 1389
+ ER_PS_MANY_PARAM = 1390
+ ER_KEY_PART_0 = 1391
+ ER_VIEW_CHECKSUM = 1392
+ ER_VIEW_MULTIUPDATE = 1393
+ ER_VIEW_NO_INSERT_FIELD_LIST = 1394
+ ER_VIEW_DELETE_MERGE_VIEW = 1395
+ ER_CANNOT_USER = 1396
+ ER_XAER_NOTA = 1397
+ ER_XAER_INVAL = 1398
+ ER_XAER_RMFAIL = 1399
+ ER_XAER_OUTSIDE = 1400
+ ER_XAER_RMERR = 1401
+ ER_XA_RBROLLBACK = 1402
+ ER_NONEXISTING_PROC_GRANT = 1403
+ ER_PROC_AUTO_GRANT_FAIL = 1404
+ ER_PROC_AUTO_REVOKE_FAIL = 1405
+ ER_DATA_TOO_LONG = 1406
+ ER_SP_BAD_SQLSTATE = 1407
+ ER_STARTUP = 1408
+ ER_LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR = 1409
+ ER_CANT_CREATE_USER_WITH_GRANT = 1410
+ ER_WRONG_VALUE_FOR_TYPE = 1411
+ ER_TABLE_DEF_CHANGED = 1412
+ ER_SP_DUP_HANDLER = 1413
+ ER_SP_NOT_VAR_ARG = 1414
+ ER_SP_NO_RETSET = 1415
+ ER_CANT_CREATE_GEOMETRY_OBJECT = 1416
+ ER_FAILED_ROUTINE_BREAK_BINLOG = 1417
+ ER_BINLOG_UNSAFE_ROUTINE = 1418
+ ER_BINLOG_CREATE_ROUTINE_NEED_SUPER = 1419
+ ER_EXEC_STMT_WITH_OPEN_CURSOR = 1420
+ ER_STMT_HAS_NO_OPEN_CURSOR = 1421
+ ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG = 1422
+ ER_NO_DEFAULT_FOR_VIEW_FIELD = 1423
+ ER_SP_NO_RECURSION = 1424
+ ER_TOO_BIG_SCALE = 1425
+ ER_TOO_BIG_PRECISION = 1426
+ ER_M_BIGGER_THAN_D = 1427
+ ER_WRONG_LOCK_OF_SYSTEM_TABLE = 1428
+ ER_CONNECT_TO_FOREIGN_DATA_SOURCE = 1429
+ ER_QUERY_ON_FOREIGN_DATA_SOURCE = 1430
+ ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST = 1431
+ ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE = 1432
+ ER_FOREIGN_DATA_STRING_INVALID = 1433
+ ER_CANT_CREATE_FEDERATED_TABLE = 1434
+ ER_TRG_IN_WRONG_SCHEMA = 1435
+ ER_STACK_OVERRUN_NEED_MORE = 1436
+ ER_TOO_LONG_BODY = 1437
+ ER_WARN_CANT_DROP_DEFAULT_KEYCACHE = 1438
+ ER_TOO_BIG_DISPLAYWIDTH = 1439
+ ER_XAER_DUPID = 1440
+ ER_DATETIME_FUNCTION_OVERFLOW = 1441
+ ER_CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG = 1442
+ ER_VIEW_PREVENT_UPDATE = 1443
+ ER_PS_NO_RECURSION = 1444
+ ER_SP_CANT_SET_AUTOCOMMIT = 1445
+ ER_MALFORMED_DEFINER = 1446
+ ER_VIEW_FRM_NO_USER = 1447
+ ER_VIEW_OTHER_USER = 1448
+ ER_NO_SUCH_USER = 1449
+ ER_FORBID_SCHEMA_CHANGE = 1450
+ ER_ROW_IS_REFERENCED_2 = 1451
+ ER_NO_REFERENCED_ROW_2 = 1452
+ ER_SP_BAD_VAR_SHADOW = 1453
+ ER_TRG_NO_DEFINER = 1454
+ ER_OLD_FILE_FORMAT = 1455
+ ER_SP_RECURSION_LIMIT = 1456
+ ER_SP_PROC_TABLE_CORRUPT = 1457
+ ER_SP_WRONG_NAME = 1458
+ ER_TABLE_NEEDS_UPGRADE = 1459
+ ER_SP_NO_AGGREGATE = 1460
+ ER_MAX_PREPARED_STMT_COUNT_REACHED = 1461
+ ER_VIEW_RECURSIVE = 1462
+ ER_NON_GROUPING_FIELD_USED = 1463
+ ER_TABLE_CANT_HANDLE_SPKEYS = 1464
+ ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA = 1465
+ ER_REMOVED_SPACES = 1466
+ ER_AUTOINC_READ_FAILED = 1467
+ ER_USERNAME = 1468
+ ER_HOSTNAME = 1469
+ ER_WRONG_STRING_LENGTH = 1470
+ ER_NON_INSERTABLE_TABLE = 1471
+ ER_ADMIN_WRONG_MRG_TABLE = 1472
+ ER_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT = 1473
+ ER_NAME_BECOMES_EMPTY = 1474
+ ER_AMBIGUOUS_FIELD_TERM = 1475
+ ER_FOREIGN_SERVER_EXISTS = 1476
+ ER_FOREIGN_SERVER_DOESNT_EXIST = 1477
+ ER_ILLEGAL_HA_CREATE_OPTION = 1478
+ ER_PARTITION_REQUIRES_VALUES_ERROR = 1479
+ ER_PARTITION_WRONG_VALUES_ERROR = 1480
+ ER_PARTITION_MAXVALUE_ERROR = 1481
+ ER_PARTITION_SUBPARTITION_ERROR = 1482
+ ER_PARTITION_SUBPART_MIX_ERROR = 1483
+ ER_PARTITION_WRONG_NO_PART_ERROR = 1484
+ ER_PARTITION_WRONG_NO_SUBPART_ERROR = 1485
+ ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR = 1486
+ ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR = 1487
+ ER_FIELD_NOT_FOUND_PART_ERROR = 1488
+ ER_LIST_OF_FIELDS_ONLY_IN_HASH_ERROR = 1489
+ ER_INCONSISTENT_PARTITION_INFO_ERROR = 1490
+ ER_PARTITION_FUNC_NOT_ALLOWED_ERROR = 1491
+ ER_PARTITIONS_MUST_BE_DEFINED_ERROR = 1492
+ ER_RANGE_NOT_INCREASING_ERROR = 1493
+ ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR = 1494
+ ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR = 1495
+ ER_PARTITION_ENTRY_ERROR = 1496
+ ER_MIX_HANDLER_ERROR = 1497
+ ER_PARTITION_NOT_DEFINED_ERROR = 1498
+ ER_TOO_MANY_PARTITIONS_ERROR = 1499
+ ER_SUBPARTITION_ERROR = 1500
+ ER_CANT_CREATE_HANDLER_FILE = 1501
+ ER_BLOB_FIELD_IN_PART_FUNC_ERROR = 1502
+ ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF = 1503
+ ER_NO_PARTS_ERROR = 1504
+ ER_PARTITION_MGMT_ON_NONPARTITIONED = 1505
+ ER_FOREIGN_KEY_ON_PARTITIONED = 1506
+ ER_DROP_PARTITION_NON_EXISTENT = 1507
+ ER_DROP_LAST_PARTITION = 1508
+ ER_COALESCE_ONLY_ON_HASH_PARTITION = 1509
+ ER_REORG_HASH_ONLY_ON_SAME_NO = 1510
+ ER_REORG_NO_PARAM_ERROR = 1511
+ ER_ONLY_ON_RANGE_LIST_PARTITION = 1512
+ ER_ADD_PARTITION_SUBPART_ERROR = 1513
+ ER_ADD_PARTITION_NO_NEW_PARTITION = 1514
+ ER_COALESCE_PARTITION_NO_PARTITION = 1515
+ ER_REORG_PARTITION_NOT_EXIST = 1516
+ ER_SAME_NAME_PARTITION = 1517
+ ER_NO_BINLOG_ERROR = 1518
+ ER_CONSECUTIVE_REORG_PARTITIONS = 1519
+ ER_REORG_OUTSIDE_RANGE = 1520
+ ER_PARTITION_FUNCTION_FAILURE = 1521
+ ER_PART_STATE_ERROR = 1522
+ ER_LIMITED_PART_RANGE = 1523
+ ER_PLUGIN_IS_NOT_LOADED = 1524
+ ER_WRONG_VALUE = 1525
+ ER_NO_PARTITION_FOR_GIVEN_VALUE = 1526
+ ER_FILEGROUP_OPTION_ONLY_ONCE = 1527
+ ER_CREATE_FILEGROUP_FAILED = 1528
+ ER_DROP_FILEGROUP_FAILED = 1529
+ ER_TABLESPACE_AUTO_EXTEND_ERROR = 1530
+ ER_WRONG_SIZE_NUMBER = 1531
+ ER_SIZE_OVERFLOW_ERROR = 1532
+ ER_ALTER_FILEGROUP_FAILED = 1533
+ ER_BINLOG_ROW_LOGGING_FAILED = 1534
+ ER_BINLOG_ROW_WRONG_TABLE_DEF = 1535
+ ER_BINLOG_ROW_RBR_TO_SBR = 1536
+ ER_EVENT_ALREADY_EXISTS = 1537
+ ER_EVENT_STORE_FAILED = 1538
+ ER_EVENT_DOES_NOT_EXIST = 1539
+ ER_EVENT_CANT_ALTER = 1540
+ ER_EVENT_DROP_FAILED = 1541
+ ER_EVENT_INTERVAL_NOT_POSITIVE_OR_TOO_BIG = 1542
+ ER_EVENT_ENDS_BEFORE_STARTS = 1543
+ ER_EVENT_EXEC_TIME_IN_THE_PAST = 1544
+ ER_EVENT_OPEN_TABLE_FAILED = 1545
+ ER_EVENT_NEITHER_M_EXPR_NOR_M_AT = 1546
+ ER_OBSOLETE_COL_COUNT_DOESNT_MATCH_CORRUPTED = 1547
+ ER_OBSOLETE_CANNOT_LOAD_FROM_TABLE = 1548
+ ER_EVENT_CANNOT_DELETE = 1549
+ ER_EVENT_COMPILE_ERROR = 1550
+ ER_EVENT_SAME_NAME = 1551
+ ER_EVENT_DATA_TOO_LONG = 1552
+ ER_DROP_INDEX_FK = 1553
+ ER_WARN_DEPRECATED_SYNTAX_WITH_VER = 1554
+ ER_CANT_WRITE_LOCK_LOG_TABLE = 1555
+ ER_CANT_LOCK_LOG_TABLE = 1556
+ ER_FOREIGN_DUPLICATE_KEY_OLD_UNUSED = 1557
+ ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE = 1558
+ ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR = 1559
+ ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT = 1560
+ ER_NDB_CANT_SWITCH_BINLOG_FORMAT = 1561
+ ER_PARTITION_NO_TEMPORARY = 1562
+ ER_PARTITION_CONST_DOMAIN_ERROR = 1563
+ ER_PARTITION_FUNCTION_IS_NOT_ALLOWED = 1564
+ ER_DDL_LOG_ERROR = 1565
+ ER_NULL_IN_VALUES_LESS_THAN = 1566
+ ER_WRONG_PARTITION_NAME = 1567
+ ER_CANT_CHANGE_TX_CHARACTERISTICS = 1568
+ ER_DUP_ENTRY_AUTOINCREMENT_CASE = 1569
+ ER_EVENT_MODIFY_QUEUE_ERROR = 1570
+ ER_EVENT_SET_VAR_ERROR = 1571
+ ER_PARTITION_MERGE_ERROR = 1572
+ ER_CANT_ACTIVATE_LOG = 1573
+ ER_RBR_NOT_AVAILABLE = 1574
+ ER_BASE64_DECODE_ERROR = 1575
+ ER_EVENT_RECURSION_FORBIDDEN = 1576
+ ER_EVENTS_DB_ERROR = 1577
+ ER_ONLY_INTEGERS_ALLOWED = 1578
+ ER_UNSUPORTED_LOG_ENGINE = 1579
+ ER_BAD_LOG_STATEMENT = 1580
+ ER_CANT_RENAME_LOG_TABLE = 1581
+ ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT = 1582
+ ER_WRONG_PARAMETERS_TO_NATIVE_FCT = 1583
+ ER_WRONG_PARAMETERS_TO_STORED_FCT = 1584
+ ER_NATIVE_FCT_NAME_COLLISION = 1585
+ ER_DUP_ENTRY_WITH_KEY_NAME = 1586
+ ER_BINLOG_PURGE_EMFILE = 1587
+ ER_EVENT_CANNOT_CREATE_IN_THE_PAST = 1588
+ ER_EVENT_CANNOT_ALTER_IN_THE_PAST = 1589
+ ER_SLAVE_INCIDENT = 1590
+ ER_NO_PARTITION_FOR_GIVEN_VALUE_SILENT = 1591
+ ER_BINLOG_UNSAFE_STATEMENT = 1592
+ ER_SLAVE_FATAL_ERROR = 1593
+ ER_SLAVE_RELAY_LOG_READ_FAILURE = 1594
+ ER_SLAVE_RELAY_LOG_WRITE_FAILURE = 1595
+ ER_SLAVE_CREATE_EVENT_FAILURE = 1596
+ ER_SLAVE_MASTER_COM_FAILURE = 1597
+ ER_BINLOG_LOGGING_IMPOSSIBLE = 1598
+ ER_VIEW_NO_CREATION_CTX = 1599
+ ER_VIEW_INVALID_CREATION_CTX = 1600
+ ER_SR_INVALID_CREATION_CTX = 1601
+ ER_TRG_CORRUPTED_FILE = 1602
+ ER_TRG_NO_CREATION_CTX = 1603
+ ER_TRG_INVALID_CREATION_CTX = 1604
+ ER_EVENT_INVALID_CREATION_CTX = 1605
+ ER_TRG_CANT_OPEN_TABLE = 1606
+ ER_CANT_CREATE_SROUTINE = 1607
+ ER_NEVER_USED = 1608
+ ER_NO_FORMAT_DESCRIPTION_EVENT_BEFORE_BINLOG_STATEMENT = 1609
+ ER_SLAVE_CORRUPT_EVENT = 1610
+ ER_LOAD_DATA_INVALID_COLUMN = 1611
+ ER_LOG_PURGE_NO_FILE = 1612
+ ER_XA_RBTIMEOUT = 1613
+ ER_XA_RBDEADLOCK = 1614
+ ER_NEED_REPREPARE = 1615
+ ER_DELAYED_NOT_SUPPORTED = 1616
+ WARN_NO_MASTER_INFO = 1617
+ WARN_OPTION_IGNORED = 1618
+ WARN_PLUGIN_DELETE_BUILTIN = 1619
+ WARN_PLUGIN_BUSY = 1620
+ ER_VARIABLE_IS_READONLY = 1621
+ ER_WARN_ENGINE_TRANSACTION_ROLLBACK = 1622
+ ER_SLAVE_HEARTBEAT_FAILURE = 1623
+ ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE = 1624
+ ER_NDB_REPLICATION_SCHEMA_ERROR = 1625
+ ER_CONFLICT_FN_PARSE_ERROR = 1626
+ ER_EXCEPTIONS_WRITE_ERROR = 1627
+ ER_TOO_LONG_TABLE_COMMENT = 1628
+ ER_TOO_LONG_FIELD_COMMENT = 1629
+ ER_FUNC_INEXISTENT_NAME_COLLISION = 1630
+ ER_DATABASE_NAME = 1631
+ ER_TABLE_NAME = 1632
+ ER_PARTITION_NAME = 1633
+ ER_SUBPARTITION_NAME = 1634
+ ER_TEMPORARY_NAME = 1635
+ ER_RENAMED_NAME = 1636
+ ER_TOO_MANY_CONCURRENT_TRXS = 1637
+ WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED = 1638
+ ER_DEBUG_SYNC_TIMEOUT = 1639
+ ER_DEBUG_SYNC_HIT_LIMIT = 1640
+ ER_DUP_SIGNAL_SET = 1641
+ ER_SIGNAL_WARN = 1642
+ ER_SIGNAL_NOT_FOUND = 1643
+ ER_SIGNAL_EXCEPTION = 1644
+ ER_RESIGNAL_WITHOUT_ACTIVE_HANDLER = 1645
+ ER_SIGNAL_BAD_CONDITION_TYPE = 1646
+ WARN_COND_ITEM_TRUNCATED = 1647
+ ER_COND_ITEM_TOO_LONG = 1648
+ ER_UNKNOWN_LOCALE = 1649
+ ER_SLAVE_IGNORE_SERVER_IDS = 1650
+ ER_QUERY_CACHE_DISABLED = 1651
+ ER_SAME_NAME_PARTITION_FIELD = 1652
+ ER_PARTITION_COLUMN_LIST_ERROR = 1653
+ ER_WRONG_TYPE_COLUMN_VALUE_ERROR = 1654
+ ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR = 1655
+ ER_MAXVALUE_IN_VALUES_IN = 1656
+ ER_TOO_MANY_VALUES_ERROR = 1657
+ ER_ROW_SINGLE_PARTITION_FIELD_ERROR = 1658
+ ER_FIELD_TYPE_NOT_ALLOWED_AS_PARTITION_FIELD = 1659
+ ER_PARTITION_FIELDS_TOO_LONG = 1660
+ ER_BINLOG_ROW_ENGINE_AND_STMT_ENGINE = 1661
+ ER_BINLOG_ROW_MODE_AND_STMT_ENGINE = 1662
+ ER_BINLOG_UNSAFE_AND_STMT_ENGINE = 1663
+ ER_BINLOG_ROW_INJECTION_AND_STMT_ENGINE = 1664
+ ER_BINLOG_STMT_MODE_AND_ROW_ENGINE = 1665
+ ER_BINLOG_ROW_INJECTION_AND_STMT_MODE = 1666
+ ER_BINLOG_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE = 1667
+ ER_BINLOG_UNSAFE_LIMIT = 1668
+ ER_BINLOG_UNSAFE_INSERT_DELAYED = 1669
+ ER_BINLOG_UNSAFE_SYSTEM_TABLE = 1670
+ ER_BINLOG_UNSAFE_AUTOINC_COLUMNS = 1671
+ ER_BINLOG_UNSAFE_UDF = 1672
+ ER_BINLOG_UNSAFE_SYSTEM_VARIABLE = 1673
+ ER_BINLOG_UNSAFE_SYSTEM_FUNCTION = 1674
+ ER_BINLOG_UNSAFE_NONTRANS_AFTER_TRANS = 1675
+ ER_MESSAGE_AND_STATEMENT = 1676
+ ER_SLAVE_CONVERSION_FAILED = 1677
+ ER_SLAVE_CANT_CREATE_CONVERSION = 1678
+ ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_FORMAT = 1679
+ ER_PATH_LENGTH = 1680
+ ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT = 1681
+ ER_WRONG_NATIVE_TABLE_STRUCTURE = 1682
+ ER_WRONG_PERFSCHEMA_USAGE = 1683
+ ER_WARN_I_S_SKIPPED_TABLE = 1684
+ ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_DIRECT = 1685
+ ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_DIRECT = 1686
+ ER_SPATIAL_MUST_HAVE_GEOM_COL = 1687
+ ER_TOO_LONG_INDEX_COMMENT = 1688
+ ER_LOCK_ABORTED = 1689
+ ER_DATA_OUT_OF_RANGE = 1690
+ ER_WRONG_SPVAR_TYPE_IN_LIMIT = 1691
+ ER_BINLOG_UNSAFE_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE = 1692
+ ER_BINLOG_UNSAFE_MIXED_STATEMENT = 1693
+ ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SQL_LOG_BIN = 1694
+ ER_STORED_FUNCTION_PREVENTS_SWITCH_SQL_LOG_BIN = 1695
+ ER_FAILED_READ_FROM_PAR_FILE = 1696
+ ER_VALUES_IS_NOT_INT_TYPE_ERROR = 1697
+ ER_ACCESS_DENIED_NO_PASSWORD_ERROR = 1698
+ ER_SET_PASSWORD_AUTH_PLUGIN = 1699
+ ER_GRANT_PLUGIN_USER_EXISTS = 1700
+ ER_TRUNCATE_ILLEGAL_FK = 1701
+ ER_PLUGIN_IS_PERMANENT = 1702
+ ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN = 1703
+ ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX = 1704
+ ER_STMT_CACHE_FULL = 1705
+ ER_MULTI_UPDATE_KEY_CONFLICT = 1706
+ ER_TABLE_NEEDS_REBUILD = 1707
+ WARN_OPTION_BELOW_LIMIT = 1708
+ ER_INDEX_COLUMN_TOO_LONG = 1709
+ ER_ERROR_IN_TRIGGER_BODY = 1710
+ ER_ERROR_IN_UNKNOWN_TRIGGER_BODY = 1711
+ ER_INDEX_CORRUPT = 1712
+ ER_UNDO_RECORD_TOO_BIG = 1713
+ ER_BINLOG_UNSAFE_INSERT_IGNORE_SELECT = 1714
+ ER_BINLOG_UNSAFE_INSERT_SELECT_UPDATE = 1715
+ ER_BINLOG_UNSAFE_REPLACE_SELECT = 1716
+ ER_BINLOG_UNSAFE_CREATE_IGNORE_SELECT = 1717
+ ER_BINLOG_UNSAFE_CREATE_REPLACE_SELECT = 1718
+ ER_BINLOG_UNSAFE_UPDATE_IGNORE = 1719
+ ER_PLUGIN_NO_UNINSTALL = 1720
+ ER_PLUGIN_NO_INSTALL = 1721
+ ER_BINLOG_UNSAFE_WRITE_AUTOINC_SELECT = 1722
+ ER_BINLOG_UNSAFE_CREATE_SELECT_AUTOINC = 1723
+ ER_BINLOG_UNSAFE_INSERT_TWO_KEYS = 1724
+ ER_TABLE_IN_FK_CHECK = 1725
+ ER_UNSUPPORTED_ENGINE = 1726
+ ER_BINLOG_UNSAFE_AUTOINC_NOT_FIRST = 1727
+ ER_CANNOT_LOAD_FROM_TABLE_V2 = 1728
+ ER_MASTER_DELAY_VALUE_OUT_OF_RANGE = 1729
+ ER_ONLY_FD_AND_RBR_EVENTS_ALLOWED_IN_BINLOG_STATEMENT = 1730
+ ER_PARTITION_EXCHANGE_DIFFERENT_OPTION = 1731
+ ER_PARTITION_EXCHANGE_PART_TABLE = 1732
+ ER_PARTITION_EXCHANGE_TEMP_TABLE = 1733
+ ER_PARTITION_INSTEAD_OF_SUBPARTITION = 1734
+ ER_UNKNOWN_PARTITION = 1735
+ ER_TABLES_DIFFERENT_METADATA = 1736
+ ER_ROW_DOES_NOT_MATCH_PARTITION = 1737
+ ER_BINLOG_CACHE_SIZE_GREATER_THAN_MAX = 1738
+ ER_WARN_INDEX_NOT_APPLICABLE = 1739
+ ER_PARTITION_EXCHANGE_FOREIGN_KEY = 1740
+ ER_NO_SUCH_KEY_VALUE = 1741
+ ER_RPL_INFO_DATA_TOO_LONG = 1742
+ ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE = 1743
+ ER_BINLOG_READ_EVENT_CHECKSUM_FAILURE = 1744
+ ER_BINLOG_STMT_CACHE_SIZE_GREATER_THAN_MAX = 1745
+ ER_CANT_UPDATE_TABLE_IN_CREATE_TABLE_SELECT = 1746
+ ER_PARTITION_CLAUSE_ON_NONPARTITIONED = 1747
+ ER_ROW_DOES_NOT_MATCH_GIVEN_PARTITION_SET = 1748
+ ER_NO_SUCH_PARTITION__UNUSED = 1749
+ ER_CHANGE_RPL_INFO_REPOSITORY_FAILURE = 1750
+ ER_WARNING_NOT_COMPLETE_ROLLBACK_WITH_CREATED_TEMP_TABLE = 1751
+ ER_WARNING_NOT_COMPLETE_ROLLBACK_WITH_DROPPED_TEMP_TABLE = 1752
+ ER_MTS_FEATURE_IS_NOT_SUPPORTED = 1753
+ ER_MTS_UPDATED_DBS_GREATER_MAX = 1754
+ ER_MTS_CANT_PARALLEL = 1755
+ ER_MTS_INCONSISTENT_DATA = 1756
+ ER_FULLTEXT_NOT_SUPPORTED_WITH_PARTITIONING = 1757
+ ER_DA_INVALID_CONDITION_NUMBER = 1758
+ ER_INSECURE_PLAIN_TEXT = 1759
+ ER_INSECURE_CHANGE_MASTER = 1760
+ ER_FOREIGN_DUPLICATE_KEY_WITH_CHILD_INFO = 1761
+ ER_FOREIGN_DUPLICATE_KEY_WITHOUT_CHILD_INFO = 1762
+ ER_SQLTHREAD_WITH_SECURE_SLAVE = 1763
+ ER_TABLE_HAS_NO_FT = 1764
+ ER_VARIABLE_NOT_SETTABLE_IN_SF_OR_TRIGGER = 1765
+ ER_VARIABLE_NOT_SETTABLE_IN_TRANSACTION = 1766
+ ER_GTID_NEXT_IS_NOT_IN_GTID_NEXT_LIST = 1767
+ ER_CANT_CHANGE_GTID_NEXT_IN_TRANSACTION_WHEN_GTID_NEXT_LIST_IS_NULL = 1768
+ ER_SET_STATEMENT_CANNOT_INVOKE_FUNCTION = 1769
+ ER_GTID_NEXT_CANT_BE_AUTOMATIC_IF_GTID_NEXT_LIST_IS_NON_NULL = 1770
+ ER_SKIPPING_LOGGED_TRANSACTION = 1771
+ ER_MALFORMED_GTID_SET_SPECIFICATION = 1772
+ ER_MALFORMED_GTID_SET_ENCODING = 1773
+ ER_MALFORMED_GTID_SPECIFICATION = 1774
+ ER_GNO_EXHAUSTED = 1775
+ ER_BAD_SLAVE_AUTO_POSITION = 1776
+ ER_AUTO_POSITION_REQUIRES_GTID_MODE_ON = 1777
+ ER_CANT_DO_IMPLICIT_COMMIT_IN_TRX_WHEN_GTID_NEXT_IS_SET = 1778
+ ER_GTID_MODE_2_OR_3_REQUIRES_ENFORCE_GTID_CONSISTENCY_ON = 1779
+ ER_GTID_MODE_REQUIRES_BINLOG = 1780
+ ER_CANT_SET_GTID_NEXT_TO_GTID_WHEN_GTID_MODE_IS_OFF = 1781
+ ER_CANT_SET_GTID_NEXT_TO_ANONYMOUS_WHEN_GTID_MODE_IS_ON = 1782
+ ER_CANT_SET_GTID_NEXT_LIST_TO_NON_NULL_WHEN_GTID_MODE_IS_OFF = 1783
+ ER_FOUND_GTID_EVENT_WHEN_GTID_MODE_IS_OFF = 1784
+ ER_GTID_UNSAFE_NON_TRANSACTIONAL_TABLE = 1785
+ ER_GTID_UNSAFE_CREATE_SELECT = 1786
+ ER_GTID_UNSAFE_CREATE_DROP_TEMPORARY_TABLE_IN_TRANSACTION = 1787
+ ER_GTID_MODE_CAN_ONLY_CHANGE_ONE_STEP_AT_A_TIME = 1788
+ ER_MASTER_HAS_PURGED_REQUIRED_GTIDS = 1789
+ ER_CANT_SET_GTID_NEXT_WHEN_OWNING_GTID = 1790
+ ER_UNKNOWN_EXPLAIN_FORMAT = 1791
+ ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION = 1792
+ ER_TOO_LONG_TABLE_PARTITION_COMMENT = 1793
+ ER_SLAVE_CONFIGURATION = 1794
+ ER_INNODB_FT_LIMIT = 1795
+ ER_INNODB_NO_FT_TEMP_TABLE = 1796
+ ER_INNODB_FT_WRONG_DOCID_COLUMN = 1797
+ ER_INNODB_FT_WRONG_DOCID_INDEX = 1798
+ ER_INNODB_ONLINE_LOG_TOO_BIG = 1799
+ ER_UNKNOWN_ALTER_ALGORITHM = 1800
+ ER_UNKNOWN_ALTER_LOCK = 1801
+ ER_MTS_CHANGE_MASTER_CANT_RUN_WITH_GAPS = 1802
+ ER_MTS_RECOVERY_FAILURE = 1803
+ ER_MTS_RESET_WORKERS = 1804
+ ER_COL_COUNT_DOESNT_MATCH_CORRUPTED_V2 = 1805
+ ER_SLAVE_SILENT_RETRY_TRANSACTION = 1806
+ ER_DISCARD_FK_CHECKS_RUNNING = 1807
+ ER_TABLE_SCHEMA_MISMATCH = 1808
+ ER_TABLE_IN_SYSTEM_TABLESPACE = 1809
+ ER_IO_READ_ERROR = 1810
+ ER_IO_WRITE_ERROR = 1811
+ ER_TABLESPACE_MISSING = 1812
+ ER_TABLESPACE_EXISTS = 1813
+ ER_TABLESPACE_DISCARDED = 1814
+ ER_INTERNAL_ERROR = 1815
+ ER_INNODB_IMPORT_ERROR = 1816
+ ER_INNODB_INDEX_CORRUPT = 1817
+ ER_INVALID_YEAR_COLUMN_LENGTH = 1818
+ ER_NOT_VALID_PASSWORD = 1819
+ ER_MUST_CHANGE_PASSWORD = 1820
+ ER_FK_NO_INDEX_CHILD = 1821
+ ER_FK_NO_INDEX_PARENT = 1822
+ ER_FK_FAIL_ADD_SYSTEM = 1823
+ ER_FK_CANNOT_OPEN_PARENT = 1824
+ ER_FK_INCORRECT_OPTION = 1825
+ ER_FK_DUP_NAME = 1826
+ ER_PASSWORD_FORMAT = 1827
+ ER_FK_COLUMN_CANNOT_DROP = 1828
+ ER_FK_COLUMN_CANNOT_DROP_CHILD = 1829
+ ER_FK_COLUMN_NOT_NULL = 1830
+ ER_DUP_INDEX = 1831
+ ER_FK_COLUMN_CANNOT_CHANGE = 1832
+ ER_FK_COLUMN_CANNOT_CHANGE_CHILD = 1833
+ ER_FK_CANNOT_DELETE_PARENT = 1834
+ ER_MALFORMED_PACKET = 1835
+ ER_READ_ONLY_MODE = 1836
+ ER_GTID_NEXT_TYPE_UNDEFINED_GROUP = 1837
+ ER_VARIABLE_NOT_SETTABLE_IN_SP = 1838
+ ER_CANT_SET_GTID_PURGED_WHEN_GTID_MODE_IS_OFF = 1839
+ ER_CANT_SET_GTID_PURGED_WHEN_GTID_EXECUTED_IS_NOT_EMPTY = 1840
+ ER_CANT_SET_GTID_PURGED_WHEN_OWNED_GTIDS_IS_NOT_EMPTY = 1841
+ ER_GTID_PURGED_WAS_CHANGED = 1842
+ ER_GTID_EXECUTED_WAS_CHANGED = 1843
+ ER_BINLOG_STMT_MODE_AND_NO_REPL_TABLES = 1844
+ ER_ALTER_OPERATION_NOT_SUPPORTED = 1845
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON = 1846
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COPY = 1847
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_PARTITION = 1848
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_RENAME = 1849
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COLUMN_TYPE = 1850
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_CHECK = 1851
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_IGNORE = 1852
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOPK = 1853
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_AUTOINC = 1854
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_HIDDEN_FTS = 1855
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_CHANGE_FTS = 1856
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FTS = 1857
+ ER_SQL_SLAVE_SKIP_COUNTER_NOT_SETTABLE_IN_GTID_MODE = 1858
+ ER_DUP_UNKNOWN_IN_INDEX = 1859
+ ER_IDENT_CAUSES_TOO_LONG_PATH = 1860
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOT_NULL = 1861
+ ER_MUST_CHANGE_PASSWORD_LOGIN = 1862
+ ER_ROW_IN_WRONG_PARTITION = 1863
+ ER_ERROR_LAST = 1863
+)
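
The ER_* values above are the numeric error codes carried in the MySQL error packet; the MySQLErrName map in errname.go below pairs each code with its printf-style message template. A minimal sketch of turning a code and template into a readable error string, assuming the vendored package is importable as github.com/siddontang/go-mysql/mysql; not every template uses Go-compatible verbs (several use C-style %lu/%ld), but this one does:

package main

import (
	"fmt"

	"github.com/siddontang/go-mysql/mysql"
)

func main() {
	// Look up the message template for ER_ACCESS_DENIED_ERROR (1045) and
	// fill in the user, host, and "using password" placeholders.
	tmpl := mysql.MySQLErrName[mysql.ER_ACCESS_DENIED_ERROR]
	msg := fmt.Sprintf(tmpl, "app", "10.0.0.5", "YES")

	fmt.Printf("ERROR %d: %s\n", mysql.ER_ACCESS_DENIED_ERROR, msg)
}
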
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/errname.go b/vendor/github.com/siddontang/go-mysql/mysql/errname.go
new file mode 100644
index 000000000..7f8575a72
--- /dev/null
+++ b/vendor/github.com/siddontang/go-mysql/mysql/errname.go
@@ -0,0 +1,868 @@
+package mysql
+
+var MySQLErrName = map[uint16]string{
+ ER_HASHCHK: "hashchk",
+ ER_NISAMCHK: "isamchk",
+ ER_NO: "NO",
+ ER_YES: "YES",
+ ER_CANT_CREATE_FILE: "Can't create file '%-.200s' (errno: %d - %s)",
+ ER_CANT_CREATE_TABLE: "Can't create table '%-.200s' (errno: %d)",
+ ER_CANT_CREATE_DB: "Can't create database '%-.192s' (errno: %d)",
+ ER_DB_CREATE_EXISTS: "Can't create database '%-.192s'; database exists",
+ ER_DB_DROP_EXISTS: "Can't drop database '%-.192s'; database doesn't exist",
+ ER_DB_DROP_DELETE: "Error dropping database (can't delete '%-.192s', errno: %d)",
+ ER_DB_DROP_RMDIR: "Error dropping database (can't rmdir '%-.192s', errno: %d)",
+ ER_CANT_DELETE_FILE: "Error on delete of '%-.192s' (errno: %d - %s)",
+ ER_CANT_FIND_SYSTEM_REC: "Can't read record in system table",
+ ER_CANT_GET_STAT: "Can't get status of '%-.200s' (errno: %d - %s)",
+ ER_CANT_GET_WD: "Can't get working directory (errno: %d - %s)",
+ ER_CANT_LOCK: "Can't lock file (errno: %d - %s)",
+ ER_CANT_OPEN_FILE: "Can't open file: '%-.200s' (errno: %d - %s)",
+ ER_FILE_NOT_FOUND: "Can't find file: '%-.200s' (errno: %d - %s)",
+ ER_CANT_READ_DIR: "Can't read dir of '%-.192s' (errno: %d - %s)",
+ ER_CANT_SET_WD: "Can't change dir to '%-.192s' (errno: %d - %s)",
+ ER_CHECKREAD: "Record has changed since last read in table '%-.192s'",
+ ER_DISK_FULL: "Disk full (%s); waiting for someone to free some space... (errno: %d - %s)",
+ ER_DUP_KEY: "Can't write; duplicate key in table '%-.192s'",
+ ER_ERROR_ON_CLOSE: "Error on close of '%-.192s' (errno: %d - %s)",
+ ER_ERROR_ON_READ: "Error reading file '%-.200s' (errno: %d - %s)",
+ ER_ERROR_ON_RENAME: "Error on rename of '%-.210s' to '%-.210s' (errno: %d - %s)",
+ ER_ERROR_ON_WRITE: "Error writing file '%-.200s' (errno: %d - %s)",
+ ER_FILE_USED: "'%-.192s' is locked against change",
+ ER_FILSORT_ABORT: "Sort aborted",
+ ER_FORM_NOT_FOUND: "View '%-.192s' doesn't exist for '%-.192s'",
+ ER_GET_ERRNO: "Got error %d from storage engine",
+ ER_ILLEGAL_HA: "Table storage engine for '%-.192s' doesn't have this option",
+ ER_KEY_NOT_FOUND: "Can't find record in '%-.192s'",
+ ER_NOT_FORM_FILE: "Incorrect information in file: '%-.200s'",
+ ER_NOT_KEYFILE: "Incorrect key file for table '%-.200s'; try to repair it",
+ ER_OLD_KEYFILE: "Old key file for table '%-.192s'; repair it!",
+ ER_OPEN_AS_READONLY: "Table '%-.192s' is read only",
+ ER_OUTOFMEMORY: "Out of memory; restart server and try again (needed %d bytes)",
+ ER_OUT_OF_SORTMEMORY: "Out of sort memory, consider increasing server sort buffer size",
+ ER_UNEXPECTED_EOF: "Unexpected EOF found when reading file '%-.192s' (errno: %d - %s)",
+ ER_CON_COUNT_ERROR: "Too many connections",
+ ER_OUT_OF_RESOURCES: "Out of memory; check if mysqld or some other process uses all available memory; if not, you may have to use 'ulimit' to allow mysqld to use more memory or you can add more swap space",
+ ER_BAD_HOST_ERROR: "Can't get hostname for your address",
+ ER_HANDSHAKE_ERROR: "Bad handshake",
+ ER_DBACCESS_DENIED_ERROR: "Access denied for user '%-.48s'@'%-.64s' to database '%-.192s'",
+ ER_ACCESS_DENIED_ERROR: "Access denied for user '%-.48s'@'%-.64s' (using password: %s)",
+ ER_NO_DB_ERROR: "No database selected",
+ ER_UNKNOWN_COM_ERROR: "Unknown command",
+ ER_BAD_NULL_ERROR: "Column '%-.192s' cannot be null",
+ ER_BAD_DB_ERROR: "Unknown database '%-.192s'",
+ ER_TABLE_EXISTS_ERROR: "Table '%-.192s' already exists",
+ ER_BAD_TABLE_ERROR: "Unknown table '%-.100s'",
+ ER_NON_UNIQ_ERROR: "Column '%-.192s' in %-.192s is ambiguous",
+ ER_SERVER_SHUTDOWN: "Server shutdown in progress",
+ ER_BAD_FIELD_ERROR: "Unknown column '%-.192s' in '%-.192s'",
+ ER_WRONG_FIELD_WITH_GROUP: "'%-.192s' isn't in GROUP BY",
+ ER_WRONG_GROUP_FIELD: "Can't group on '%-.192s'",
+ ER_WRONG_SUM_SELECT: "Statement has sum functions and columns in same statement",
+ ER_WRONG_VALUE_COUNT: "Column count doesn't match value count",
+ ER_TOO_LONG_IDENT: "Identifier name '%-.100s' is too long",
+ ER_DUP_FIELDNAME: "Duplicate column name '%-.192s'",
+ ER_DUP_KEYNAME: "Duplicate key name '%-.192s'",
+ ER_DUP_ENTRY: "Duplicate entry '%-.192s' for key %d",
+ ER_WRONG_FIELD_SPEC: "Incorrect column specifier for column '%-.192s'",
+ ER_PARSE_ERROR: "%s near '%-.80s' at line %d",
+ ER_EMPTY_QUERY: "Query was empty",
+ ER_NONUNIQ_TABLE: "Not unique table/alias: '%-.192s'",
+ ER_INVALID_DEFAULT: "Invalid default value for '%-.192s'",
+ ER_MULTIPLE_PRI_KEY: "Multiple primary key defined",
+ ER_TOO_MANY_KEYS: "Too many keys specified; max %d keys allowed",
+ ER_TOO_MANY_KEY_PARTS: "Too many key parts specified; max %d parts allowed",
+ ER_TOO_LONG_KEY: "Specified key was too long; max key length is %d bytes",
+ ER_KEY_COLUMN_DOES_NOT_EXITS: "Key column '%-.192s' doesn't exist in table",
+ ER_BLOB_USED_AS_KEY: "BLOB column '%-.192s' can't be used in key specification with the used table type",
+ ER_TOO_BIG_FIELDLENGTH: "Column length too big for column '%-.192s' (max = %lu); use BLOB or TEXT instead",
+ ER_WRONG_AUTO_KEY: "Incorrect table definition; there can be only one auto column and it must be defined as a key",
+ ER_READY: "%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d",
+ ER_NORMAL_SHUTDOWN: "%s: Normal shutdown\n",
+ ER_GOT_SIGNAL: "%s: Got signal %d. Aborting!\n",
+ ER_SHUTDOWN_COMPLETE: "%s: Shutdown complete\n",
+ ER_FORCING_CLOSE: "%s: Forcing close of thread %ld user: '%-.48s'\n",
+ ER_IPSOCK_ERROR: "Can't create IP socket",
+ ER_NO_SUCH_INDEX: "Table '%-.192s' has no index like the one used in CREATE INDEX; recreate the table",
+ ER_WRONG_FIELD_TERMINATORS: "Field separator argument is not what is expected; check the manual",
+ ER_BLOBS_AND_NO_TERMINATED: "You can't use fixed rowlength with BLOBs; please use 'fields terminated by'",
+ ER_TEXTFILE_NOT_READABLE: "The file '%-.128s' must be in the database directory or be readable by all",
+ ER_FILE_EXISTS_ERROR: "File '%-.200s' already exists",
+ ER_LOAD_INFO: "Records: %ld Deleted: %ld Skipped: %ld Warnings: %ld",
+ ER_ALTER_INFO: "Records: %ld Duplicates: %ld",
+ ER_WRONG_SUB_KEY: "Incorrect prefix key; the used key part isn't a string, the used length is longer than the key part, or the storage engine doesn't support unique prefix keys",
+ ER_CANT_REMOVE_ALL_FIELDS: "You can't delete all columns with ALTER TABLE; use DROP TABLE instead",
+ ER_CANT_DROP_FIELD_OR_KEY: "Can't DROP '%-.192s'; check that column/key exists",
+ ER_INSERT_INFO: "Records: %ld Duplicates: %ld Warnings: %ld",
+ ER_UPDATE_TABLE_USED: "You can't specify target table '%-.192s' for update in FROM clause",
+ ER_NO_SUCH_THREAD: "Unknown thread id: %lu",
+ ER_KILL_DENIED_ERROR: "You are not owner of thread %lu",
+ ER_NO_TABLES_USED: "No tables used",
+ ER_TOO_BIG_SET: "Too many strings for column %-.192s and SET",
+ ER_NO_UNIQUE_LOGFILE: "Can't generate a unique log-filename %-.200s.(1-999)\n",
+ ER_TABLE_NOT_LOCKED_FOR_WRITE: "Table '%-.192s' was locked with a READ lock and can't be updated",
+ ER_TABLE_NOT_LOCKED: "Table '%-.192s' was not locked with LOCK TABLES",
+ ER_BLOB_CANT_HAVE_DEFAULT: "BLOB/TEXT column '%-.192s' can't have a default value",
+ ER_WRONG_DB_NAME: "Incorrect database name '%-.100s'",
+ ER_WRONG_TABLE_NAME: "Incorrect table name '%-.100s'",
+ ER_TOO_BIG_SELECT: "The SELECT would examine more than MAX_JOIN_SIZE rows; check your WHERE and use SET SQL_BIG_SELECTS=1 or SET MAX_JOIN_SIZE=# if the SELECT is okay",
+ ER_UNKNOWN_ERROR: "Unknown error",
+ ER_UNKNOWN_PROCEDURE: "Unknown procedure '%-.192s'",
+ ER_WRONG_PARAMCOUNT_TO_PROCEDURE: "Incorrect parameter count to procedure '%-.192s'",
+ ER_WRONG_PARAMETERS_TO_PROCEDURE: "Incorrect parameters to procedure '%-.192s'",
+ ER_UNKNOWN_TABLE: "Unknown table '%-.192s' in %-.32s",
+ ER_FIELD_SPECIFIED_TWICE: "Column '%-.192s' specified twice",
+ ER_INVALID_GROUP_FUNC_USE: "Invalid use of group function",
+ ER_UNSUPPORTED_EXTENSION: "Table '%-.192s' uses an extension that doesn't exist in this MySQL version",
+ ER_TABLE_MUST_HAVE_COLUMNS: "A table must have at least 1 column",
+ ER_RECORD_FILE_FULL: "The table '%-.192s' is full",
+ ER_UNKNOWN_CHARACTER_SET: "Unknown character set: '%-.64s'",
+ ER_TOO_MANY_TABLES: "Too many tables; MySQL can only use %d tables in a join",
+ ER_TOO_MANY_FIELDS: "Too many columns",
+ ER_TOO_BIG_ROWSIZE: "Row size too large. The maximum row size for the used table type, not counting BLOBs, is %ld. This includes storage overhead, check the manual. You have to change some columns to TEXT or BLOBs",
+ ER_STACK_OVERRUN: "Thread stack overrun: Used: %ld of a %ld stack. Use 'mysqld --thread_stack=#' to specify a bigger stack if needed",
+ ER_WRONG_OUTER_JOIN: "Cross dependency found in OUTER JOIN; examine your ON conditions",
+ ER_NULL_COLUMN_IN_INDEX: "Table handler doesn't support NULL in given index. Please change column '%-.192s' to be NOT NULL or use another handler",
+ ER_CANT_FIND_UDF: "Can't load function '%-.192s'",
+ ER_CANT_INITIALIZE_UDF: "Can't initialize function '%-.192s'; %-.80s",
+ ER_UDF_NO_PATHS: "No paths allowed for shared library",
+ ER_UDF_EXISTS: "Function '%-.192s' already exists",
+ ER_CANT_OPEN_LIBRARY: "Can't open shared library '%-.192s' (errno: %d %-.128s)",
+ ER_CANT_FIND_DL_ENTRY: "Can't find symbol '%-.128s' in library",
+ ER_FUNCTION_NOT_DEFINED: "Function '%-.192s' is not defined",
+ ER_HOST_IS_BLOCKED: "Host '%-.64s' is blocked because of many connection errors; unblock with 'mysqladmin flush-hosts'",
+ ER_HOST_NOT_PRIVILEGED: "Host '%-.64s' is not allowed to connect to this MySQL server",
+ ER_PASSWORD_ANONYMOUS_USER: "You are using MySQL as an anonymous user and anonymous users are not allowed to change passwords",
+ ER_PASSWORD_NOT_ALLOWED: "You must have privileges to update tables in the mysql database to be able to change passwords for others",
+ ER_PASSWORD_NO_MATCH: "Can't find any matching row in the user table",
+ ER_UPDATE_INFO: "Rows matched: %ld Changed: %ld Warnings: %ld",
+ ER_CANT_CREATE_THREAD: "Can't create a new thread (errno %d); if you are not out of available memory, you can consult the manual for a possible OS-dependent bug",
+ ER_WRONG_VALUE_COUNT_ON_ROW: "Column count doesn't match value count at row %ld",
+ ER_CANT_REOPEN_TABLE: "Can't reopen table: '%-.192s'",
+ ER_INVALID_USE_OF_NULL: "Invalid use of NULL value",
+ ER_REGEXP_ERROR: "Got error '%-.64s' from regexp",
+ ER_MIX_OF_GROUP_FUNC_AND_FIELDS: "Mixing of GROUP columns (MIN(),MAX(),COUNT(),...) with no GROUP columns is illegal if there is no GROUP BY clause",
+ ER_NONEXISTING_GRANT: "There is no such grant defined for user '%-.48s' on host '%-.64s'",
+ ER_TABLEACCESS_DENIED_ERROR: "%-.128s command denied to user '%-.48s'@'%-.64s' for table '%-.64s'",
+ ER_COLUMNACCESS_DENIED_ERROR: "%-.16s command denied to user '%-.48s'@'%-.64s' for column '%-.192s' in table '%-.192s'",
+ ER_ILLEGAL_GRANT_FOR_TABLE: "Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used",
+ ER_GRANT_WRONG_HOST_OR_USER: "The host or user argument to GRANT is too long",
+ ER_NO_SUCH_TABLE: "Table '%-.192s.%-.192s' doesn't exist",
+ ER_NONEXISTING_TABLE_GRANT: "There is no such grant defined for user '%-.48s' on host '%-.64s' on table '%-.192s'",
+ ER_NOT_ALLOWED_COMMAND: "The used command is not allowed with this MySQL version",
+ ER_SYNTAX_ERROR: "You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use",
+ ER_DELAYED_CANT_CHANGE_LOCK: "Delayed insert thread couldn't get requested lock for table %-.192s",
+ ER_TOO_MANY_DELAYED_THREADS: "Too many delayed threads in use",
+ ER_ABORTING_CONNECTION: "Aborted connection %ld to db: '%-.192s' user: '%-.48s' (%-.64s)",
+ ER_NET_PACKET_TOO_LARGE: "Got a packet bigger than 'max_allowed_packet' bytes",
+ ER_NET_READ_ERROR_FROM_PIPE: "Got a read error from the connection pipe",
+ ER_NET_FCNTL_ERROR: "Got an error from fcntl()",
+ ER_NET_PACKETS_OUT_OF_ORDER: "Got packets out of order",
+ ER_NET_UNCOMPRESS_ERROR: "Couldn't uncompress communication packet",
+ ER_NET_READ_ERROR: "Got an error reading communication packets",
+ ER_NET_READ_INTERRUPTED: "Got timeout reading communication packets",
+ ER_NET_ERROR_ON_WRITE: "Got an error writing communication packets",
+ ER_NET_WRITE_INTERRUPTED: "Got timeout writing communication packets",
+ ER_TOO_LONG_STRING: "Result string is longer than 'max_allowed_packet' bytes",
+ ER_TABLE_CANT_HANDLE_BLOB: "The used table type doesn't support BLOB/TEXT columns",
+ ER_TABLE_CANT_HANDLE_AUTO_INCREMENT: "The used table type doesn't support AUTO_INCREMENT columns",
+ ER_DELAYED_INSERT_TABLE_LOCKED: "INSERT DELAYED can't be used with table '%-.192s' because it is locked with LOCK TABLES",
+ ER_WRONG_COLUMN_NAME: "Incorrect column name '%-.100s'",
+ ER_WRONG_KEY_COLUMN: "The used storage engine can't index column '%-.192s'",
+ ER_WRONG_MRG_TABLE: "Unable to open underlying table which is differently defined or of non-MyISAM type or doesn't exist",
+ ER_DUP_UNIQUE: "Can't write, because of unique constraint, to table '%-.192s'",
+ ER_BLOB_KEY_WITHOUT_LENGTH: "BLOB/TEXT column '%-.192s' used in key specification without a key length",
+ ER_PRIMARY_CANT_HAVE_NULL: "All parts of a PRIMARY KEY must be NOT NULL; if you need NULL in a key, use UNIQUE instead",
+ ER_TOO_MANY_ROWS: "Result consisted of more than one row",
+ ER_REQUIRES_PRIMARY_KEY: "This table type requires a primary key",
+ ER_NO_RAID_COMPILED: "This version of MySQL is not compiled with RAID support",
+ ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE: "You are using safe update mode and you tried to update a table without a WHERE that uses a KEY column",
+ ER_KEY_DOES_NOT_EXITS: "Key '%-.192s' doesn't exist in table '%-.192s'",
+ ER_CHECK_NO_SUCH_TABLE: "Can't open table",
+ ER_CHECK_NOT_IMPLEMENTED: "The storage engine for the table doesn't support %s",
+ ER_CANT_DO_THIS_DURING_AN_TRANSACTION: "You are not allowed to execute this command in a transaction",
+ ER_ERROR_DURING_COMMIT: "Got error %d during COMMIT",
+ ER_ERROR_DURING_ROLLBACK: "Got error %d during ROLLBACK",
+ ER_ERROR_DURING_FLUSH_LOGS: "Got error %d during FLUSH_LOGS",
+ ER_ERROR_DURING_CHECKPOINT: "Got error %d during CHECKPOINT",
+ ER_NEW_ABORTING_CONNECTION: "Aborted connection %ld to db: '%-.192s' user: '%-.48s' host: '%-.64s' (%-.64s)",
+ ER_DUMP_NOT_IMPLEMENTED: "The storage engine for the table does not support binary table dump",
+ ER_FLUSH_MASTER_BINLOG_CLOSED: "Binlog closed, cannot RESET MASTER",
+ ER_INDEX_REBUILD: "Failed rebuilding the index of dumped table '%-.192s'",
+ ER_MASTER: "Error from master: '%-.64s'",
+ ER_MASTER_NET_READ: "Net error reading from master",
+ ER_MASTER_NET_WRITE: "Net error writing to master",
+ ER_FT_MATCHING_KEY_NOT_FOUND: "Can't find FULLTEXT index matching the column list",
+ ER_LOCK_OR_ACTIVE_TRANSACTION: "Can't execute the given command because you have active locked tables or an active transaction",
+ ER_UNKNOWN_SYSTEM_VARIABLE: "Unknown system variable '%-.64s'",
+ ER_CRASHED_ON_USAGE: "Table '%-.192s' is marked as crashed and should be repaired",
+ ER_CRASHED_ON_REPAIR: "Table '%-.192s' is marked as crashed and last (automatic?) repair failed",
+ ER_WARNING_NOT_COMPLETE_ROLLBACK: "Some non-transactional changed tables couldn't be rolled back",
+ ER_TRANS_CACHE_FULL: "Multi-statement transaction required more than 'max_binlog_cache_size' bytes of storage; increase this mysqld variable and try again",
+ ER_SLAVE_MUST_STOP: "This operation cannot be performed with a running slave; run STOP SLAVE first",
+ ER_SLAVE_NOT_RUNNING: "This operation requires a running slave; configure slave and do START SLAVE",
+ ER_BAD_SLAVE: "The server is not configured as slave; fix in config file or with CHANGE MASTER TO",
+ ER_MASTER_INFO: "Could not initialize master info structure; more error messages can be found in the MySQL error log",
+ ER_SLAVE_THREAD: "Could not create slave thread; check system resources",
+ ER_TOO_MANY_USER_CONNECTIONS: "User %-.64s already has more than 'max_user_connections' active connections",
+ ER_SET_CONSTANTS_ONLY: "You may only use constant expressions with SET",
+ ER_LOCK_WAIT_TIMEOUT: "Lock wait timeout exceeded; try restarting transaction",
+ ER_LOCK_TABLE_FULL: "The total number of locks exceeds the lock table size",
+ ER_READ_ONLY_TRANSACTION: "Update locks cannot be acquired during a READ UNCOMMITTED transaction",
+ ER_DROP_DB_WITH_READ_LOCK: "DROP DATABASE not allowed while thread is holding global read lock",
+ ER_CREATE_DB_WITH_READ_LOCK: "CREATE DATABASE not allowed while thread is holding global read lock",
+ ER_WRONG_ARGUMENTS: "Incorrect arguments to %s",
+ ER_NO_PERMISSION_TO_CREATE_USER: "'%-.48s'@'%-.64s' is not allowed to create new users",
+ ER_UNION_TABLES_IN_DIFFERENT_DIR: "Incorrect table definition; all MERGE tables must be in the same database",
+ ER_LOCK_DEADLOCK: "Deadlock found when trying to get lock; try restarting transaction",
+ ER_TABLE_CANT_HANDLE_FT: "The used table type doesn't support FULLTEXT indexes",
+ ER_CANNOT_ADD_FOREIGN: "Cannot add foreign key constraint",
+ ER_NO_REFERENCED_ROW: "Cannot add or update a child row: a foreign key constraint fails",
+ ER_ROW_IS_REFERENCED: "Cannot delete or update a parent row: a foreign key constraint fails",
+ ER_CONNECT_TO_MASTER: "Error connecting to master: %-.128s",
+ ER_QUERY_ON_MASTER: "Error running query on master: %-.128s",
+ ER_ERROR_WHEN_EXECUTING_COMMAND: "Error when executing command %s: %-.128s",
+ ER_WRONG_USAGE: "Incorrect usage of %s and %s",
+ ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT: "The used SELECT statements have a different number of columns",
+ ER_CANT_UPDATE_WITH_READLOCK: "Can't execute the query because you have a conflicting read lock",
+ ER_MIXING_NOT_ALLOWED: "Mixing of transactional and non-transactional tables is disabled",
+ ER_DUP_ARGUMENT: "Option '%s' used twice in statement",
+ ER_USER_LIMIT_REACHED: "User '%-.64s' has exceeded the '%s' resource (current value: %ld)",
+ ER_SPECIFIC_ACCESS_DENIED_ERROR: "Access denied; you need (at least one of) the %-.128s privilege(s) for this operation",
+ ER_LOCAL_VARIABLE: "Variable '%-.64s' is a SESSION variable and can't be used with SET GLOBAL",
+ ER_GLOBAL_VARIABLE: "Variable '%-.64s' is a GLOBAL variable and should be set with SET GLOBAL",
+ ER_NO_DEFAULT: "Variable '%-.64s' doesn't have a default value",
+ ER_WRONG_VALUE_FOR_VAR: "Variable '%-.64s' can't be set to the value of '%-.200s'",
+ ER_WRONG_TYPE_FOR_VAR: "Incorrect argument type to variable '%-.64s'",
+ ER_VAR_CANT_BE_READ: "Variable '%-.64s' can only be set, not read",
+ ER_CANT_USE_OPTION_HERE: "Incorrect usage/placement of '%s'",
+ ER_NOT_SUPPORTED_YET: "This version of MySQL doesn't yet support '%s'",
+ ER_MASTER_FATAL_ERROR_READING_BINLOG: "Got fatal error %d from master when reading data from binary log: '%-.320s'",
+ ER_SLAVE_IGNORED_TABLE: "Slave SQL thread ignored the query because of replicate-*-table rules",
+ ER_INCORRECT_GLOBAL_LOCAL_VAR: "Variable '%-.192s' is a %s variable",
+ ER_WRONG_FK_DEF: "Incorrect foreign key definition for '%-.192s': %s",
+ ER_KEY_REF_DO_NOT_MATCH_TABLE_REF: "Key reference and table reference don't match",
+ ER_OPERAND_COLUMNS: "Operand should contain %d column(s)",
+ ER_SUBQUERY_NO_1_ROW: "Subquery returns more than 1 row",
+ ER_UNKNOWN_STMT_HANDLER: "Unknown prepared statement handler (%.*s) given to %s",
+ ER_CORRUPT_HELP_DB: "Help database is corrupt or does not exist",
+ ER_CYCLIC_REFERENCE: "Cyclic reference on subqueries",
+ ER_AUTO_CONVERT: "Converting column '%s' from %s to %s",
+ ER_ILLEGAL_REFERENCE: "Reference '%-.64s' not supported (%s)",
+ ER_DERIVED_MUST_HAVE_ALIAS: "Every derived table must have its own alias",
+ ER_SELECT_REDUCED: "Select %u was reduced during optimization",
+ ER_TABLENAME_NOT_ALLOWED_HERE: "Table '%-.192s' from one of the SELECTs cannot be used in %-.32s",
+ ER_NOT_SUPPORTED_AUTH_MODE: "Client does not support authentication protocol requested by server; consider upgrading MySQL client",
+ ER_SPATIAL_CANT_HAVE_NULL: "All parts of a SPATIAL index must be NOT NULL",
+ ER_COLLATION_CHARSET_MISMATCH: "COLLATION '%s' is not valid for CHARACTER SET '%s'",
+ ER_SLAVE_WAS_RUNNING: "Slave is already running",
+ ER_SLAVE_WAS_NOT_RUNNING: "Slave already has been stopped",
+ ER_TOO_BIG_FOR_UNCOMPRESS: "Uncompressed data size too large; the maximum size is %d (probably, length of uncompressed data was corrupted)",
+ ER_ZLIB_Z_MEM_ERROR: "ZLIB: Not enough memory",
+ ER_ZLIB_Z_BUF_ERROR: "ZLIB: Not enough room in the output buffer (probably, length of uncompressed data was corrupted)",
+ ER_ZLIB_Z_DATA_ERROR: "ZLIB: Input data corrupted",
+ ER_CUT_VALUE_GROUP_CONCAT: "Row %u was cut by GROUP_CONCAT()",
+ ER_WARN_TOO_FEW_RECORDS: "Row %ld doesn't contain data for all columns",
+ ER_WARN_TOO_MANY_RECORDS: "Row %ld was truncated; it contained more data than there were input columns",
+ ER_WARN_NULL_TO_NOTNULL: "Column set to default value; NULL supplied to NOT NULL column '%s' at row %ld",
+ ER_WARN_DATA_OUT_OF_RANGE: "Out of range value for column '%s' at row %ld",
+ WARN_DATA_TRUNCATED: "Data truncated for column '%s' at row %ld",
+ ER_WARN_USING_OTHER_HANDLER: "Using storage engine %s for table '%s'",
+ ER_CANT_AGGREGATE_2COLLATIONS: "Illegal mix of collations (%s,%s) and (%s,%s) for operation '%s'",
+ ER_DROP_USER: "Cannot drop one or more of the requested users",
+ ER_REVOKE_GRANTS: "Can't revoke all privileges for one or more of the requested users",
+ ER_CANT_AGGREGATE_3COLLATIONS: "Illegal mix of collations (%s,%s), (%s,%s), (%s,%s) for operation '%s'",
+ ER_CANT_AGGREGATE_NCOLLATIONS: "Illegal mix of collations for operation '%s'",
+ ER_VARIABLE_IS_NOT_STRUCT: "Variable '%-.64s' is not a variable component (can't be used as XXXX.variable_name)",
+ ER_UNKNOWN_COLLATION: "Unknown collation: '%-.64s'",
+ ER_SLAVE_IGNORED_SSL_PARAMS: "SSL parameters in CHANGE MASTER are ignored because this MySQL slave was compiled without SSL support; they can be used later if MySQL slave with SSL is started",
+ ER_SERVER_IS_IN_SECURE_AUTH_MODE: "Server is running in --secure-auth mode, but '%s'@'%s' has a password in the old format; please change the password to the new format",
+ ER_WARN_FIELD_RESOLVED: "Field or reference '%-.192s%s%-.192s%s%-.192s' of SELECT #%d was resolved in SELECT #%d",
+ ER_BAD_SLAVE_UNTIL_COND: "Incorrect parameter or combination of parameters for START SLAVE UNTIL",
+ ER_MISSING_SKIP_SLAVE: "It is recommended to use --skip-slave-start when doing step-by-step replication with START SLAVE UNTIL; otherwise, you will get problems if you get an unexpected slave's mysqld restart",
+ ER_UNTIL_COND_IGNORED: "SQL thread is not to be started so UNTIL options are ignored",
+ ER_WRONG_NAME_FOR_INDEX: "Incorrect index name '%-.100s'",
+ ER_WRONG_NAME_FOR_CATALOG: "Incorrect catalog name '%-.100s'",
+ ER_WARN_QC_RESIZE: "Query cache failed to set size %lu; new query cache size is %lu",
+ ER_BAD_FT_COLUMN: "Column '%-.192s' cannot be part of FULLTEXT index",
+ ER_UNKNOWN_KEY_CACHE: "Unknown key cache '%-.100s'",
+ ER_WARN_HOSTNAME_WONT_WORK: "MySQL is started in --skip-name-resolve mode; you must restart it without this switch for this grant to work",
+ ER_UNKNOWN_STORAGE_ENGINE: "Unknown storage engine '%s'",
+ ER_WARN_DEPRECATED_SYNTAX: "'%s' is deprecated and will be removed in a future release. Please use %s instead",
+ ER_NON_UPDATABLE_TABLE: "The target table %-.100s of the %s is not updatable",
+ ER_FEATURE_DISABLED: "The '%s' feature is disabled; you need MySQL built with '%s' to have it working",
+ ER_OPTION_PREVENTS_STATEMENT: "The MySQL server is running with the %s option so it cannot execute this statement",
+ ER_DUPLICATED_VALUE_IN_TYPE: "Column '%-.100s' has duplicated value '%-.64s' in %s",
+ ER_TRUNCATED_WRONG_VALUE: "Truncated incorrect %-.32s value: '%-.128s'",
+ ER_TOO_MUCH_AUTO_TIMESTAMP_COLS: "Incorrect table definition; there can be only one TIMESTAMP column with CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause",
+ ER_INVALID_ON_UPDATE: "Invalid ON UPDATE clause for '%-.192s' column",
+ ER_UNSUPPORTED_PS: "This command is not supported in the prepared statement protocol yet",
+ ER_GET_ERRMSG: "Got error %d '%-.100s' from %s",
+ ER_GET_TEMPORARY_ERRMSG: "Got temporary error %d '%-.100s' from %s",
+ ER_UNKNOWN_TIME_ZONE: "Unknown or incorrect time zone: '%-.64s'",
+ ER_WARN_INVALID_TIMESTAMP: "Invalid TIMESTAMP value in column '%s' at row %ld",
+ ER_INVALID_CHARACTER_STRING: "Invalid %s character string: '%.64s'",
+ ER_WARN_ALLOWED_PACKET_OVERFLOWED: "Result of %s() was larger than max_allowed_packet (%ld) - truncated",
+ ER_CONFLICTING_DECLARATIONS: "Conflicting declarations: '%s%s' and '%s%s'",
+ ER_SP_NO_RECURSIVE_CREATE: "Can't create a %s from within another stored routine",
+ ER_SP_ALREADY_EXISTS: "%s %s already exists",
+ ER_SP_DOES_NOT_EXIST: "%s %s does not exist",
+ ER_SP_DROP_FAILED: "Failed to DROP %s %s",
+ ER_SP_STORE_FAILED: "Failed to CREATE %s %s",
+ ER_SP_LILABEL_MISMATCH: "%s with no matching label: %s",
+ ER_SP_LABEL_REDEFINE: "Redefining label %s",
+ ER_SP_LABEL_MISMATCH: "End-label %s without match",
+ ER_SP_UNINIT_VAR: "Referring to uninitialized variable %s",
+ ER_SP_BADSELECT: "PROCEDURE %s can't return a result set in the given context",
+ ER_SP_BADRETURN: "RETURN is only allowed in a FUNCTION",
+ ER_SP_BADSTATEMENT: "%s is not allowed in stored procedures",
+ ER_UPDATE_LOG_DEPRECATED_IGNORED: "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been ignored.",
+ ER_UPDATE_LOG_DEPRECATED_TRANSLATED: "The update log is deprecated and replaced by the binary log; SET SQL_LOG_UPDATE has been translated to SET SQL_LOG_BIN.",
+ ER_QUERY_INTERRUPTED: "Query execution was interrupted",
+ ER_SP_WRONG_NO_OF_ARGS: "Incorrect number of arguments for %s %s; expected %u, got %u",
+ ER_SP_COND_MISMATCH: "Undefined CONDITION: %s",
+ ER_SP_NORETURN: "No RETURN found in FUNCTION %s",
+ ER_SP_NORETURNEND: "FUNCTION %s ended without RETURN",
+ ER_SP_BAD_CURSOR_QUERY: "Cursor statement must be a SELECT",
+ ER_SP_BAD_CURSOR_SELECT: "Cursor SELECT must not have INTO",
+ ER_SP_CURSOR_MISMATCH: "Undefined CURSOR: %s",
+ ER_SP_CURSOR_ALREADY_OPEN: "Cursor is already open",
+ ER_SP_CURSOR_NOT_OPEN: "Cursor is not open",
+ ER_SP_UNDECLARED_VAR: "Undeclared variable: %s",
+ ER_SP_WRONG_NO_OF_FETCH_ARGS: "Incorrect number of FETCH variables",
+ ER_SP_FETCH_NO_DATA: "No data - zero rows fetched, selected, or processed",
+ ER_SP_DUP_PARAM: "Duplicate parameter: %s",
+ ER_SP_DUP_VAR: "Duplicate variable: %s",
+ ER_SP_DUP_COND: "Duplicate condition: %s",
+ ER_SP_DUP_CURS: "Duplicate cursor: %s",
+ ER_SP_CANT_ALTER: "Failed to ALTER %s %s",
+ ER_SP_SUBSELECT_NYI: "Subquery value not supported",
+ ER_STMT_NOT_ALLOWED_IN_SF_OR_TRG: "%s is not allowed in stored function or trigger",
+ ER_SP_VARCOND_AFTER_CURSHNDLR: "Variable or condition declaration after cursor or handler declaration",
+ ER_SP_CURSOR_AFTER_HANDLER: "Cursor declaration after handler declaration",
+ ER_SP_CASE_NOT_FOUND: "Case not found for CASE statement",
+ ER_FPARSER_TOO_BIG_FILE: "Configuration file '%-.192s' is too big",
+ ER_FPARSER_BAD_HEADER: "Malformed file type header in file '%-.192s'",
+ ER_FPARSER_EOF_IN_COMMENT: "Unexpected end of file while parsing comment '%-.200s'",
+ ER_FPARSER_ERROR_IN_PARAMETER: "Error while parsing parameter '%-.192s' (line: '%-.192s')",
+ ER_FPARSER_EOF_IN_UNKNOWN_PARAMETER: "Unexpected end of file while skipping unknown parameter '%-.192s'",
+ ER_VIEW_NO_EXPLAIN: "EXPLAIN/SHOW can not be issued; lacking privileges for underlying table",
+ ER_FRM_UNKNOWN_TYPE: "File '%-.192s' has unknown type '%-.64s' in its header",
+ ER_WRONG_OBJECT: "'%-.192s.%-.192s' is not %s",
+ ER_NONUPDATEABLE_COLUMN: "Column '%-.192s' is not updatable",
+ ER_VIEW_SELECT_DERIVED: "View's SELECT contains a subquery in the FROM clause",
+ ER_VIEW_SELECT_CLAUSE: "View's SELECT contains a '%s' clause",
+ ER_VIEW_SELECT_VARIABLE: "View's SELECT contains a variable or parameter",
+ ER_VIEW_SELECT_TMPTABLE: "View's SELECT refers to a temporary table '%-.192s'",
+ ER_VIEW_WRONG_LIST: "View's SELECT and view's field list have different column counts",
+ ER_WARN_VIEW_MERGE: "View merge algorithm can't be used here for now (assumed undefined algorithm)",
+ ER_WARN_VIEW_WITHOUT_KEY: "View being updated does not have complete key of underlying table in it",
+ ER_VIEW_INVALID: "View '%-.192s.%-.192s' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them",
+ ER_SP_NO_DROP_SP: "Can't drop or alter a %s from within another stored routine",
+ ER_SP_GOTO_IN_HNDLR: "GOTO is not allowed in a stored procedure handler",
+ ER_TRG_ALREADY_EXISTS: "Trigger already exists",
+ ER_TRG_DOES_NOT_EXIST: "Trigger does not exist",
+ ER_TRG_ON_VIEW_OR_TEMP_TABLE: "Trigger's '%-.192s' is view or temporary table",
+ ER_TRG_CANT_CHANGE_ROW: "Updating of %s row is not allowed in %strigger",
+ ER_TRG_NO_SUCH_ROW_IN_TRG: "There is no %s row in %s trigger",
+ ER_NO_DEFAULT_FOR_FIELD: "Field '%-.192s' doesn't have a default value",
+ ER_DIVISION_BY_ZERO: "Division by 0",
+ ER_TRUNCATED_WRONG_VALUE_FOR_FIELD: "Incorrect %-.32s value: '%-.128s' for column '%.192s' at row %ld",
+ ER_ILLEGAL_VALUE_FOR_TYPE: "Illegal %s '%-.192s' value found during parsing",
+ ER_VIEW_NONUPD_CHECK: "CHECK OPTION on non-updatable view '%-.192s.%-.192s'",
+ ER_VIEW_CHECK_FAILED: "CHECK OPTION failed '%-.192s.%-.192s'",
+ ER_PROCACCESS_DENIED_ERROR: "%-.16s command denied to user '%-.48s'@'%-.64s' for routine '%-.192s'",
+ ER_RELAY_LOG_FAIL: "Failed purging old relay logs: %s",
+ ER_PASSWD_LENGTH: "Password hash should be a %d-digit hexadecimal number",
+ ER_UNKNOWN_TARGET_BINLOG: "Target log not found in binlog index",
+ ER_IO_ERR_LOG_INDEX_READ: "I/O error reading log index file",
+ ER_BINLOG_PURGE_PROHIBITED: "Server configuration does not permit binlog purge",
+ ER_FSEEK_FAIL: "Failed on fseek()",
+ ER_BINLOG_PURGE_FATAL_ERR: "Fatal error during log purge",
+ ER_LOG_IN_USE: "A purgeable log is in use, will not purge",
+ ER_LOG_PURGE_UNKNOWN_ERR: "Unknown error during log purge",
+ ER_RELAY_LOG_INIT: "Failed initializing relay log position: %s",
+ ER_NO_BINARY_LOGGING: "You are not using binary logging",
+ ER_RESERVED_SYNTAX: "The '%-.64s' syntax is reserved for purposes internal to the MySQL server",
+ ER_WSAS_FAILED: "WSAStartup Failed",
+ ER_DIFF_GROUPS_PROC: "Can't handle procedures with different groups yet",
+ ER_NO_GROUP_FOR_PROC: "Select must have a group with this procedure",
+ ER_ORDER_WITH_PROC: "Can't use ORDER clause with this procedure",
+ ER_LOGGING_PROHIBIT_CHANGING_OF: "Binary logging and replication forbid changing the global server %s",
+ ER_NO_FILE_MAPPING: "Can't map file: %-.200s, errno: %d",
+ ER_WRONG_MAGIC: "Wrong magic in %-.64s",
+ ER_PS_MANY_PARAM: "Prepared statement contains too many placeholders",
+ ER_KEY_PART_0: "Key part '%-.192s' length cannot be 0",
+ ER_VIEW_CHECKSUM: "View text checksum failed",
+ ER_VIEW_MULTIUPDATE: "Can not modify more than one base table through a join view '%-.192s.%-.192s'",
+ ER_VIEW_NO_INSERT_FIELD_LIST: "Can not insert into join view '%-.192s.%-.192s' without fields list",
+ ER_VIEW_DELETE_MERGE_VIEW: "Can not delete from join view '%-.192s.%-.192s'",
+ ER_CANNOT_USER: "Operation %s failed for %.256s",
+ ER_XAER_NOTA: "XAER_NOTA: Unknown XID",
+ ER_XAER_INVAL: "XAER_INVAL: Invalid arguments (or unsupported command)",
+ ER_XAER_RMFAIL: "XAER_RMFAIL: The command cannot be executed when global transaction is in the %.64s state",
+ ER_XAER_OUTSIDE: "XAER_OUTSIDE: Some work is done outside global transaction",
+ ER_XAER_RMERR: "XAER_RMERR: Fatal error occurred in the transaction branch - check your data for consistency",
+ ER_XA_RBROLLBACK: "XA_RBROLLBACK: Transaction branch was rolled back",
+ ER_NONEXISTING_PROC_GRANT: "There is no such grant defined for user '%-.48s' on host '%-.64s' on routine '%-.192s'",
+ ER_PROC_AUTO_GRANT_FAIL: "Failed to grant EXECUTE and ALTER ROUTINE privileges",
+ ER_PROC_AUTO_REVOKE_FAIL: "Failed to revoke all privileges to dropped routine",
+ ER_DATA_TOO_LONG: "Data too long for column '%s' at row %ld",
+ ER_SP_BAD_SQLSTATE: "Bad SQLSTATE: '%s'",
+ ER_STARTUP: "%s: ready for connections.\nVersion: '%s' socket: '%s' port: %d %s",
+ ER_LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR: "Can't load value from file with fixed size rows to variable",
+ ER_CANT_CREATE_USER_WITH_GRANT: "You are not allowed to create a user with GRANT",
+ ER_WRONG_VALUE_FOR_TYPE: "Incorrect %-.32s value: '%-.128s' for function %-.32s",
+ ER_TABLE_DEF_CHANGED: "Table definition has changed, please retry transaction",
+ ER_SP_DUP_HANDLER: "Duplicate handler declared in the same block",
+ ER_SP_NOT_VAR_ARG: "OUT or INOUT argument %d for routine %s is not a variable or NEW pseudo-variable in BEFORE trigger",
+ ER_SP_NO_RETSET: "Not allowed to return a result set from a %s",
+ ER_CANT_CREATE_GEOMETRY_OBJECT: "Cannot get geometry object from data you send to the GEOMETRY field",
+ ER_FAILED_ROUTINE_BREAK_BINLOG: "A routine failed and has neither NO SQL nor READS SQL DATA in its declaration and binary logging is enabled; if non-transactional tables were updated, the binary log will miss their changes",
+ ER_BINLOG_UNSAFE_ROUTINE: "This function has none of DETERMINISTIC, NO SQL, or READS SQL DATA in its declaration and binary logging is enabled (you *might* want to use the less safe log_bin_trust_function_creators variable)",
+ ER_BINLOG_CREATE_ROUTINE_NEED_SUPER: "You do not have the SUPER privilege and binary logging is enabled (you *might* want to use the less safe log_bin_trust_function_creators variable)",
+ ER_EXEC_STMT_WITH_OPEN_CURSOR: "You can't execute a prepared statement which has an open cursor associated with it. Reset the statement to re-execute it.",
+ ER_STMT_HAS_NO_OPEN_CURSOR: "The statement (%lu) has no open cursor.",
+ ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG: "Explicit or implicit commit is not allowed in stored function or trigger.",
+ ER_NO_DEFAULT_FOR_VIEW_FIELD: "Field of view '%-.192s.%-.192s' underlying table doesn't have a default value",
+ ER_SP_NO_RECURSION: "Recursive stored functions and triggers are not allowed.",
+ ER_TOO_BIG_SCALE: "Too big scale %d specified for column '%-.192s'. Maximum is %lu.",
+ ER_TOO_BIG_PRECISION: "Too big precision %d specified for column '%-.192s'. Maximum is %lu.",
+ ER_M_BIGGER_THAN_D: "For float(M,D), double(M,D) or decimal(M,D), M must be >= D (column '%-.192s').",
+ ER_WRONG_LOCK_OF_SYSTEM_TABLE: "You can't combine write-locking of system tables with other tables or lock types",
+ ER_CONNECT_TO_FOREIGN_DATA_SOURCE: "Unable to connect to foreign data source: %.64s",
+ ER_QUERY_ON_FOREIGN_DATA_SOURCE: "There was a problem processing the query on the foreign data source. Data source error: %-.64s",
+ ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST: "The foreign data source you are trying to reference does not exist. Data source error: %-.64s",
+ ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE: "Can't create federated table. The data source connection string '%-.64s' is not in the correct format",
+ ER_FOREIGN_DATA_STRING_INVALID: "The data source connection string '%-.64s' is not in the correct format",
+ ER_CANT_CREATE_FEDERATED_TABLE: "Can't create federated table. Foreign data src error: %-.64s",
+ ER_TRG_IN_WRONG_SCHEMA: "Trigger in wrong schema",
+ ER_STACK_OVERRUN_NEED_MORE: "Thread stack overrun: %ld bytes used of a %ld byte stack, and %ld bytes needed. Use 'mysqld --thread_stack=#' to specify a bigger stack.",
+ ER_TOO_LONG_BODY: "Routine body for '%-.100s' is too long",
+ ER_WARN_CANT_DROP_DEFAULT_KEYCACHE: "Cannot drop default keycache",
+ ER_TOO_BIG_DISPLAYWIDTH: "Display width out of range for column '%-.192s' (max = %lu)",
+ ER_XAER_DUPID: "XAER_DUPID: The XID already exists",
+ ER_DATETIME_FUNCTION_OVERFLOW: "Datetime function: %-.32s field overflow",
+ ER_CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG: "Can't update table '%-.192s' in stored function/trigger because it is already used by statement which invoked this stored function/trigger.",
+ ER_VIEW_PREVENT_UPDATE: "The definition of table '%-.192s' prevents operation %.192s on table '%-.192s'.",
+ ER_PS_NO_RECURSION: "The prepared statement contains a stored routine call that refers to that same statement. It's not allowed to execute a prepared statement in such a recursive manner",
+ ER_SP_CANT_SET_AUTOCOMMIT: "Not allowed to set autocommit from a stored function or trigger",
+ ER_MALFORMED_DEFINER: "Definer is not fully qualified",
+ ER_VIEW_FRM_NO_USER: "View '%-.192s'.'%-.192s' has no definer information (old table format). Current user is used as definer. Please recreate the view!",
+ ER_VIEW_OTHER_USER: "You need the SUPER privilege for creation view with '%-.192s'@'%-.192s' definer",
+ ER_NO_SUCH_USER: "The user specified as a definer ('%-.64s'@'%-.64s') does not exist",
+ ER_FORBID_SCHEMA_CHANGE: "Changing schema from '%-.192s' to '%-.192s' is not allowed.",
+ ER_ROW_IS_REFERENCED_2: "Cannot delete or update a parent row: a foreign key constraint fails (%.192s)",
+ ER_NO_REFERENCED_ROW_2: "Cannot add or update a child row: a foreign key constraint fails (%.192s)",
+ ER_SP_BAD_VAR_SHADOW: "Variable '%-.64s' must be quoted with `...`, or renamed",
+ ER_TRG_NO_DEFINER: "No definer attribute for trigger '%-.192s'.'%-.192s'. The trigger will be activated under the authorization of the invoker, which may have insufficient privileges. Please recreate the trigger.",
+ ER_OLD_FILE_FORMAT: "'%-.192s' has an old format, you should re-create the '%s' object(s)",
+ ER_SP_RECURSION_LIMIT: "Recursive limit %d (as set by the max_sp_recursion_depth variable) was exceeded for routine %.192s",
+ ER_SP_PROC_TABLE_CORRUPT: "Failed to load routine %-.192s. The table mysql.proc is missing, corrupt, or contains bad data (internal code %d)",
+ ER_SP_WRONG_NAME: "Incorrect routine name '%-.192s'",
+ ER_TABLE_NEEDS_UPGRADE: "Table upgrade required. Please do \"REPAIR TABLE `%-.32s`\" or dump/reload to fix it!",
+ ER_SP_NO_AGGREGATE: "AGGREGATE is not supported for stored functions",
+ ER_MAX_PREPARED_STMT_COUNT_REACHED: "Can't create more than max_prepared_stmt_count statements (current value: %lu)",
+ ER_VIEW_RECURSIVE: "`%-.192s`.`%-.192s` contains view recursion",
+ ER_NON_GROUPING_FIELD_USED: "Non-grouping field '%-.192s' is used in %-.64s clause",
+ ER_TABLE_CANT_HANDLE_SPKEYS: "The used table type doesn't support SPATIAL indexes",
+ ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA: "Triggers can not be created on system tables",
+ ER_REMOVED_SPACES: "Leading spaces are removed from name '%s'",
+ ER_AUTOINC_READ_FAILED: "Failed to read auto-increment value from storage engine",
+ ER_USERNAME: "user name",
+ ER_HOSTNAME: "host name",
+ ER_WRONG_STRING_LENGTH: "String '%-.70s' is too long for %s (should be no longer than %d)",
+ ER_NON_INSERTABLE_TABLE: "The target table %-.100s of the %s is not insertable-into",
+ ER_ADMIN_WRONG_MRG_TABLE: "Table '%-.64s' is differently defined or of non-MyISAM type or doesn't exist",
+ ER_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT: "Too high level of nesting for select",
+ ER_NAME_BECOMES_EMPTY: "Name '%-.64s' has become ''",
+ ER_AMBIGUOUS_FIELD_TERM: "First character of the FIELDS TERMINATED string is ambiguous; please use non-optional and non-empty FIELDS ENCLOSED BY",
+ ER_FOREIGN_SERVER_EXISTS: "The foreign server, %s, you are trying to create already exists.",
+ ER_FOREIGN_SERVER_DOESNT_EXIST: "The foreign server name you are trying to reference does not exist. Data source error: %-.64s",
+ ER_ILLEGAL_HA_CREATE_OPTION: "Table storage engine '%-.64s' does not support the create option '%.64s'",
+ ER_PARTITION_REQUIRES_VALUES_ERROR: "Syntax error: %-.64s PARTITIONING requires definition of VALUES %-.64s for each partition",
+ ER_PARTITION_WRONG_VALUES_ERROR: "Only %-.64s PARTITIONING can use VALUES %-.64s in partition definition",
+ ER_PARTITION_MAXVALUE_ERROR: "MAXVALUE can only be used in last partition definition",
+ ER_PARTITION_SUBPARTITION_ERROR: "Subpartitions can only be hash partitions and by key",
+ ER_PARTITION_SUBPART_MIX_ERROR: "Must define subpartitions on all partitions if on one partition",
+ ER_PARTITION_WRONG_NO_PART_ERROR: "Wrong number of partitions defined, mismatch with previous setting",
+ ER_PARTITION_WRONG_NO_SUBPART_ERROR: "Wrong number of subpartitions defined, mismatch with previous setting",
+ ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR: "Constant, random or timezone-dependent expressions in (sub)partitioning function are not allowed",
+ ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR: "Expression in RANGE/LIST VALUES must be constant",
+ ER_FIELD_NOT_FOUND_PART_ERROR: "Field in list of fields for partition function not found in table",
+ ER_LIST_OF_FIELDS_ONLY_IN_HASH_ERROR: "List of fields is only allowed in KEY partitions",
+ ER_INCONSISTENT_PARTITION_INFO_ERROR: "The partition info in the frm file is not consistent with what can be written into the frm file",
+ ER_PARTITION_FUNC_NOT_ALLOWED_ERROR: "The %-.192s function returns the wrong type",
+ ER_PARTITIONS_MUST_BE_DEFINED_ERROR: "For %-.64s partitions each partition must be defined",
+ ER_RANGE_NOT_INCREASING_ERROR: "VALUES LESS THAN value must be strictly increasing for each partition",
+ ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR: "VALUES value must be of same type as partition function",
+ ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR: "Multiple definition of same constant in list partitioning",
+ ER_PARTITION_ENTRY_ERROR: "Partitioning can not be used stand-alone in query",
+ ER_MIX_HANDLER_ERROR: "The mix of handlers in the partitions is not allowed in this version of MySQL",
+ ER_PARTITION_NOT_DEFINED_ERROR: "For the partitioned engine it is necessary to define all %-.64s",
+ ER_TOO_MANY_PARTITIONS_ERROR: "Too many partitions (including subpartitions) were defined",
+ ER_SUBPARTITION_ERROR: "It is only possible to mix RANGE/LIST partitioning with HASH/KEY partitioning for subpartitioning",
+ ER_CANT_CREATE_HANDLER_FILE: "Failed to create specific handler file",
+ ER_BLOB_FIELD_IN_PART_FUNC_ERROR: "A BLOB field is not allowed in partition function",
+ ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF: "A %-.192s must include all columns in the table's partitioning function",
+ ER_NO_PARTS_ERROR: "Number of %-.64s = 0 is not an allowed value",
+ ER_PARTITION_MGMT_ON_NONPARTITIONED: "Partition management on a not partitioned table is not possible",
+ ER_FOREIGN_KEY_ON_PARTITIONED: "Foreign key clause is not yet supported in conjunction with partitioning",
+ ER_DROP_PARTITION_NON_EXISTENT: "Error in list of partitions to %-.64s",
+ ER_DROP_LAST_PARTITION: "Cannot remove all partitions, use DROP TABLE instead",
+ ER_COALESCE_ONLY_ON_HASH_PARTITION: "COALESCE PARTITION can only be used on HASH/KEY partitions",
+ ER_REORG_HASH_ONLY_ON_SAME_NO: "REORGANIZE PARTITION can only be used to reorganize partitions not to change their numbers",
+ ER_REORG_NO_PARAM_ERROR: "REORGANIZE PARTITION without parameters can only be used on auto-partitioned tables using HASH PARTITIONs",
+ ER_ONLY_ON_RANGE_LIST_PARTITION: "%-.64s PARTITION can only be used on RANGE/LIST partitions",
+ ER_ADD_PARTITION_SUBPART_ERROR: "Trying to Add partition(s) with wrong number of subpartitions",
+ ER_ADD_PARTITION_NO_NEW_PARTITION: "At least one partition must be added",
+ ER_COALESCE_PARTITION_NO_PARTITION: "At least one partition must be coalesced",
+ ER_REORG_PARTITION_NOT_EXIST: "More partitions to reorganize than there are partitions",
+ ER_SAME_NAME_PARTITION: "Duplicate partition name %-.192s",
+ ER_NO_BINLOG_ERROR: "It is not allowed to shut off binlog on this command",
+ ER_CONSECUTIVE_REORG_PARTITIONS: "When reorganizing a set of partitions they must be in consecutive order",
+ ER_REORG_OUTSIDE_RANGE: "Reorganize of range partitions cannot change total ranges except for last partition where it can extend the range",
+ ER_PARTITION_FUNCTION_FAILURE: "Partition function not supported in this version for this handler",
+ ER_PART_STATE_ERROR: "Partition state cannot be defined from CREATE/ALTER TABLE",
+ ER_LIMITED_PART_RANGE: "The %-.64s handler only supports 32 bit integers in VALUES",
+ ER_PLUGIN_IS_NOT_LOADED: "Plugin '%-.192s' is not loaded",
+ ER_WRONG_VALUE: "Incorrect %-.32s value: '%-.128s'",
+ ER_NO_PARTITION_FOR_GIVEN_VALUE: "Table has no partition for value %-.64s",
+ ER_FILEGROUP_OPTION_ONLY_ONCE: "It is not allowed to specify %s more than once",
+ ER_CREATE_FILEGROUP_FAILED: "Failed to create %s",
+ ER_DROP_FILEGROUP_FAILED: "Failed to drop %s",
+ ER_TABLESPACE_AUTO_EXTEND_ERROR: "The handler doesn't support autoextend of tablespaces",
+ ER_WRONG_SIZE_NUMBER: "A size parameter was incorrectly specified, either number or on the form 10M",
+ ER_SIZE_OVERFLOW_ERROR: "The size number was correct but we don't allow the digit part to be more than 2 billion",
+ ER_ALTER_FILEGROUP_FAILED: "Failed to alter: %s",
+ ER_BINLOG_ROW_LOGGING_FAILED: "Writing one row to the row-based binary log failed",
+ ER_BINLOG_ROW_WRONG_TABLE_DEF: "Table definition on master and slave does not match: %s",
+ ER_BINLOG_ROW_RBR_TO_SBR: "Slave running with --log-slave-updates must use row-based binary logging to be able to replicate row-based binary log events",
+ ER_EVENT_ALREADY_EXISTS: "Event '%-.192s' already exists",
+ ER_EVENT_STORE_FAILED: "Failed to store event %s. Error code %d from storage engine.",
+ ER_EVENT_DOES_NOT_EXIST: "Unknown event '%-.192s'",
+ ER_EVENT_CANT_ALTER: "Failed to alter event '%-.192s'",
+ ER_EVENT_DROP_FAILED: "Failed to drop %s",
+ ER_EVENT_INTERVAL_NOT_POSITIVE_OR_TOO_BIG: "INTERVAL is either not positive or too big",
+ ER_EVENT_ENDS_BEFORE_STARTS: "ENDS is either invalid or before STARTS",
+ ER_EVENT_EXEC_TIME_IN_THE_PAST: "Event execution time is in the past. Event has been disabled",
+ ER_EVENT_OPEN_TABLE_FAILED: "Failed to open mysql.event",
+ ER_EVENT_NEITHER_M_EXPR_NOR_M_AT: "No datetime expression provided",
+ ER_OBSOLETE_COL_COUNT_DOESNT_MATCH_CORRUPTED: "Column count of mysql.%s is wrong. Expected %d, found %d. The table is probably corrupted",
+ ER_OBSOLETE_CANNOT_LOAD_FROM_TABLE: "Cannot load from mysql.%s. The table is probably corrupted",
+ ER_EVENT_CANNOT_DELETE: "Failed to delete the event from mysql.event",
+ ER_EVENT_COMPILE_ERROR: "Error during compilation of event's body",
+ ER_EVENT_SAME_NAME: "Same old and new event name",
+ ER_EVENT_DATA_TOO_LONG: "Data for column '%s' too long",
+ ER_DROP_INDEX_FK: "Cannot drop index '%-.192s': needed in a foreign key constraint",
+ ER_WARN_DEPRECATED_SYNTAX_WITH_VER: "The syntax '%s' is deprecated and will be removed in MySQL %s. Please use %s instead",
+ ER_CANT_WRITE_LOCK_LOG_TABLE: "You can't write-lock a log table. Only read access is possible",
+ ER_CANT_LOCK_LOG_TABLE: "You can't use locks with log tables.",
+ ER_FOREIGN_DUPLICATE_KEY_OLD_UNUSED: "Upholding foreign key constraints for table '%.192s', entry '%-.192s', key %d would lead to a duplicate entry",
+ ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE: "Column count of mysql.%s is wrong. Expected %d, found %d. Created with MySQL %d, now running %d. Please use mysql_upgrade to fix this error.",
+ ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR: "Cannot switch out of the row-based binary log format when the session has open temporary tables",
+ ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT: "Cannot change the binary logging format inside a stored function or trigger",
+ ER_NDB_CANT_SWITCH_BINLOG_FORMAT: "The NDB cluster engine does not support changing the binlog format on the fly yet",
+ ER_PARTITION_NO_TEMPORARY: "Cannot create temporary table with partitions",
+ ER_PARTITION_CONST_DOMAIN_ERROR: "Partition constant is out of partition function domain",
+ ER_PARTITION_FUNCTION_IS_NOT_ALLOWED: "This partition function is not allowed",
+ ER_DDL_LOG_ERROR: "Error in DDL log",
+ ER_NULL_IN_VALUES_LESS_THAN: "Not allowed to use NULL value in VALUES LESS THAN",
+ ER_WRONG_PARTITION_NAME: "Incorrect partition name",
+ ER_CANT_CHANGE_TX_CHARACTERISTICS: "Transaction characteristics can't be changed while a transaction is in progress",
+ ER_DUP_ENTRY_AUTOINCREMENT_CASE: "ALTER TABLE causes auto_increment resequencing, resulting in duplicate entry '%-.192s' for key '%-.192s'",
+ ER_EVENT_MODIFY_QUEUE_ERROR: "Internal scheduler error %d",
+ ER_EVENT_SET_VAR_ERROR: "Error during starting/stopping of the scheduler. Error code %u",
+ ER_PARTITION_MERGE_ERROR: "Engine cannot be used in partitioned tables",
+ ER_CANT_ACTIVATE_LOG: "Cannot activate '%-.64s' log",
+ ER_RBR_NOT_AVAILABLE: "The server was not built with row-based replication",
+ ER_BASE64_DECODE_ERROR: "Decoding of base64 string failed",
+ ER_EVENT_RECURSION_FORBIDDEN: "Recursion of EVENT DDL statements is forbidden when body is present",
+ ER_EVENTS_DB_ERROR: "Cannot proceed because system tables used by Event Scheduler were found damaged at server start",
+ ER_ONLY_INTEGERS_ALLOWED: "Only integers allowed as number here",
+ ER_UNSUPORTED_LOG_ENGINE: "This storage engine cannot be used for log tables",
+ ER_BAD_LOG_STATEMENT: "You cannot '%s' a log table if logging is enabled",
+ ER_CANT_RENAME_LOG_TABLE: "Cannot rename '%s'. When logging enabled, rename to/from log table must rename two tables: the log table to an archive table and another table back to '%s'",
+ ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT: "Incorrect parameter count in the call to native function '%-.192s'",
+ ER_WRONG_PARAMETERS_TO_NATIVE_FCT: "Incorrect parameters in the call to native function '%-.192s'",
+ ER_WRONG_PARAMETERS_TO_STORED_FCT: "Incorrect parameters in the call to stored function '%-.192s'",
+ ER_NATIVE_FCT_NAME_COLLISION: "This function '%-.192s' has the same name as a native function",
+ ER_DUP_ENTRY_WITH_KEY_NAME: "Duplicate entry '%-.64s' for key '%-.192s'",
+ ER_BINLOG_PURGE_EMFILE: "Too many files opened, please execute the command again",
+ ER_EVENT_CANNOT_CREATE_IN_THE_PAST: "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was dropped immediately after creation.",
+ ER_EVENT_CANNOT_ALTER_IN_THE_PAST: "Event execution time is in the past and ON COMPLETION NOT PRESERVE is set. The event was not changed. Specify a time in the future.",
+ ER_SLAVE_INCIDENT: "The incident %s occurred on the master. Message: %-.64s",
+ ER_NO_PARTITION_FOR_GIVEN_VALUE_SILENT: "Table has no partition for some existing values",
+ ER_BINLOG_UNSAFE_STATEMENT: "Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT. %s",
+ ER_SLAVE_FATAL_ERROR: "Fatal error: %s",
+ ER_SLAVE_RELAY_LOG_READ_FAILURE: "Relay log read failure: %s",
+ ER_SLAVE_RELAY_LOG_WRITE_FAILURE: "Relay log write failure: %s",
+ ER_SLAVE_CREATE_EVENT_FAILURE: "Failed to create %s",
+ ER_SLAVE_MASTER_COM_FAILURE: "Master command %s failed: %s",
+ ER_BINLOG_LOGGING_IMPOSSIBLE: "Binary logging not possible. Message: %s",
+ ER_VIEW_NO_CREATION_CTX: "View `%-.64s`.`%-.64s` has no creation context",
+ ER_VIEW_INVALID_CREATION_CTX: "Creation context of view `%-.64s`.`%-.64s` is invalid",
+ ER_SR_INVALID_CREATION_CTX: "Creation context of stored routine `%-.64s`.`%-.64s` is invalid",
+ ER_TRG_CORRUPTED_FILE: "Corrupted TRG file for table `%-.64s`.`%-.64s`",
+ ER_TRG_NO_CREATION_CTX: "Triggers for table `%-.64s`.`%-.64s` have no creation context",
+ ER_TRG_INVALID_CREATION_CTX: "Trigger creation context of table `%-.64s`.`%-.64s` is invalid",
+ ER_EVENT_INVALID_CREATION_CTX: "Creation context of event `%-.64s`.`%-.64s` is invalid",
+ ER_TRG_CANT_OPEN_TABLE: "Cannot open table for trigger `%-.64s`.`%-.64s`",
+ ER_CANT_CREATE_SROUTINE: "Cannot create stored routine `%-.64s`. Check warnings",
+ ER_NEVER_USED: "Ambiguous slave modes combination. %s",
+ ER_NO_FORMAT_DESCRIPTION_EVENT_BEFORE_BINLOG_STATEMENT: "The BINLOG statement of type `%s` was not preceded by a format description BINLOG statement.",
+ ER_SLAVE_CORRUPT_EVENT: "Corrupted replication event was detected",
+ ER_LOAD_DATA_INVALID_COLUMN: "Invalid column reference (%-.64s) in LOAD DATA",
+ ER_LOG_PURGE_NO_FILE: "Being purged log %s was not found",
+ ER_XA_RBTIMEOUT: "XA_RBTIMEOUT: Transaction branch was rolled back: took too long",
+ ER_XA_RBDEADLOCK: "XA_RBDEADLOCK: Transaction branch was rolled back: deadlock was detected",
+ ER_NEED_REPREPARE: "Prepared statement needs to be re-prepared",
+ ER_DELAYED_NOT_SUPPORTED: "DELAYED option not supported for table '%-.192s'",
+ WARN_NO_MASTER_INFO: "The master info structure does not exist",
+ WARN_OPTION_IGNORED: "<%-.64s> option ignored",
+ WARN_PLUGIN_DELETE_BUILTIN: "Built-in plugins cannot be deleted",
+ WARN_PLUGIN_BUSY: "Plugin is busy and will be uninstalled on shutdown",
+ ER_VARIABLE_IS_READONLY: "%s variable '%s' is read-only. Use SET %s to assign the value",
+ ER_WARN_ENGINE_TRANSACTION_ROLLBACK: "Storage engine %s does not support rollback for this statement. Transaction rolled back and must be restarted",
+ ER_SLAVE_HEARTBEAT_FAILURE: "Unexpected master's heartbeat data: %s",
+ ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE: "The requested value for the heartbeat period is either negative or exceeds the maximum allowed (%s seconds).",
+ ER_NDB_REPLICATION_SCHEMA_ERROR: "Bad schema for mysql.ndb_replication table. Message: %-.64s",
+ ER_CONFLICT_FN_PARSE_ERROR: "Error in parsing conflict function. Message: %-.64s",
+ ER_EXCEPTIONS_WRITE_ERROR: "Write to exceptions table failed. Message: %-.128s",
+ ER_TOO_LONG_TABLE_COMMENT: "Comment for table '%-.64s' is too long (max = %lu)",
+ ER_TOO_LONG_FIELD_COMMENT: "Comment for field '%-.64s' is too long (max = %lu)",
+ ER_FUNC_INEXISTENT_NAME_COLLISION: "FUNCTION %s does not exist. Check the 'Function Name Parsing and Resolution' section in the Reference Manual",
+ ER_DATABASE_NAME: "Database",
+ ER_TABLE_NAME: "Table",
+ ER_PARTITION_NAME: "Partition",
+ ER_SUBPARTITION_NAME: "Subpartition",
+ ER_TEMPORARY_NAME: "Temporary",
+ ER_RENAMED_NAME: "Renamed",
+ ER_TOO_MANY_CONCURRENT_TRXS: "Too many active concurrent transactions",
+ WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED: "Non-ASCII separator arguments are not fully supported",
+ ER_DEBUG_SYNC_TIMEOUT: "debug sync point wait timed out",
+ ER_DEBUG_SYNC_HIT_LIMIT: "debug sync point hit limit reached",
+ ER_DUP_SIGNAL_SET: "Duplicate condition information item '%s'",
+ ER_SIGNAL_WARN: "Unhandled user-defined warning condition",
+ ER_SIGNAL_NOT_FOUND: "Unhandled user-defined not found condition",
+ ER_SIGNAL_EXCEPTION: "Unhandled user-defined exception condition",
+ ER_RESIGNAL_WITHOUT_ACTIVE_HANDLER: "RESIGNAL when handler not active",
+ ER_SIGNAL_BAD_CONDITION_TYPE: "SIGNAL/RESIGNAL can only use a CONDITION defined with SQLSTATE",
+ WARN_COND_ITEM_TRUNCATED: "Data truncated for condition item '%s'",
+ ER_COND_ITEM_TOO_LONG: "Data too long for condition item '%s'",
+ ER_UNKNOWN_LOCALE: "Unknown locale: '%-.64s'",
+ ER_SLAVE_IGNORE_SERVER_IDS: "The requested server id %d clashes with the slave startup option --replicate-same-server-id",
+ ER_QUERY_CACHE_DISABLED: "Query cache is disabled; restart the server with query_cache_type=1 to enable it",
+ ER_SAME_NAME_PARTITION_FIELD: "Duplicate partition field name '%-.192s'",
+ ER_PARTITION_COLUMN_LIST_ERROR: "Inconsistency in usage of column lists for partitioning",
+ ER_WRONG_TYPE_COLUMN_VALUE_ERROR: "Partition column values of incorrect type",
+ ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR: "Too many fields in '%-.192s'",
+ ER_MAXVALUE_IN_VALUES_IN: "Cannot use MAXVALUE as value in VALUES IN",
+ ER_TOO_MANY_VALUES_ERROR: "Cannot have more than one value for this type of %-.64s partitioning",
+ ER_ROW_SINGLE_PARTITION_FIELD_ERROR: "Row expressions in VALUES IN only allowed for multi-field column partitioning",
+ ER_FIELD_TYPE_NOT_ALLOWED_AS_PARTITION_FIELD: "Field '%-.192s' is of a not allowed type for this type of partitioning",
+ ER_PARTITION_FIELDS_TOO_LONG: "The total length of the partitioning fields is too large",
+ ER_BINLOG_ROW_ENGINE_AND_STMT_ENGINE: "Cannot execute statement: impossible to write to binary log since both row-incapable engines and statement-incapable engines are involved.",
+ ER_BINLOG_ROW_MODE_AND_STMT_ENGINE: "Cannot execute statement: impossible to write to binary log since BINLOG_FORMAT = ROW and at least one table uses a storage engine limited to statement-based logging.",
+ ER_BINLOG_UNSAFE_AND_STMT_ENGINE: "Cannot execute statement: impossible to write to binary log since statement is unsafe, storage engine is limited to statement-based logging, and BINLOG_FORMAT = MIXED. %s",
+ ER_BINLOG_ROW_INJECTION_AND_STMT_ENGINE: "Cannot execute statement: impossible to write to binary log since statement is in row format and at least one table uses a storage engine limited to statement-based logging.",
+ ER_BINLOG_STMT_MODE_AND_ROW_ENGINE: "Cannot execute statement: impossible to write to binary log since BINLOG_FORMAT = STATEMENT and at least one table uses a storage engine limited to row-based logging.%s",
+ ER_BINLOG_ROW_INJECTION_AND_STMT_MODE: "Cannot execute statement: impossible to write to binary log since statement is in row format and BINLOG_FORMAT = STATEMENT.",
+ ER_BINLOG_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE: "Cannot execute statement: impossible to write to binary log since more than one engine is involved and at least one engine is self-logging.",
+ ER_BINLOG_UNSAFE_LIMIT: "The statement is unsafe because it uses a LIMIT clause. This is unsafe because the set of rows included cannot be predicted.",
+ ER_BINLOG_UNSAFE_INSERT_DELAYED: "The statement is unsafe because it uses INSERT DELAYED. This is unsafe because the times when rows are inserted cannot be predicted.",
+ ER_BINLOG_UNSAFE_SYSTEM_TABLE: "The statement is unsafe because it uses the general log, slow query log, or performance_schema table(s). This is unsafe because system tables may differ on slaves.",
+ ER_BINLOG_UNSAFE_AUTOINC_COLUMNS: "Statement is unsafe because it invokes a trigger or a stored function that inserts into an AUTO_INCREMENT column. Inserted values cannot be logged correctly.",
+ ER_BINLOG_UNSAFE_UDF: "Statement is unsafe because it uses a UDF which may not return the same value on the slave.",
+ ER_BINLOG_UNSAFE_SYSTEM_VARIABLE: "Statement is unsafe because it uses a system variable that may have a different value on the slave.",
+ ER_BINLOG_UNSAFE_SYSTEM_FUNCTION: "Statement is unsafe because it uses a system function that may return a different value on the slave.",
+ ER_BINLOG_UNSAFE_NONTRANS_AFTER_TRANS: "Statement is unsafe because it accesses a non-transactional table after accessing a transactional table within the same transaction.",
+ ER_MESSAGE_AND_STATEMENT: "%s Statement: %s",
+ ER_SLAVE_CONVERSION_FAILED: "Column %d of table '%-.192s.%-.192s' cannot be converted from type '%-.32s' to type '%-.32s'",
+ ER_SLAVE_CANT_CREATE_CONVERSION: "Can't create conversion table for table '%-.192s.%-.192s'",
+ ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_FORMAT: "Cannot modify @@session.binlog_format inside a transaction",
+ ER_PATH_LENGTH: "The path specified for %.64s is too long.",
+ ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT: "'%s' is deprecated and will be removed in a future release.",
+ ER_WRONG_NATIVE_TABLE_STRUCTURE: "Native table '%-.64s'.'%-.64s' has the wrong structure",
+ ER_WRONG_PERFSCHEMA_USAGE: "Invalid performance_schema usage.",
+ ER_WARN_I_S_SKIPPED_TABLE: "Table '%s'.'%s' was skipped since its definition is being modified by concurrent DDL statement",
+ ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_DIRECT: "Cannot modify @@session.binlog_direct_non_transactional_updates inside a transaction",
+ ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_DIRECT: "Cannot change the binlog direct flag inside a stored function or trigger",
+ ER_SPATIAL_MUST_HAVE_GEOM_COL: "A SPATIAL index may only contain a geometrical type column",
+ ER_TOO_LONG_INDEX_COMMENT: "Comment for index '%-.64s' is too long (max = %lu)",
+ ER_LOCK_ABORTED: "Wait on a lock was aborted due to a pending exclusive lock",
+ ER_DATA_OUT_OF_RANGE: "%s value is out of range in '%s'",
+ ER_WRONG_SPVAR_TYPE_IN_LIMIT: "A variable of a non-integer based type in LIMIT clause",
+ ER_BINLOG_UNSAFE_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE: "Mixing self-logging and non-self-logging engines in a statement is unsafe.",
+ ER_BINLOG_UNSAFE_MIXED_STATEMENT: "Statement accesses nontransactional table as well as transactional or temporary table, and writes to any of them.",
+ ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SQL_LOG_BIN: "Cannot modify @@session.sql_log_bin inside a transaction",
+ ER_STORED_FUNCTION_PREVENTS_SWITCH_SQL_LOG_BIN: "Cannot change the sql_log_bin inside a stored function or trigger",
+ ER_FAILED_READ_FROM_PAR_FILE: "Failed to read from the .par file",
+ ER_VALUES_IS_NOT_INT_TYPE_ERROR: "VALUES value for partition '%-.64s' must have type INT",
+ ER_ACCESS_DENIED_NO_PASSWORD_ERROR: "Access denied for user '%-.48s'@'%-.64s'",
+ ER_SET_PASSWORD_AUTH_PLUGIN: "SET PASSWORD has no significance for users authenticating via plugins",
+ ER_GRANT_PLUGIN_USER_EXISTS: "GRANT with IDENTIFIED WITH is illegal because the user %-.*s already exists",
+ ER_TRUNCATE_ILLEGAL_FK: "Cannot truncate a table referenced in a foreign key constraint (%.192s)",
+ ER_PLUGIN_IS_PERMANENT: "Plugin '%s' is force_plus_permanent and can not be unloaded",
+ ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN: "The requested value for the heartbeat period is less than 1 millisecond. The value is reset to 0, meaning that heartbeating will effectively be disabled.",
+ ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX: "The requested value for the heartbeat period exceeds the value of `slave_net_timeout' seconds. A sensible value for the period should be less than the timeout.",
+ ER_STMT_CACHE_FULL: "Multi-row statements required more than 'max_binlog_stmt_cache_size' bytes of storage; increase this mysqld variable and try again",
+ ER_MULTI_UPDATE_KEY_CONFLICT: "Primary key/partition key update is not allowed since the table is updated both as '%-.192s' and '%-.192s'.",
+ ER_TABLE_NEEDS_REBUILD: "Table rebuild required. Please do \"ALTER TABLE `%-.32s` FORCE\" or dump/reload to fix it!",
+ WARN_OPTION_BELOW_LIMIT: "The value of '%s' should be no less than the value of '%s'",
+ ER_INDEX_COLUMN_TOO_LONG: "Index column size too large. The maximum column size is %lu bytes.",
+ ER_ERROR_IN_TRIGGER_BODY: "Trigger '%-.64s' has an error in its body: '%-.256s'",
+ ER_ERROR_IN_UNKNOWN_TRIGGER_BODY: "Unknown trigger has an error in its body: '%-.256s'",
+ ER_INDEX_CORRUPT: "Index %s is corrupted",
+ ER_UNDO_RECORD_TOO_BIG: "Undo log record is too big.",
+ ER_BINLOG_UNSAFE_INSERT_IGNORE_SELECT: "INSERT IGNORE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave.",
+ ER_BINLOG_UNSAFE_INSERT_SELECT_UPDATE: "INSERT... SELECT... ON DUPLICATE KEY UPDATE is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are updated. This order cannot be predicted and may differ on master and the slave.",
+ ER_BINLOG_UNSAFE_REPLACE_SELECT: "REPLACE... SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave.",
+ ER_BINLOG_UNSAFE_CREATE_IGNORE_SELECT: "CREATE... IGNORE SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave.",
+ ER_BINLOG_UNSAFE_CREATE_REPLACE_SELECT: "CREATE... REPLACE SELECT is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are replaced. This order cannot be predicted and may differ on master and the slave.",
+ ER_BINLOG_UNSAFE_UPDATE_IGNORE: "UPDATE IGNORE is unsafe because the order in which rows are updated determines which (if any) rows are ignored. This order cannot be predicted and may differ on master and the slave.",
+ ER_PLUGIN_NO_UNINSTALL: "Plugin '%s' is marked as not dynamically uninstallable. You have to stop the server to uninstall it.",
+ ER_PLUGIN_NO_INSTALL: "Plugin '%s' is marked as not dynamically installable. You have to stop the server to install it.",
+ ER_BINLOG_UNSAFE_WRITE_AUTOINC_SELECT: "Statements writing to a table with an auto-increment column after selecting from another table are unsafe because the order in which rows are retrieved determines what (if any) rows will be written. This order cannot be predicted and may differ on master and the slave.",
+ ER_BINLOG_UNSAFE_CREATE_SELECT_AUTOINC: "CREATE TABLE... SELECT... on a table with an auto-increment column is unsafe because the order in which rows are retrieved by the SELECT determines which (if any) rows are inserted. This order cannot be predicted and may differ on master and the slave.",
+ ER_BINLOG_UNSAFE_INSERT_TWO_KEYS: "INSERT... ON DUPLICATE KEY UPDATE on a table with more than one UNIQUE KEY is unsafe",
+ ER_TABLE_IN_FK_CHECK: "Table is being used in foreign key check.",
+ ER_UNSUPPORTED_ENGINE: "Storage engine '%s' does not support system tables. [%s.%s]",
+ ER_BINLOG_UNSAFE_AUTOINC_NOT_FIRST: "INSERT into autoincrement field which is not the first part in the composed primary key is unsafe.",
+ ER_CANNOT_LOAD_FROM_TABLE_V2: "Cannot load from %s.%s. The table is probably corrupted",
+ ER_MASTER_DELAY_VALUE_OUT_OF_RANGE: "The requested value %u for the master delay exceeds the maximum %u",
+ ER_ONLY_FD_AND_RBR_EVENTS_ALLOWED_IN_BINLOG_STATEMENT: "Only Format_description_log_event and row events are allowed in BINLOG statements (but %s was provided)",
+ ER_PARTITION_EXCHANGE_DIFFERENT_OPTION: "Non matching attribute '%-.64s' between partition and table",
+ ER_PARTITION_EXCHANGE_PART_TABLE: "Table to exchange with partition is partitioned: '%-.64s'",
+ ER_PARTITION_EXCHANGE_TEMP_TABLE: "Table to exchange with partition is temporary: '%-.64s'",
+ ER_PARTITION_INSTEAD_OF_SUBPARTITION: "Subpartitioned table, use subpartition instead of partition",
+ ER_UNKNOWN_PARTITION: "Unknown partition '%-.64s' in table '%-.64s'",
+ ER_TABLES_DIFFERENT_METADATA: "Tables have different definitions",
+ ER_ROW_DOES_NOT_MATCH_PARTITION: "Found a row that does not match the partition",
+ ER_BINLOG_CACHE_SIZE_GREATER_THAN_MAX: "Option binlog_cache_size (%lu) is greater than max_binlog_cache_size (%lu); setting binlog_cache_size equal to max_binlog_cache_size.",
+ ER_WARN_INDEX_NOT_APPLICABLE: "Cannot use %-.64s access on index '%-.64s' due to type or collation conversion on field '%-.64s'",
+ ER_PARTITION_EXCHANGE_FOREIGN_KEY: "Table to exchange with partition has foreign key references: '%-.64s'",
+ ER_NO_SUCH_KEY_VALUE: "Key value '%-.192s' was not found in table '%-.192s.%-.192s'",
+ ER_RPL_INFO_DATA_TOO_LONG: "Data for column '%s' too long",
+ ER_NETWORK_READ_EVENT_CHECKSUM_FAILURE: "Replication event checksum verification failed while reading from network.",
+ ER_BINLOG_READ_EVENT_CHECKSUM_FAILURE: "Replication event checksum verification failed while reading from a log file.",
+ ER_BINLOG_STMT_CACHE_SIZE_GREATER_THAN_MAX: "Option binlog_stmt_cache_size (%lu) is greater than max_binlog_stmt_cache_size (%lu); setting binlog_stmt_cache_size equal to max_binlog_stmt_cache_size.",
+ ER_CANT_UPDATE_TABLE_IN_CREATE_TABLE_SELECT: "Can't update table '%-.192s' while '%-.192s' is being created.",
+ ER_PARTITION_CLAUSE_ON_NONPARTITIONED: "PARTITION () clause on non partitioned table",
+ ER_ROW_DOES_NOT_MATCH_GIVEN_PARTITION_SET: "Found a row not matching the given partition set",
+ ER_NO_SUCH_PARTITION__UNUSED: "partition '%-.64s' doesn't exist",
+ ER_CHANGE_RPL_INFO_REPOSITORY_FAILURE: "Failure while changing the type of replication repository: %s.",
+ ER_WARNING_NOT_COMPLETE_ROLLBACK_WITH_CREATED_TEMP_TABLE: "The creation of some temporary tables could not be rolled back.",
+ ER_WARNING_NOT_COMPLETE_ROLLBACK_WITH_DROPPED_TEMP_TABLE: "Some temporary tables were dropped, but these operations could not be rolled back.",
+ ER_MTS_FEATURE_IS_NOT_SUPPORTED: "%s is not supported in multi-threaded slave mode. %s",
+ ER_MTS_UPDATED_DBS_GREATER_MAX: "The number of modified databases exceeds the maximum %d; the database names will not be included in the replication event metadata.",
+ ER_MTS_CANT_PARALLEL: "Cannot execute the current event group in the parallel mode. Encountered event %s, relay-log name %s, position %s which prevents execution of this event group in parallel mode. Reason: %s.",
+ ER_MTS_INCONSISTENT_DATA: "%s",
+ ER_FULLTEXT_NOT_SUPPORTED_WITH_PARTITIONING: "FULLTEXT index is not supported for partitioned tables.",
+ ER_DA_INVALID_CONDITION_NUMBER: "Invalid condition number",
+ ER_INSECURE_PLAIN_TEXT: "Sending passwords in plain text without SSL/TLS is extremely insecure.",
+ ER_INSECURE_CHANGE_MASTER: "Storing MySQL user name or password information in the master.info repository is not secure and is therefore not recommended. Please see the MySQL Manual for more about this issue and possible alternatives.",
+ ER_FOREIGN_DUPLICATE_KEY_WITH_CHILD_INFO: "Foreign key constraint for table '%.192s', record '%-.192s' would lead to a duplicate entry in table '%.192s', key '%.192s'",
+ ER_FOREIGN_DUPLICATE_KEY_WITHOUT_CHILD_INFO: "Foreign key constraint for table '%.192s', record '%-.192s' would lead to a duplicate entry in a child table",
+ ER_SQLTHREAD_WITH_SECURE_SLAVE: "Setting authentication options is not possible when only the Slave SQL Thread is being started.",
+ ER_TABLE_HAS_NO_FT: "The table does not have FULLTEXT index to support this query",
+ ER_VARIABLE_NOT_SETTABLE_IN_SF_OR_TRIGGER: "The system variable %.200s cannot be set in stored functions or triggers.",
+ ER_VARIABLE_NOT_SETTABLE_IN_TRANSACTION: "The system variable %.200s cannot be set when there is an ongoing transaction.",
+ ER_GTID_NEXT_IS_NOT_IN_GTID_NEXT_LIST: "The system variable @@SESSION.GTID_NEXT has the value %.200s, which is not listed in @@SESSION.GTID_NEXT_LIST.",
+ ER_CANT_CHANGE_GTID_NEXT_IN_TRANSACTION_WHEN_GTID_NEXT_LIST_IS_NULL: "When @@SESSION.GTID_NEXT_LIST == NULL, the system variable @@SESSION.GTID_NEXT cannot change inside a transaction.",
+ ER_SET_STATEMENT_CANNOT_INVOKE_FUNCTION: "The statement 'SET %.200s' cannot invoke a stored function.",
+ ER_GTID_NEXT_CANT_BE_AUTOMATIC_IF_GTID_NEXT_LIST_IS_NON_NULL: "The system variable @@SESSION.GTID_NEXT cannot be 'AUTOMATIC' when @@SESSION.GTID_NEXT_LIST is non-NULL.",
+ ER_SKIPPING_LOGGED_TRANSACTION: "Skipping transaction %.200s because it has already been executed and logged.",
+ ER_MALFORMED_GTID_SET_SPECIFICATION: "Malformed GTID set specification '%.200s'.",
+ ER_MALFORMED_GTID_SET_ENCODING: "Malformed GTID set encoding.",
+ ER_MALFORMED_GTID_SPECIFICATION: "Malformed GTID specification '%.200s'.",
+ ER_GNO_EXHAUSTED: "Impossible to generate Global Transaction Identifier: the integer component reached the maximal value. Restart the server with a new server_uuid.",
+ ER_BAD_SLAVE_AUTO_POSITION: "Parameters MASTER_LOG_FILE, MASTER_LOG_POS, RELAY_LOG_FILE and RELAY_LOG_POS cannot be set when MASTER_AUTO_POSITION is active.",
+ ER_AUTO_POSITION_REQUIRES_GTID_MODE_ON: "CHANGE MASTER TO MASTER_AUTO_POSITION = 1 can only be executed when @@GLOBAL.GTID_MODE = ON.",
+ ER_CANT_DO_IMPLICIT_COMMIT_IN_TRX_WHEN_GTID_NEXT_IS_SET: "Cannot execute statements with implicit commit inside a transaction when @@SESSION.GTID_NEXT != AUTOMATIC or @@SESSION.GTID_NEXT_LIST != NULL.",
+ ER_GTID_MODE_2_OR_3_REQUIRES_ENFORCE_GTID_CONSISTENCY_ON: "@@GLOBAL.GTID_MODE = ON or UPGRADE_STEP_2 requires @@GLOBAL.ENFORCE_GTID_CONSISTENCY = 1.",
+ ER_GTID_MODE_REQUIRES_BINLOG: "@@GLOBAL.GTID_MODE = ON or UPGRADE_STEP_1 or UPGRADE_STEP_2 requires --log-bin and --log-slave-updates.",
+ ER_CANT_SET_GTID_NEXT_TO_GTID_WHEN_GTID_MODE_IS_OFF: "@@SESSION.GTID_NEXT cannot be set to UUID:NUMBER when @@GLOBAL.GTID_MODE = OFF.",
+ ER_CANT_SET_GTID_NEXT_TO_ANONYMOUS_WHEN_GTID_MODE_IS_ON: "@@SESSION.GTID_NEXT cannot be set to ANONYMOUS when @@GLOBAL.GTID_MODE = ON.",
+ ER_CANT_SET_GTID_NEXT_LIST_TO_NON_NULL_WHEN_GTID_MODE_IS_OFF: "@@SESSION.GTID_NEXT_LIST cannot be set to a non-NULL value when @@GLOBAL.GTID_MODE = OFF.",
+ ER_FOUND_GTID_EVENT_WHEN_GTID_MODE_IS_OFF: "Found a Gtid_log_event or Previous_gtids_log_event when @@GLOBAL.GTID_MODE = OFF.",
+ ER_GTID_UNSAFE_NON_TRANSACTIONAL_TABLE: "When @@GLOBAL.ENFORCE_GTID_CONSISTENCY = 1, updates to non-transactional tables can only be done in either autocommitted statements or single-statement transactions, and never in the same statement as updates to transactional tables.",
+ ER_GTID_UNSAFE_CREATE_SELECT: "CREATE TABLE ... SELECT is forbidden when @@GLOBAL.ENFORCE_GTID_CONSISTENCY = 1.",
+ ER_GTID_UNSAFE_CREATE_DROP_TEMPORARY_TABLE_IN_TRANSACTION: "When @@GLOBAL.ENFORCE_GTID_CONSISTENCY = 1, the statements CREATE TEMPORARY TABLE and DROP TEMPORARY TABLE can be executed in a non-transactional context only, and require that AUTOCOMMIT = 1.",
+ ER_GTID_MODE_CAN_ONLY_CHANGE_ONE_STEP_AT_A_TIME: "The value of @@GLOBAL.GTID_MODE can only change one step at a time: OFF <-> UPGRADE_STEP_1 <-> UPGRADE_STEP_2 <-> ON. Also note that this value must be stepped up or down simultaneously on all servers; see the Manual for instructions.",
+ ER_MASTER_HAS_PURGED_REQUIRED_GTIDS: "The slave is connecting using CHANGE MASTER TO MASTER_AUTO_POSITION = 1, but the master has purged binary logs containing GTIDs that the slave requires.",
+ ER_CANT_SET_GTID_NEXT_WHEN_OWNING_GTID: "@@SESSION.GTID_NEXT cannot be changed by a client that owns a GTID. The client owns %s. Ownership is released on COMMIT or ROLLBACK.",
+ ER_UNKNOWN_EXPLAIN_FORMAT: "Unknown EXPLAIN format name: '%s'",
+ ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION: "Cannot execute statement in a READ ONLY transaction.",
+ ER_TOO_LONG_TABLE_PARTITION_COMMENT: "Comment for table partition '%-.64s' is too long (max = %lu)",
+ ER_SLAVE_CONFIGURATION: "Slave is not configured or failed to initialize properly. You must at least set --server-id to enable either a master or a slave. Additional error messages can be found in the MySQL error log.",
+ ER_INNODB_FT_LIMIT: "InnoDB presently supports one FULLTEXT index creation at a time",
+ ER_INNODB_NO_FT_TEMP_TABLE: "Cannot create FULLTEXT index on temporary InnoDB table",
+ ER_INNODB_FT_WRONG_DOCID_COLUMN: "Column '%-.192s' is of wrong type for an InnoDB FULLTEXT index",
+ ER_INNODB_FT_WRONG_DOCID_INDEX: "Index '%-.192s' is of wrong type for an InnoDB FULLTEXT index",
+ ER_INNODB_ONLINE_LOG_TOO_BIG: "Creating index '%-.192s' required more than 'innodb_online_alter_log_max_size' bytes of modification log. Please try again.",
+ ER_UNKNOWN_ALTER_ALGORITHM: "Unknown ALGORITHM '%s'",
+ ER_UNKNOWN_ALTER_LOCK: "Unknown LOCK type '%s'",
+ ER_MTS_CHANGE_MASTER_CANT_RUN_WITH_GAPS: "CHANGE MASTER cannot be executed when the slave was stopped with an error or killed in MTS mode. Consider using RESET SLAVE or START SLAVE UNTIL.",
+ ER_MTS_RECOVERY_FAILURE: "Cannot recover after SLAVE errored out in parallel execution mode. Additional error messages can be found in the MySQL error log.",
+ ER_MTS_RESET_WORKERS: "Cannot clean up worker info tables. Additional error messages can be found in the MySQL error log.",
+ ER_COL_COUNT_DOESNT_MATCH_CORRUPTED_V2: "Column count of %s.%s is wrong. Expected %d, found %d. The table is probably corrupted",
+ ER_SLAVE_SILENT_RETRY_TRANSACTION: "Slave must silently retry current transaction",
+ ER_DISCARD_FK_CHECKS_RUNNING: "There is a foreign key check running on table '%-.192s'. Cannot discard the table.",
+ ER_TABLE_SCHEMA_MISMATCH: "Schema mismatch (%s)",
+ ER_TABLE_IN_SYSTEM_TABLESPACE: "Table '%-.192s' in system tablespace",
+ ER_IO_READ_ERROR: "IO Read error: (%lu, %s) %s",
+ ER_IO_WRITE_ERROR: "IO Write error: (%lu, %s) %s",
+ ER_TABLESPACE_MISSING: "Tablespace is missing for table '%-.192s'",
+ ER_TABLESPACE_EXISTS: "Tablespace for table '%-.192s' exists. Please DISCARD the tablespace before IMPORT.",
+ ER_TABLESPACE_DISCARDED: "Tablespace has been discarded for table '%-.192s'",
+ ER_INTERNAL_ERROR: "Internal error: %s",
+ ER_INNODB_IMPORT_ERROR: "ALTER TABLE '%-.192s' IMPORT TABLESPACE failed with error %lu : '%s'",
+ ER_INNODB_INDEX_CORRUPT: "Index corrupt: %s",
+ ER_INVALID_YEAR_COLUMN_LENGTH: "YEAR(%lu) column type is deprecated. Creating YEAR(4) column instead.",
+ ER_NOT_VALID_PASSWORD: "Your password does not satisfy the current policy requirements",
+ ER_MUST_CHANGE_PASSWORD: "You must SET PASSWORD before executing this statement",
+ ER_FK_NO_INDEX_CHILD: "Failed to add the foreign key constraint. Missing index for constraint '%s' in the foreign table '%s'",
+ ER_FK_NO_INDEX_PARENT: "Failed to add the foreign key constraint. Missing index for constraint '%s' in the referenced table '%s'",
+ ER_FK_FAIL_ADD_SYSTEM: "Failed to add the foreign key constraint '%s' to system tables",
+ ER_FK_CANNOT_OPEN_PARENT: "Failed to open the referenced table '%s'",
+ ER_FK_INCORRECT_OPTION: "Failed to add the foreign key constraint on table '%s'. Incorrect options in FOREIGN KEY constraint '%s'",
+ ER_FK_DUP_NAME: "Duplicate foreign key constraint name '%s'",
+ ER_PASSWORD_FORMAT: "The password hash doesn't have the expected format. Check if the correct password algorithm is being used with the PASSWORD() function.",
+ ER_FK_COLUMN_CANNOT_DROP: "Cannot drop column '%-.192s': needed in a foreign key constraint '%-.192s'",
+ ER_FK_COLUMN_CANNOT_DROP_CHILD: "Cannot drop column '%-.192s': needed in a foreign key constraint '%-.192s' of table '%-.192s'",
+ ER_FK_COLUMN_NOT_NULL: "Column '%-.192s' cannot be NOT NULL: needed in a foreign key constraint '%-.192s' SET NULL",
+ ER_DUP_INDEX: "Duplicate index '%-.64s' defined on the table '%-.64s.%-.64s'. This is deprecated and will be disallowed in a future release.",
+ ER_FK_COLUMN_CANNOT_CHANGE: "Cannot change column '%-.192s': used in a foreign key constraint '%-.192s'",
+ ER_FK_COLUMN_CANNOT_CHANGE_CHILD: "Cannot change column '%-.192s': used in a foreign key constraint '%-.192s' of table '%-.192s'",
+ ER_FK_CANNOT_DELETE_PARENT: "Cannot delete rows from table which is parent in a foreign key constraint '%-.192s' of table '%-.192s'",
+ ER_MALFORMED_PACKET: "Malformed communication packet.",
+ ER_READ_ONLY_MODE: "Running in read-only mode",
+ ER_GTID_NEXT_TYPE_UNDEFINED_GROUP: "When @@SESSION.GTID_NEXT is set to a GTID, you must explicitly set it again after a COMMIT or ROLLBACK. If you see this error message in the slave SQL thread, it means that a table in the current transaction is transactional on the master and non-transactional on the slave. In a client connection, it means that you executed SET @@SESSION.GTID_NEXT before a transaction and forgot to set @@SESSION.GTID_NEXT to a different identifier or to 'AUTOMATIC' after COMMIT or ROLLBACK. Current @@SESSION.GTID_NEXT is '%s'.",
+ ER_VARIABLE_NOT_SETTABLE_IN_SP: "The system variable %.200s cannot be set in stored procedures.",
+ ER_CANT_SET_GTID_PURGED_WHEN_GTID_MODE_IS_OFF: "@@GLOBAL.GTID_PURGED can only be set when @@GLOBAL.GTID_MODE = ON.",
+ ER_CANT_SET_GTID_PURGED_WHEN_GTID_EXECUTED_IS_NOT_EMPTY: "@@GLOBAL.GTID_PURGED can only be set when @@GLOBAL.GTID_EXECUTED is empty.",
+ ER_CANT_SET_GTID_PURGED_WHEN_OWNED_GTIDS_IS_NOT_EMPTY: "@@GLOBAL.GTID_PURGED can only be set when there are no ongoing transactions (not even in other clients).",
+ ER_GTID_PURGED_WAS_CHANGED: "@@GLOBAL.GTID_PURGED was changed from '%s' to '%s'.",
+ ER_GTID_EXECUTED_WAS_CHANGED: "@@GLOBAL.GTID_EXECUTED was changed from '%s' to '%s'.",
+ ER_BINLOG_STMT_MODE_AND_NO_REPL_TABLES: "Cannot execute statement: impossible to write to binary log since BINLOG_FORMAT = STATEMENT, and both replicated and non replicated tables are written to.",
+ ER_ALTER_OPERATION_NOT_SUPPORTED: "%s is not supported for this operation. Try %s.",
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON: "%s is not supported. Reason: %s. Try %s.",
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COPY: "COPY algorithm requires a lock",
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_PARTITION: "Partition specific operations do not yet support LOCK/ALGORITHM",
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_RENAME: "Columns participating in a foreign key are renamed",
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_COLUMN_TYPE: "Cannot change column type INPLACE",
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FK_CHECK: "Adding foreign keys needs foreign_key_checks=OFF",
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_IGNORE: "Creating unique indexes with IGNORE requires COPY algorithm to remove duplicate rows",
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOPK: "Dropping a primary key is not allowed without also adding a new primary key",
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_AUTOINC: "Adding an auto-increment column requires a lock",
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_HIDDEN_FTS: "Cannot replace hidden FTS_DOC_ID with a user-visible one",
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_CHANGE_FTS: "Cannot drop or rename FTS_DOC_ID",
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_FTS: "Fulltext index creation requires a lock",
+ ER_SQL_SLAVE_SKIP_COUNTER_NOT_SETTABLE_IN_GTID_MODE: "sql_slave_skip_counter can not be set when the server is running with @@GLOBAL.GTID_MODE = ON. Instead, for each transaction that you want to skip, generate an empty transaction with the same GTID as the transaction",
+ ER_DUP_UNKNOWN_IN_INDEX: "Duplicate entry for key '%-.192s'",
+ ER_IDENT_CAUSES_TOO_LONG_PATH: "Long database name and identifier for object resulted in path length exceeding %d characters. Path: '%s'.",
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON_NOT_NULL: "cannot silently convert NULL values, as required in this SQL_MODE",
+ ER_MUST_CHANGE_PASSWORD_LOGIN: "Your password has expired. To log in you must change it using a client that supports expired passwords.",
+ ER_ROW_IN_WRONG_PARTITION: "Found a row in wrong partition %s",
+}
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/error.go b/vendor/github.com/siddontang/go-mysql/mysql/error.go
new file mode 100644
index 000000000..876a4082b
--- /dev/null
+++ b/vendor/github.com/siddontang/go-mysql/mysql/error.go
@@ -0,0 +1,66 @@
+package mysql
+
+import (
+ "fmt"
+
+ "github.com/juju/errors"
+)
+
+var (
+ ErrBadConn = errors.New("connection was bad")
+ ErrMalformPacket = errors.New("Malform packet error")
+
+ ErrTxDone = errors.New("sql: Transaction has already been committed or rolled back")
+)
+
+type MyError struct {
+ Code uint16
+ Message string
+ State string
+}
+
+func (e *MyError) Error() string {
+ return fmt.Sprintf("ERROR %d (%s): %s", e.Code, e.State, e.Message)
+}
+
+// NewDefaultError creates a default MySQL error; the arguments must match the errname message format for the given error code.
+func NewDefaultError(errCode uint16, args ...interface{}) *MyError {
+ e := new(MyError)
+ e.Code = errCode
+
+ if s, ok := MySQLState[errCode]; ok {
+ e.State = s
+ } else {
+ e.State = DEFAULT_MYSQL_STATE
+ }
+
+ if format, ok := MySQLErrName[errCode]; ok {
+ e.Message = fmt.Sprintf(format, args...)
+ } else {
+ e.Message = fmt.Sprint(args...)
+ }
+
+ return e
+}
+
+func NewError(errCode uint16, message string) *MyError {
+ e := new(MyError)
+ e.Code = errCode
+
+ if s, ok := MySQLState[errCode]; ok {
+ e.State = s
+ } else {
+ e.State = DEFAULT_MYSQL_STATE
+ }
+
+ e.Message = message
+
+ return e
+}
+
+func ErrorCode(errMsg string) (code int) {
+ var tmpStr string
+ // golang Sscanf doesn't support %*, so a temporary variable is used to skip the leading token
+ fmt.Sscanf(errMsg, "%s%d", &tmpStr, &code)
+ return
+}
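
A minimal usage sketch for the error helpers above, assuming this vendored package is imported as mysql and that the referenced error codes exist in its constants; the message text comes from the MySQLErrName table, so the printed output is only indicative.

package main

import (
	"fmt"

	mysql "github.com/siddontang/go-mysql/mysql"
)

func main() {
	// NewDefaultError looks up the message format in MySQLErrName and the
	// SQLSTATE in MySQLState, so the arguments must match the format's verbs.
	e := mysql.NewDefaultError(mysql.ER_ACCESS_DENIED_ERROR, "root", "127.0.0.1", "YES")
	fmt.Println(e) // e.g. ERROR 1045 (28000): Access denied for user 'root'@'127.0.0.1' (using password: YES)

	// NewError takes the message verbatim and only maps the SQLSTATE.
	fmt.Println(mysql.NewError(mysql.ER_NO_DB_ERROR, "No database selected"))
}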
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/field.go b/vendor/github.com/siddontang/go-mysql/mysql/field.go
new file mode 100644
index 000000000..c26f6a292
--- /dev/null
+++ b/vendor/github.com/siddontang/go-mysql/mysql/field.go
@@ -0,0 +1,157 @@
+package mysql
+
+import (
+ "encoding/binary"
+)
+
+type FieldData []byte
+
+type Field struct {
+ Data FieldData
+ Schema []byte
+ Table []byte
+ OrgTable []byte
+ Name []byte
+ OrgName []byte
+ Charset uint16
+ ColumnLength uint32
+ Type uint8
+ Flag uint16
+ Decimal uint8
+
+ DefaultValueLength uint64
+ DefaultValue []byte
+}
+
+func (p FieldData) Parse() (f *Field, err error) {
+ f = new(Field)
+
+ f.Data = p
+
+ var n int
+ pos := 0
+ //skip catalog, always "def"
+ n, err = SkipLengthEnodedString(p)
+ if err != nil {
+ return
+ }
+ pos += n
+
+ //schema
+ f.Schema, _, n, err = LengthEnodedString(p[pos:])
+ if err != nil {
+ return
+ }
+ pos += n
+
+ //table
+ f.Table, _, n, err = LengthEnodedString(p[pos:])
+ if err != nil {
+ return
+ }
+ pos += n
+
+ //org_table
+ f.OrgTable, _, n, err = LengthEnodedString(p[pos:])
+ if err != nil {
+ return
+ }
+ pos += n
+
+ //name
+ f.Name, _, n, err = LengthEnodedString(p[pos:])
+ if err != nil {
+ return
+ }
+ pos += n
+
+ //org_name
+ f.OrgName, _, n, err = LengthEnodedString(p[pos:])
+ if err != nil {
+ return
+ }
+ pos += n
+
+ //skip the 0x0c filler byte (length of the fixed-length fields)
+ pos += 1
+
+ //charset
+ f.Charset = binary.LittleEndian.Uint16(p[pos:])
+ pos += 2
+
+ //column length
+ f.ColumnLength = binary.LittleEndian.Uint32(p[pos:])
+ pos += 4
+
+ //type
+ f.Type = p[pos]
+ pos++
+
+ //flag
+ f.Flag = binary.LittleEndian.Uint16(p[pos:])
+ pos += 2
+
+ //decimals 1
+ f.Decimal = p[pos]
+ pos++
+
+ //filler [0x00][0x00]
+ pos += 2
+
+ f.DefaultValue = nil
+ //if more data, command was field list
+ if len(p) > pos {
+ //length of default value lenenc-int
+ f.DefaultValueLength, _, n = LengthEncodedInt(p[pos:])
+ pos += n
+
+ if pos+int(f.DefaultValueLength) > len(p) {
+ err = ErrMalformPacket
+ return
+ }
+
+ //default value string[$len]
+ f.DefaultValue = p[pos:(pos + int(f.DefaultValueLength))]
+ }
+
+ return
+}
+
+func (f *Field) Dump() []byte {
+ if f == nil {
+ f = &Field{}
+ }
+ if f.Data != nil {
+ return []byte(f.Data)
+ }
+
+ l := len(f.Schema) + len(f.Table) + len(f.OrgTable) + len(f.Name) + len(f.OrgName) + len(f.DefaultValue) + 48
+
+ data := make([]byte, 0, l)
+
+ data = append(data, PutLengthEncodedString([]byte("def"))...)
+
+ data = append(data, PutLengthEncodedString(f.Schema)...)
+
+ data = append(data, PutLengthEncodedString(f.Table)...)
+ data = append(data, PutLengthEncodedString(f.OrgTable)...)
+
+ data = append(data, PutLengthEncodedString(f.Name)...)
+ data = append(data, PutLengthEncodedString(f.OrgName)...)
+
+ data = append(data, 0x0c)
+
+ data = append(data, Uint16ToBytes(f.Charset)...)
+ data = append(data, Uint32ToBytes(f.ColumnLength)...)
+ data = append(data, f.Type)
+ data = append(data, Uint16ToBytes(f.Flag)...)
+ data = append(data, f.Decimal)
+ data = append(data, 0, 0)
+
+ if f.DefaultValue != nil {
+ data = append(data, Uint64ToBytes(f.DefaultValueLength)...)
+ data = append(data, f.DefaultValue...)
+ }
+
+ return data
+}
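
A small sketch of how a column definition built in Go round-trips through Dump and Parse; it assumes the package is imported as mysql and that the MYSQL_TYPE_LONGLONG constant is defined elsewhere in this vendored package.

package main

import (
	"fmt"

	mysql "github.com/siddontang/go-mysql/mysql"
)

func main() {
	f := &mysql.Field{
		Schema:  []byte("test"),
		Table:   []byte("t"),
		Name:    []byte("id"),
		Charset: 33,
		Type:    mysql.MYSQL_TYPE_LONGLONG,
	}

	// Dump serializes the column definition in the same wire format that Parse reads back.
	raw := f.Dump()
	parsed, err := mysql.FieldData(raw).Parse()
	if err != nil {
		panic(err)
	}
	fmt.Println(string(parsed.Name), parsed.Charset) // id 33
}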
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/gtid.go b/vendor/github.com/siddontang/go-mysql/mysql/gtid.go
new file mode 100644
index 000000000..cde990150
--- /dev/null
+++ b/vendor/github.com/siddontang/go-mysql/mysql/gtid.go
@@ -0,0 +1,29 @@
+package mysql
+
+import "github.com/juju/errors"
+
+type GTIDSet interface {
+ String() string
+
+ // Encode GTID set into binary format used in binlog dump commands
+ Encode() []byte
+
+ Equal(o GTIDSet) bool
+
+ Contain(o GTIDSet) bool
+
+ Update(GTIDStr string) error
+
+ Clone() GTIDSet
+}
+
+func ParseGTIDSet(flavor string, s string) (GTIDSet, error) {
+ switch flavor {
+ case MySQLFlavor:
+ return ParseMysqlGTIDSet(s)
+ case MariaDBFlavor:
+ return ParseMariadbGTIDSet(s)
+ default:
+ return nil, errors.Errorf("invalid flavor %s", flavor)
+ }
+}
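
A short sketch of the flavor switch, assuming the package is imported as mysql and that MySQLFlavor/MariaDBFlavor are flavor constants defined elsewhere in this vendored package; the UUID is only an example value.

package main

import (
	"fmt"

	mysql "github.com/siddontang/go-mysql/mysql"
)

func main() {
	// The same GTIDSet interface hides two very different encodings.
	ms, err := mysql.ParseGTIDSet(mysql.MySQLFlavor, "de278ad0-2106-11e4-9f8e-6edd0ca20947:1-5")
	if err != nil {
		panic(err)
	}
	md, err := mysql.ParseGTIDSet(mysql.MariaDBFlavor, "0-1-100")
	if err != nil {
		panic(err)
	}

	fmt.Println(ms, md)
	fmt.Println(ms.Contain(md)) // false: a MySQL set never contains a MariaDB set
}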
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/mariadb_gtid.go b/vendor/github.com/siddontang/go-mysql/mysql/mariadb_gtid.go
new file mode 100644
index 000000000..09fe7ac50
--- /dev/null
+++ b/vendor/github.com/siddontang/go-mysql/mysql/mariadb_gtid.go
@@ -0,0 +1,232 @@
+package mysql
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/juju/errors"
+ "github.com/siddontang/go-log/log"
+ "github.com/siddontang/go/hack"
+)
+
+// MariadbGTID represents a MariaDB GTID, [domain ID]-[server-id]-[sequence]
+type MariadbGTID struct {
+ DomainID uint32
+ ServerID uint32
+ SequenceNumber uint64
+}
+
+// ParseMariadbGTID parses a MariaDB GTID in the form [domain ID]-[server-id]-[sequence]
+func ParseMariadbGTID(str string) (*MariadbGTID, error) {
+ if len(str) == 0 {
+ return &MariadbGTID{0, 0, 0}, nil
+ }
+
+ seps := strings.Split(str, "-")
+
+ gtid := new(MariadbGTID)
+
+ if len(seps) != 3 {
+ return gtid, errors.Errorf("invalid Mariadb GTID %v, must domain-server-sequence", str)
+ }
+
+ domainID, err := strconv.ParseUint(seps[0], 10, 32)
+ if err != nil {
+ return gtid, errors.Errorf("invalid MariaDB GTID Domain ID (%v): %v", seps[0], err)
+ }
+
+ serverID, err := strconv.ParseUint(seps[1], 10, 32)
+ if err != nil {
+ return gtid, errors.Errorf("invalid MariaDB GTID Server ID (%v): %v", seps[1], err)
+ }
+
+ sequenceID, err := strconv.ParseUint(seps[2], 10, 64)
+ if err != nil {
+ return gtid, errors.Errorf("invalid MariaDB GTID Sequence number (%v): %v", seps[2], err)
+ }
+
+ return &MariadbGTID{
+ DomainID: uint32(domainID),
+ ServerID: uint32(serverID),
+ SequenceNumber: sequenceID}, nil
+}
+
+func (gtid *MariadbGTID) String() string {
+ if gtid.DomainID == 0 && gtid.ServerID == 0 && gtid.SequenceNumber == 0 {
+ return ""
+ }
+
+ return fmt.Sprintf("%d-%d-%d", gtid.DomainID, gtid.ServerID, gtid.SequenceNumber)
+}
+
+// Contain returns whether this MariaDB GTID covers another MariaDB GTID
+func (gtid *MariadbGTID) Contain(other *MariadbGTID) bool {
+ return gtid.DomainID == other.DomainID && gtid.SequenceNumber >= other.SequenceNumber
+}
+
+// Clone clones a mariadb gtid
+func (gtid *MariadbGTID) Clone() *MariadbGTID {
+ o := new(MariadbGTID)
+ *o = *gtid
+ return o
+}
+
+func (gtid *MariadbGTID) forward(newer *MariadbGTID) error {
+ if newer.DomainID != gtid.DomainID {
+ return errors.Errorf("%s is not same with doamin of %s", newer, gtid)
+ }
+
+ /*
+ Here's a simplified example of binlog events.
+ Although one domain should normally have only one writer at a time, we cannot restrict how users operate,
+ so we just log a warning and let it go on.
+ | mysqld-bin.000001 | 1453 | Gtid | 112 | 1495 | BEGIN GTID 0-112-6 |
+ | mysqld-bin.000001 | 1624 | Xid | 112 | 1655 | COMMIT xid=74 |
+ | mysqld-bin.000001 | 1655 | Gtid | 112 | 1697 | BEGIN GTID 0-112-7 |
+ | mysqld-bin.000001 | 1826 | Xid | 112 | 1857 | COMMIT xid=75 |
+ | mysqld-bin.000001 | 1857 | Gtid | 111 | 1899 | BEGIN GTID 0-111-5 |
+ | mysqld-bin.000001 | 1981 | Xid | 111 | 2012 | COMMIT xid=77 |
+ | mysqld-bin.000001 | 2012 | Gtid | 112 | 2054 | BEGIN GTID 0-112-8 |
+ | mysqld-bin.000001 | 2184 | Xid | 112 | 2215 | COMMIT xid=116 |
+ | mysqld-bin.000001 | 2215 | Gtid | 111 | 2257 | BEGIN GTID 0-111-6 |
+ */
+ if newer.SequenceNumber <= gtid.SequenceNumber {
+ log.Warnf("out of order binlog appears with gtid %s vs current position gtid %s", newer, gtid)
+ }
+
+ gtid.ServerID = newer.ServerID
+ gtid.SequenceNumber = newer.SequenceNumber
+ return nil
+}
+
+// MariadbGTIDSet is a set of MariaDB GTIDs, keyed by domain ID
+type MariadbGTIDSet struct {
+ Sets map[uint32]*MariadbGTID
+}
+
+// ParseMariadbGTIDSet parses str into a MariaDB GTID set
+func ParseMariadbGTIDSet(str string) (GTIDSet, error) {
+ s := new(MariadbGTIDSet)
+ s.Sets = make(map[uint32]*MariadbGTID)
+ if str == "" {
+ return s, nil
+ }
+
+ sp := strings.Split(str, ",")
+
+ //TODO: handle redundant entries for the same domain
+ for i := 0; i < len(sp); i++ {
+ err := s.Update(sp[i])
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ }
+ return s, nil
+}
+
+// AddSet adds a MariaDB GTID to the set
+func (s *MariadbGTIDSet) AddSet(gtid *MariadbGTID) error {
+ if gtid == nil {
+ return nil
+ }
+
+ o, ok := s.Sets[gtid.DomainID]
+ if ok {
+ err := o.forward(gtid)
+ if err != nil {
+ return errors.Trace(err)
+ }
+ } else {
+ s.Sets[gtid.DomainID] = gtid
+ }
+
+ return nil
+}
+
+// Update updates mariadb gtid set
+func (s *MariadbGTIDSet) Update(GTIDStr string) error {
+ gtid, err := ParseMariadbGTID(GTIDStr)
+ if err != nil {
+ return err
+ }
+
+ err = s.AddSet(gtid)
+ return errors.Trace(err)
+}
+
+func (s *MariadbGTIDSet) String() string {
+ return hack.String(s.Encode())
+}
+
+// Encode encodes mariadb gtid set
+func (s *MariadbGTIDSet) Encode() []byte {
+ var buf bytes.Buffer
+ sep := ""
+ for _, gtid := range s.Sets {
+ buf.WriteString(sep)
+ buf.WriteString(gtid.String())
+ sep = ","
+ }
+
+ return buf.Bytes()
+}
+
+// Clone clones a mariadb gtid set
+func (s *MariadbGTIDSet) Clone() GTIDSet {
+ clone := &MariadbGTIDSet{
+ Sets: make(map[uint32]*MariadbGTID),
+ }
+ for domainID, gtid := range s.Sets {
+ clone.Sets[domainID] = gtid.Clone()
+ }
+
+ return clone
+}
+
+// Equal returns true if the two MariaDB GTID sets are the same, otherwise false
+func (s *MariadbGTIDSet) Equal(o GTIDSet) bool {
+ other, ok := o.(*MariadbGTIDSet)
+ if !ok {
+ return false
+ }
+
+ if len(other.Sets) != len(s.Sets) {
+ return false
+ }
+
+ for domainID, gtid := range other.Sets {
+ o, ok := s.Sets[domainID]
+ if !ok {
+ return false
+ }
+
+ if *gtid != *o {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Contain returns whether this MariaDB GTID set covers another MariaDB GTID set
+func (s *MariadbGTIDSet) Contain(o GTIDSet) bool {
+ other, ok := o.(*MariadbGTIDSet)
+ if !ok {
+ return false
+ }
+
+ for domainID, gtid := range other.Sets {
+ o, ok := s.Sets[domainID]
+ if !ok {
+ return false
+ }
+
+ if !o.Contain(gtid) {
+ return false
+ }
+ }
+
+ return true
+}
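
A minimal sketch of the per-domain semantics above, assuming the package is imported as mysql; error handling is trimmed for the sub-set parse.

package main

import (
	"fmt"

	mysql "github.com/siddontang/go-mysql/mysql"
)

func main() {
	set, err := mysql.ParseMariadbGTIDSet("0-1-100,1-2-50")
	if err != nil {
		panic(err)
	}

	// Update keeps one GTID per domain and forwards its sequence number.
	if err := set.Update("0-1-120"); err != nil {
		panic(err)
	}

	sub, _ := mysql.ParseMariadbGTIDSet("0-1-110")
	fmt.Println(set.Contain(sub)) // true: domain 0 has reached sequence 120 >= 110
	fmt.Println(set.String())     // e.g. "0-1-120,1-2-50" (map iteration order is not guaranteed)
}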
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/mysql_gtid.go b/vendor/github.com/siddontang/go-mysql/mysql/mysql_gtid.go
new file mode 100644
index 000000000..a937cb841
--- /dev/null
+++ b/vendor/github.com/siddontang/go-mysql/mysql/mysql_gtid.go
@@ -0,0 +1,449 @@
+package mysql
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/juju/errors"
+ "github.com/satori/go.uuid"
+ "github.com/siddontang/go/hack"
+)
+
+// Like MySQL GTID Interval struct, [start, stop), left closed and right open
+// See MySQL rpl_gtid.h
+type Interval struct {
+ // The first GID of this interval.
+ Start int64
+ // The first GID after this interval.
+ Stop int64
+}
+
+// Interval is [start, stop), but the GTID string's format is [n] or [n1-n2], closed interval
+func parseInterval(str string) (i Interval, err error) {
+ p := strings.Split(str, "-")
+ switch len(p) {
+ case 1:
+ i.Start, err = strconv.ParseInt(p[0], 10, 64)
+ i.Stop = i.Start + 1
+ case 2:
+ i.Start, err = strconv.ParseInt(p[0], 10, 64)
+ if err != nil {
+ return
+ }
+ i.Stop, err = strconv.ParseInt(p[1], 10, 64)
+ i.Stop = i.Stop + 1
+ default:
+ err = errors.Errorf("invalid interval format, must n[-n]")
+ }
+
+ if err != nil {
+ return
+ }
+
+ if i.Stop <= i.Start {
+ err = errors.Errorf("invalid interval format, must n[-n] and the end must >= start")
+ }
+
+ return
+}
+
+func (i Interval) String() string {
+ if i.Stop == i.Start+1 {
+ return fmt.Sprintf("%d", i.Start)
+ } else {
+ return fmt.Sprintf("%d-%d", i.Start, i.Stop-1)
+ }
+}
+
+type IntervalSlice []Interval
+
+func (s IntervalSlice) Len() int {
+ return len(s)
+}
+
+func (s IntervalSlice) Less(i, j int) bool {
+ if s[i].Start < s[j].Start {
+ return true
+ } else if s[i].Start > s[j].Start {
+ return false
+ } else {
+ return s[i].Stop < s[j].Stop
+ }
+}
+
+func (s IntervalSlice) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s IntervalSlice) Sort() {
+ sort.Sort(s)
+}
+
+func (s IntervalSlice) Normalize() IntervalSlice {
+ var n IntervalSlice
+ if len(s) == 0 {
+ return n
+ }
+
+ s.Sort()
+
+ n = append(n, s[0])
+
+ for i := 1; i < len(s); i++ {
+ last := n[len(n)-1]
+ if s[i].Start > last.Stop {
+ n = append(n, s[i])
+ continue
+ } else {
+ stop := s[i].Stop
+ if last.Stop > stop {
+ stop = last.Stop
+ }
+ n[len(n)-1] = Interval{last.Start, stop}
+ }
+ }
+
+ return n
+}
+
+// Contain returns true if sub is contained in s
+func (s IntervalSlice) Contain(sub IntervalSlice) bool {
+ j := 0
+ for i := 0; i < len(sub); i++ {
+ for ; j < len(s); j++ {
+ if sub[i].Start > s[j].Stop {
+ continue
+ } else {
+ break
+ }
+ }
+ if j == len(s) {
+ return false
+ }
+
+ if sub[i].Start < s[j].Start || sub[i].Stop > s[j].Stop {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (s IntervalSlice) Equal(o IntervalSlice) bool {
+ if len(s) != len(o) {
+ return false
+ }
+
+ for i := 0; i < len(s); i++ {
+ if s[i].Start != o[i].Start || s[i].Stop != o[i].Stop {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (s IntervalSlice) Compare(o IntervalSlice) int {
+ if s.Equal(o) {
+ return 0
+ } else if s.Contain(o) {
+ return 1
+ } else {
+ return -1
+ }
+}
+
+// Refer http://dev.mysql.com/doc/refman/5.6/en/replication-gtids-concepts.html
+type UUIDSet struct {
+ SID uuid.UUID
+
+ Intervals IntervalSlice
+}
+
+func ParseUUIDSet(str string) (*UUIDSet, error) {
+ str = strings.TrimSpace(str)
+ sep := strings.Split(str, ":")
+ if len(sep) < 2 {
+ return nil, errors.Errorf("invalid GTID format, must UUID:interval[:interval]")
+ }
+
+ var err error
+ s := new(UUIDSet)
+ if s.SID, err = uuid.FromString(sep[0]); err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ // Handle interval
+ for i := 1; i < len(sep); i++ {
+ if in, err := parseInterval(sep[i]); err != nil {
+ return nil, errors.Trace(err)
+ } else {
+ s.Intervals = append(s.Intervals, in)
+ }
+ }
+
+ s.Intervals = s.Intervals.Normalize()
+
+ return s, nil
+}
+
+func NewUUIDSet(sid uuid.UUID, in ...Interval) *UUIDSet {
+ s := new(UUIDSet)
+ s.SID = sid
+
+ s.Intervals = in
+ s.Intervals = s.Intervals.Normalize()
+
+ return s
+}
+
+func (s *UUIDSet) Contain(sub *UUIDSet) bool {
+ if !bytes.Equal(s.SID.Bytes(), sub.SID.Bytes()) {
+ return false
+ }
+
+ return s.Intervals.Contain(sub.Intervals)
+}
+
+func (s *UUIDSet) Bytes() []byte {
+ var buf bytes.Buffer
+
+ buf.WriteString(s.SID.String())
+
+ for _, i := range s.Intervals {
+ buf.WriteString(":")
+ buf.WriteString(i.String())
+ }
+
+ return buf.Bytes()
+}
+
+func (s *UUIDSet) AddInterval(in IntervalSlice) {
+ s.Intervals = append(s.Intervals, in...)
+ s.Intervals = s.Intervals.Normalize()
+}
+
+func (s *UUIDSet) String() string {
+ return hack.String(s.Bytes())
+}
+
+func (s *UUIDSet) encode(w io.Writer) {
+ w.Write(s.SID.Bytes())
+ n := int64(len(s.Intervals))
+
+ binary.Write(w, binary.LittleEndian, n)
+
+ for _, i := range s.Intervals {
+ binary.Write(w, binary.LittleEndian, i.Start)
+ binary.Write(w, binary.LittleEndian, i.Stop)
+ }
+}
+
+func (s *UUIDSet) Encode() []byte {
+ var buf bytes.Buffer
+
+ s.encode(&buf)
+
+ return buf.Bytes()
+}
+
+func (s *UUIDSet) decode(data []byte) (int, error) {
+ if len(data) < 24 {
+ return 0, errors.Errorf("invalid uuid set buffer, less 24")
+ }
+
+ pos := 0
+ var err error
+ if s.SID, err = uuid.FromBytes(data[0:16]); err != nil {
+ return 0, err
+ }
+ pos += 16
+
+ n := int64(binary.LittleEndian.Uint64(data[pos : pos+8]))
+ pos += 8
+ if len(data) < int(16*n)+pos {
+ return 0, errors.Errorf("invalid uuid set buffer, must %d, but %d", pos+int(16*n), len(data))
+ }
+
+ s.Intervals = make([]Interval, 0, n)
+
+ var in Interval
+ for i := int64(0); i < n; i++ {
+ in.Start = int64(binary.LittleEndian.Uint64(data[pos : pos+8]))
+ pos += 8
+ in.Stop = int64(binary.LittleEndian.Uint64(data[pos : pos+8]))
+ pos += 8
+ s.Intervals = append(s.Intervals, in)
+ }
+
+ return pos, nil
+}
+
+func (s *UUIDSet) Decode(data []byte) error {
+ n, err := s.decode(data)
+ if n != len(data) {
+ return errors.Errorf("invalid uuid set buffer, must %d, but %d", n, len(data))
+ }
+ return err
+}
+
+func (s *UUIDSet) Clone() *UUIDSet {
+ clone := new(UUIDSet)
+
+ clone.SID, _ = uuid.FromString(s.SID.String())
+ clone.Intervals = s.Intervals.Normalize()
+
+ return clone
+}
+
+type MysqlGTIDSet struct {
+ Sets map[string]*UUIDSet
+}
+
+func ParseMysqlGTIDSet(str string) (GTIDSet, error) {
+ s := new(MysqlGTIDSet)
+ s.Sets = make(map[string]*UUIDSet)
+ if str == "" {
+ return s, nil
+ }
+
+ sp := strings.Split(str, ",")
+
+ //TODO: handle redundant entries with the same UUID
+ for i := 0; i < len(sp); i++ {
+ if set, err := ParseUUIDSet(sp[i]); err != nil {
+ return nil, errors.Trace(err)
+ } else {
+ s.AddSet(set)
+ }
+
+ }
+ return s, nil
+}
+
+func DecodeMysqlGTIDSet(data []byte) (*MysqlGTIDSet, error) {
+ s := new(MysqlGTIDSet)
+
+ if len(data) < 8 {
+ return nil, errors.Errorf("invalid gtid set buffer, less 4")
+ }
+
+ n := int(binary.LittleEndian.Uint64(data))
+ s.Sets = make(map[string]*UUIDSet, n)
+
+ pos := 8
+
+ for i := 0; i < n; i++ {
+ set := new(UUIDSet)
+ if n, err := set.decode(data[pos:]); err != nil {
+ return nil, errors.Trace(err)
+ } else {
+ pos += n
+
+ s.AddSet(set)
+ }
+ }
+ return s, nil
+}
+
+func (s *MysqlGTIDSet) AddSet(set *UUIDSet) {
+ if set == nil {
+ return
+ }
+ sid := set.SID.String()
+ o, ok := s.Sets[sid]
+ if ok {
+ o.AddInterval(set.Intervals)
+ } else {
+ s.Sets[sid] = set
+ }
+}
+
+func (s *MysqlGTIDSet) Update(GTIDStr string) error {
+ uuidSet, err := ParseUUIDSet(GTIDStr)
+ if err != nil {
+ return err
+ }
+
+ s.AddSet(uuidSet)
+
+ return nil
+}
+
+func (s *MysqlGTIDSet) Contain(o GTIDSet) bool {
+ sub, ok := o.(*MysqlGTIDSet)
+ if !ok {
+ return false
+ }
+
+ for key, set := range sub.Sets {
+ o, ok := s.Sets[key]
+ if !ok {
+ return false
+ }
+
+ if !o.Contain(set) {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (s *MysqlGTIDSet) Equal(o GTIDSet) bool {
+ sub, ok := o.(*MysqlGTIDSet)
+ if !ok {
+ return false
+ }
+
+ for key, set := range sub.Sets {
+ o, ok := s.Sets[key]
+ if !ok {
+ return false
+ }
+
+ if !o.Intervals.Equal(set.Intervals) {
+ return false
+ }
+ }
+
+ return true
+
+}
+
+func (s *MysqlGTIDSet) String() string {
+ var buf bytes.Buffer
+ sep := ""
+ for _, set := range s.Sets {
+ buf.WriteString(sep)
+ buf.WriteString(set.String())
+ sep = ","
+ }
+
+ return hack.String(buf.Bytes())
+}
+
+func (s *MysqlGTIDSet) Encode() []byte {
+ var buf bytes.Buffer
+
+ binary.Write(&buf, binary.LittleEndian, uint64(len(s.Sets)))
+
+ for i := range s.Sets {
+ s.Sets[i].encode(&buf)
+ }
+
+ return buf.Bytes()
+}
+
+func (gtid *MysqlGTIDSet) Clone() GTIDSet {
+ clone := &MysqlGTIDSet{
+ Sets: make(map[string]*UUIDSet),
+ }
+ for sid, uuidSet := range gtid.Sets {
+ clone.Sets[sid] = uuidSet.Clone()
+ }
+
+ return clone
+}
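
A small sketch of interval normalization and containment for MySQL GTID sets, assuming the package is imported as mysql; the UUID is only an example value.

package main

import (
	"fmt"

	mysql "github.com/siddontang/go-mysql/mysql"
)

func main() {
	// Adjacent intervals are merged by Normalize: 1-5 and 6-10 become 1-10.
	set, err := mysql.ParseMysqlGTIDSet("de278ad0-2106-11e4-9f8e-6edd0ca20947:1-5:6-10")
	if err != nil {
		panic(err)
	}
	fmt.Println(set) // de278ad0-2106-11e4-9f8e-6edd0ca20947:1-10

	sub, _ := mysql.ParseMysqlGTIDSet("de278ad0-2106-11e4-9f8e-6edd0ca20947:3-7")
	fmt.Println(set.Contain(sub)) // true
}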
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/parse_binary.go b/vendor/github.com/siddontang/go-mysql/mysql/parse_binary.go
new file mode 100644
index 000000000..b9b8179a1
--- /dev/null
+++ b/vendor/github.com/siddontang/go-mysql/mysql/parse_binary.go
@@ -0,0 +1,53 @@
+package mysql
+
+import (
+ "encoding/binary"
+ "math"
+)
+
+func ParseBinaryInt8(data []byte) int8 {
+ return int8(data[0])
+}
+func ParseBinaryUint8(data []byte) uint8 {
+ return data[0]
+}
+
+func ParseBinaryInt16(data []byte) int16 {
+ return int16(binary.LittleEndian.Uint16(data))
+}
+func ParseBinaryUint16(data []byte) uint16 {
+ return binary.LittleEndian.Uint16(data)
+}
+
+func ParseBinaryInt24(data []byte) int32 {
+ u32 := uint32(ParseBinaryUint24(data))
+ if u32&0x00800000 != 0 {
+ u32 |= 0xFF000000
+ }
+ return int32(u32)
+}
+func ParseBinaryUint24(data []byte) uint32 {
+ return uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16
+}
+
+func ParseBinaryInt32(data []byte) int32 {
+ return int32(binary.LittleEndian.Uint32(data))
+}
+func ParseBinaryUint32(data []byte) uint32 {
+ return binary.LittleEndian.Uint32(data)
+}
+
+func ParseBinaryInt64(data []byte) int64 {
+ return int64(binary.LittleEndian.Uint64(data))
+}
+func ParseBinaryUint64(data []byte) uint64 {
+ return binary.LittleEndian.Uint64(data)
+}
+
+func ParseBinaryFloat32(data []byte) float32 {
+ return math.Float32frombits(binary.LittleEndian.Uint32(data))
+}
+
+func ParseBinaryFloat64(data []byte) float64 {
+ return math.Float64frombits(binary.LittleEndian.Uint64(data))
+}
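
A quick sketch of the 24-bit sign extension, the only non-obvious case above, assuming the package is imported as mysql.

package main

import (
	"fmt"

	mysql "github.com/siddontang/go-mysql/mysql"
)

func main() {
	// MEDIUMINT is 3 little-endian bytes; the signed parser extends bit 23 into the top byte.
	b := []byte{0xff, 0xff, 0xff}
	fmt.Println(mysql.ParseBinaryInt24(b))  // -1
	fmt.Println(mysql.ParseBinaryUint24(b)) // 16777215
}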
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/position.go b/vendor/github.com/siddontang/go-mysql/mysql/position.go
new file mode 100644
index 000000000..bee5485d5
--- /dev/null
+++ b/vendor/github.com/siddontang/go-mysql/mysql/position.go
@@ -0,0 +1,33 @@
+package mysql
+
+import (
+ "fmt"
+)
+
+// For binlog filename + position based replication
+type Position struct {
+ Name string
+ Pos uint32
+}
+
+func (p Position) Compare(o Position) int {
+ // First compare binlog name
+ if p.Name > o.Name {
+ return 1
+ } else if p.Name < o.Name {
+ return -1
+ } else {
+ // Same binlog file, compare position
+ if p.Pos > o.Pos {
+ return 1
+ } else if p.Pos < o.Pos {
+ return -1
+ } else {
+ return 0
+ }
+ }
+}
+
+func (p Position) String() string {
+ return fmt.Sprintf("(%s, %d)", p.Name, p.Pos)
+}
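
A minimal sketch of Compare, which orders positions by binlog file name first and offset second; this works because the server zero-pads the file index to a fixed width (package assumed imported as mysql).

package main

import (
	"fmt"

	mysql "github.com/siddontang/go-mysql/mysql"
)

func main() {
	a := mysql.Position{Name: "mysql-bin.000001", Pos: 1024}
	b := mysql.Position{Name: "mysql-bin.000002", Pos: 4}

	fmt.Println(a.Compare(b)) // -1: an earlier file sorts before a later one regardless of offset
	fmt.Println(a.String())   // (mysql-bin.000001, 1024)
}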
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/result.go b/vendor/github.com/siddontang/go-mysql/mysql/result.go
new file mode 100644
index 000000000..d6c80e422
--- /dev/null
+++ b/vendor/github.com/siddontang/go-mysql/mysql/result.go
@@ -0,0 +1,14 @@
+package mysql
+
+type Result struct {
+ Status uint16
+
+ InsertId uint64
+ AffectedRows uint64
+
+ *Resultset
+}
+
+type Executer interface {
+ Execute(query string, args ...interface{}) (*Result, error)
+}
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/resultset.go b/vendor/github.com/siddontang/go-mysql/mysql/resultset.go
new file mode 100644
index 000000000..080405087
--- /dev/null
+++ b/vendor/github.com/siddontang/go-mysql/mysql/resultset.go
@@ -0,0 +1,439 @@
+package mysql
+
+import (
+ "fmt"
+ "strconv"
+
+ "github.com/juju/errors"
+ "github.com/siddontang/go/hack"
+)
+
+type RowData []byte
+
+func (p RowData) Parse(f []*Field, binary bool) ([]interface{}, error) {
+ if binary {
+ return p.ParseBinary(f)
+ } else {
+ return p.ParseText(f)
+ }
+}
+
+func (p RowData) ParseText(f []*Field) ([]interface{}, error) {
+ data := make([]interface{}, len(f))
+
+ var err error
+ var v []byte
+ var isNull bool
+ var pos int = 0
+ var n int = 0
+
+ for i := range f {
+ v, isNull, n, err = LengthEnodedString(p[pos:])
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ pos += n
+
+ if isNull {
+ data[i] = nil
+ } else {
+ isUnsigned := f[i].Flag&UNSIGNED_FLAG != 0
+
+ switch f[i].Type {
+ case MYSQL_TYPE_TINY, MYSQL_TYPE_SHORT, MYSQL_TYPE_INT24,
+ MYSQL_TYPE_LONGLONG, MYSQL_TYPE_YEAR:
+ if isUnsigned {
+ data[i], err = strconv.ParseUint(string(v), 10, 64)
+ } else {
+ data[i], err = strconv.ParseInt(string(v), 10, 64)
+ }
+ case MYSQL_TYPE_FLOAT, MYSQL_TYPE_DOUBLE:
+ data[i], err = strconv.ParseFloat(string(v), 64)
+ default:
+ data[i] = v
+ }
+
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ }
+ }
+
+ return data, nil
+}
+
+func (p RowData) ParseBinary(f []*Field) ([]interface{}, error) {
+ data := make([]interface{}, len(f))
+
+ if p[0] != OK_HEADER {
+ return nil, ErrMalformPacket
+ }
+
+ pos := 1 + ((len(f) + 7 + 2) >> 3)
+
+ nullBitmap := p[1:pos]
+
+ var isNull bool
+ var n int
+ var err error
+ var v []byte
+ for i := range data {
+ if nullBitmap[(i+2)/8]&(1<<(uint(i+2)%8)) > 0 {
+ data[i] = nil
+ continue
+ }
+
+ isUnsigned := f[i].Flag&UNSIGNED_FLAG != 0
+
+ switch f[i].Type {
+ case MYSQL_TYPE_NULL:
+ data[i] = nil
+ continue
+
+ case MYSQL_TYPE_TINY:
+ if isUnsigned {
+ data[i] = ParseBinaryUint8(p[pos : pos+1])
+ } else {
+ data[i] = ParseBinaryInt8(p[pos : pos+1])
+ }
+ pos++
+ continue
+
+ case MYSQL_TYPE_SHORT, MYSQL_TYPE_YEAR:
+ if isUnsigned {
+ data[i] = ParseBinaryUint16(p[pos : pos+2])
+ } else {
+ data[i] = ParseBinaryInt16(p[pos : pos+2])
+ }
+ pos += 2
+ continue
+
+ case MYSQL_TYPE_INT24:
+ if isUnsigned {
+ data[i] = ParseBinaryUint24(p[pos : pos+3])
+ } else {
+ data[i] = ParseBinaryInt24(p[pos : pos+3])
+ }
+ //3 byte
+ pos += 3
+ continue
+
+ case MYSQL_TYPE_LONG:
+ if isUnsigned {
+ data[i] = ParseBinaryUint32(p[pos : pos+4])
+ } else {
+ data[i] = ParseBinaryInt32(p[pos : pos+4])
+ }
+ pos += 4
+ continue
+
+ case MYSQL_TYPE_LONGLONG:
+ if isUnsigned {
+ data[i] = ParseBinaryUint64(p[pos : pos+8])
+ } else {
+ data[i] = ParseBinaryInt64(p[pos : pos+8])
+ }
+ pos += 8
+ continue
+
+ case MYSQL_TYPE_FLOAT:
+ data[i] = ParseBinaryFloat32(p[pos : pos+4])
+ pos += 4
+ continue
+
+ case MYSQL_TYPE_DOUBLE:
+ data[i] = ParseBinaryFloat64(p[pos : pos+8])
+ pos += 8
+ continue
+
+ case MYSQL_TYPE_DECIMAL, MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_VARCHAR,
+ MYSQL_TYPE_BIT, MYSQL_TYPE_ENUM, MYSQL_TYPE_SET, MYSQL_TYPE_TINY_BLOB,
+ MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_BLOB,
+ MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_STRING, MYSQL_TYPE_GEOMETRY:
+ v, isNull, n, err = LengthEnodedString(p[pos:])
+ pos += n
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ if !isNull {
+ data[i] = v
+ continue
+ } else {
+ data[i] = nil
+ continue
+ }
+ case MYSQL_TYPE_DATE, MYSQL_TYPE_NEWDATE:
+ var num uint64
+ num, isNull, n = LengthEncodedInt(p[pos:])
+
+ pos += n
+
+ if isNull {
+ data[i] = nil
+ continue
+ }
+
+ data[i], err = FormatBinaryDate(int(num), p[pos:])
+ pos += int(num)
+
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ case MYSQL_TYPE_TIMESTAMP, MYSQL_TYPE_DATETIME:
+ var num uint64
+ num, isNull, n = LengthEncodedInt(p[pos:])
+
+ pos += n
+
+ if isNull {
+ data[i] = nil
+ continue
+ }
+
+ data[i], err = FormatBinaryDateTime(int(num), p[pos:])
+ pos += int(num)
+
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ case MYSQL_TYPE_TIME:
+ var num uint64
+ num, isNull, n = LengthEncodedInt(p[pos:])
+
+ pos += n
+
+ if isNull {
+ data[i] = nil
+ continue
+ }
+
+ data[i], err = FormatBinaryTime(int(num), p[pos:])
+ pos += int(num)
+
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ default:
+ return nil, errors.Errorf("Stmt Unknown FieldType %d %s", f[i].Type, f[i].Name)
+ }
+ }
+
+ return data, nil
+}
+
+type Resultset struct {
+ Fields []*Field
+ FieldNames map[string]int
+ Values [][]interface{}
+
+ RowDatas []RowData
+}
+
+func (r *Resultset) RowNumber() int {
+ return len(r.Values)
+}
+
+func (r *Resultset) ColumnNumber() int {
+ return len(r.Fields)
+}
+
+func (r *Resultset) GetValue(row, column int) (interface{}, error) {
+ if row >= len(r.Values) || row < 0 {
+ return nil, errors.Errorf("invalid row index %d", row)
+ }
+
+ if column >= len(r.Fields) || column < 0 {
+ return nil, errors.Errorf("invalid column index %d", column)
+ }
+
+ return r.Values[row][column], nil
+}
+
+func (r *Resultset) NameIndex(name string) (int, error) {
+ if column, ok := r.FieldNames[name]; ok {
+ return column, nil
+ } else {
+ return 0, errors.Errorf("invalid field name %s", name)
+ }
+}
+
+func (r *Resultset) GetValueByName(row int, name string) (interface{}, error) {
+ if column, err := r.NameIndex(name); err != nil {
+ return nil, errors.Trace(err)
+ } else {
+ return r.GetValue(row, column)
+ }
+}
+
+func (r *Resultset) IsNull(row, column int) (bool, error) {
+ d, err := r.GetValue(row, column)
+ if err != nil {
+ return false, err
+ }
+
+ return d == nil, nil
+}
+
+func (r *Resultset) IsNullByName(row int, name string) (bool, error) {
+ if column, err := r.NameIndex(name); err != nil {
+ return false, err
+ } else {
+ return r.IsNull(row, column)
+ }
+}
+
+func (r *Resultset) GetUint(row, column int) (uint64, error) {
+ d, err := r.GetValue(row, column)
+ if err != nil {
+ return 0, err
+ }
+
+ switch v := d.(type) {
+ case int:
+ return uint64(v), nil
+ case int8:
+ return uint64(v), nil
+ case int16:
+ return uint64(v), nil
+ case int32:
+ return uint64(v), nil
+ case int64:
+ return uint64(v), nil
+ case uint:
+ return uint64(v), nil
+ case uint8:
+ return uint64(v), nil
+ case uint16:
+ return uint64(v), nil
+ case uint32:
+ return uint64(v), nil
+ case uint64:
+ return uint64(v), nil
+ case float32:
+ return uint64(v), nil
+ case float64:
+ return uint64(v), nil
+ case string:
+ return strconv.ParseUint(v, 10, 64)
+ case []byte:
+ return strconv.ParseUint(string(v), 10, 64)
+ case nil:
+ return 0, nil
+ default:
+ return 0, errors.Errorf("data type is %T", v)
+ }
+}
+
+func (r *Resultset) GetUintByName(row int, name string) (uint64, error) {
+ if column, err := r.NameIndex(name); err != nil {
+ return 0, err
+ } else {
+ return r.GetUint(row, column)
+ }
+}
+
+func (r *Resultset) GetInt(row, column int) (int64, error) {
+ v, err := r.GetUint(row, column)
+ if err != nil {
+ return 0, err
+ }
+
+ return int64(v), nil
+}
+
+func (r *Resultset) GetIntByName(row int, name string) (int64, error) {
+ v, err := r.GetUintByName(row, name)
+ if err != nil {
+ return 0, err
+ }
+
+ return int64(v), nil
+}
+
+func (r *Resultset) GetFloat(row, column int) (float64, error) {
+ d, err := r.GetValue(row, column)
+ if err != nil {
+ return 0, err
+ }
+
+ switch v := d.(type) {
+ case int:
+ return float64(v), nil
+ case int8:
+ return float64(v), nil
+ case int16:
+ return float64(v), nil
+ case int32:
+ return float64(v), nil
+ case int64:
+ return float64(v), nil
+ case uint:
+ return float64(v), nil
+ case uint8:
+ return float64(v), nil
+ case uint16:
+ return float64(v), nil
+ case uint32:
+ return float64(v), nil
+ case uint64:
+ return float64(v), nil
+ case float32:
+ return float64(v), nil
+ case float64:
+ return v, nil
+ case string:
+ return strconv.ParseFloat(v, 64)
+ case []byte:
+ return strconv.ParseFloat(string(v), 64)
+ case nil:
+ return 0, nil
+ default:
+ return 0, errors.Errorf("data type is %T", v)
+ }
+}
+
+func (r *Resultset) GetFloatByName(row int, name string) (float64, error) {
+ if column, err := r.NameIndex(name); err != nil {
+ return 0, err
+ } else {
+ return r.GetFloat(row, column)
+ }
+}
+
+func (r *Resultset) GetString(row, column int) (string, error) {
+ d, err := r.GetValue(row, column)
+ if err != nil {
+ return "", err
+ }
+
+ switch v := d.(type) {
+ case string:
+ return v, nil
+ case []byte:
+ return hack.String(v), nil
+ case int, int8, int16, int32, int64,
+ uint, uint8, uint16, uint32, uint64:
+ return fmt.Sprintf("%d", v), nil
+ case float32:
+ return strconv.FormatFloat(float64(v), 'f', -1, 64), nil
+ case float64:
+ return strconv.FormatFloat(v, 'f', -1, 64), nil
+ case nil:
+ return "", nil
+ default:
+ return "", errors.Errorf("data type is %T", v)
+ }
+}
+
+func (r *Resultset) GetStringByName(row int, name string) (string, error) {
+ if column, err := r.NameIndex(name); err != nil {
+ return "", err
+ } else {
+ return r.GetString(row, column)
+ }
+}
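
A small sketch of the typed accessors, with a hand-built Resultset standing in for one filled from the wire; in real use Values and FieldNames are populated by the client, and the package is assumed imported as mysql.

package main

import (
	"fmt"

	mysql "github.com/siddontang/go-mysql/mysql"
)

func main() {
	r := &mysql.Resultset{
		Fields:     []*mysql.Field{{}, {}},
		FieldNames: map[string]int{"id": 0, "name": 1},
		Values: [][]interface{}{
			{int64(1), []byte("alice")},
			{int64(2), []byte("bob")},
		},
	}

	// Accessors convert whatever concrete type ended up in Values.
	id, _ := r.GetIntByName(1, "id")
	name, _ := r.GetStringByName(1, "name")
	fmt.Println(id, name) // 2 bob
}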
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/resultset_helper.go b/vendor/github.com/siddontang/go-mysql/mysql/resultset_helper.go
new file mode 100644
index 000000000..307684db9
--- /dev/null
+++ b/vendor/github.com/siddontang/go-mysql/mysql/resultset_helper.go
@@ -0,0 +1,248 @@
+package mysql
+
+import (
+ "math"
+ "strconv"
+
+ "github.com/juju/errors"
+ "github.com/siddontang/go/hack"
+)
+
+func formatTextValue(value interface{}) ([]byte, error) {
+ switch v := value.(type) {
+ case int8:
+ return strconv.AppendInt(nil, int64(v), 10), nil
+ case int16:
+ return strconv.AppendInt(nil, int64(v), 10), nil
+ case int32:
+ return strconv.AppendInt(nil, int64(v), 10), nil
+ case int64:
+ return strconv.AppendInt(nil, int64(v), 10), nil
+ case int:
+ return strconv.AppendInt(nil, int64(v), 10), nil
+ case uint8:
+ return strconv.AppendUint(nil, uint64(v), 10), nil
+ case uint16:
+ return strconv.AppendUint(nil, uint64(v), 10), nil
+ case uint32:
+ return strconv.AppendUint(nil, uint64(v), 10), nil
+ case uint64:
+ return strconv.AppendUint(nil, uint64(v), 10), nil
+ case uint:
+ return strconv.AppendUint(nil, uint64(v), 10), nil
+ case float32:
+ return strconv.AppendFloat(nil, float64(v), 'f', -1, 64), nil
+ case float64:
+ return strconv.AppendFloat(nil, float64(v), 'f', -1, 64), nil
+ case []byte:
+ return v, nil
+ case string:
+ return hack.Slice(v), nil
+ case nil:
+ return nil, nil
+ default:
+ return nil, errors.Errorf("invalid type %T", value)
+ }
+}
+
+func formatBinaryValue(value interface{}) ([]byte, error) {
+ switch v := value.(type) {
+ case int8:
+ return Uint64ToBytes(uint64(v)), nil
+ case int16:
+ return Uint64ToBytes(uint64(v)), nil
+ case int32:
+ return Uint64ToBytes(uint64(v)), nil
+ case int64:
+ return Uint64ToBytes(uint64(v)), nil
+ case int:
+ return Uint64ToBytes(uint64(v)), nil
+ case uint8:
+ return Uint64ToBytes(uint64(v)), nil
+ case uint16:
+ return Uint64ToBytes(uint64(v)), nil
+ case uint32:
+ return Uint64ToBytes(uint64(v)), nil
+ case uint64:
+ return Uint64ToBytes(uint64(v)), nil
+ case uint:
+ return Uint64ToBytes(uint64(v)), nil
+ case float32:
+ return Uint64ToBytes(math.Float64bits(float64(v))), nil
+ case float64:
+ return Uint64ToBytes(math.Float64bits(v)), nil
+ case []byte:
+ return v, nil
+ case string:
+ return hack.Slice(v), nil
+ default:
+ return nil, errors.Errorf("invalid type %T", value)
+ }
+}
+
+func fieldType(value interface{}) (typ uint8, err error) {
+ switch value.(type) {
+ case int8, int16, int32, int64, int:
+ typ = MYSQL_TYPE_LONGLONG
+ case uint8, uint16, uint32, uint64, uint:
+ typ = MYSQL_TYPE_LONGLONG
+ case float32, float64:
+ typ = MYSQL_TYPE_DOUBLE
+ case string, []byte:
+ typ = MYSQL_TYPE_VAR_STRING
+ case nil:
+ typ = MYSQL_TYPE_NULL
+ default:
+ err = errors.Errorf("unsupport type %T for resultset", value)
+ }
+ return
+}
+
+func formatField(field *Field, value interface{}) error {
+ switch value.(type) {
+ case int8, int16, int32, int64, int:
+ field.Charset = 63
+ field.Flag = BINARY_FLAG | NOT_NULL_FLAG
+ case uint8, uint16, uint32, uint64, uint:
+ field.Charset = 63
+ field.Flag = BINARY_FLAG | NOT_NULL_FLAG | UNSIGNED_FLAG
+ case float32, float64:
+ field.Charset = 63
+ field.Flag = BINARY_FLAG | NOT_NULL_FLAG
+ case string, []byte:
+ field.Charset = 33
+ case nil:
+ field.Charset = 33
+ default:
+ return errors.Errorf("unsupport type %T for resultset", value)
+ }
+ return nil
+}
+
+func BuildSimpleTextResultset(names []string, values [][]interface{}) (*Resultset, error) {
+ r := new(Resultset)
+
+ r.Fields = make([]*Field, len(names))
+
+ var b []byte
+
+ if len(values) == 0 {
+ for i, name := range names {
+ r.Fields[i] = &Field{Name: hack.Slice(name), Charset: 33, Type: MYSQL_TYPE_NULL}
+ }
+ return r, nil
+ }
+
+ for i, vs := range values {
+ if len(vs) != len(r.Fields) {
+ return nil, errors.Errorf("row %d has %d column not equal %d", i, len(vs), len(r.Fields))
+ }
+
+ var row []byte
+ for j, value := range vs {
+ typ, err := fieldType(value)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ if r.Fields[j] == nil {
+ r.Fields[j] = &Field{Name: hack.Slice(names[j]), Type: typ}
+ formatField(r.Fields[j], value)
+ } else if typ != r.Fields[j].Type {
+ // We got another type in the same column. In general we treat this as an error,
+ // except when the old value was NULL and the new one is not, in which case we
+ // can update the field's type info.
+ oldIsNull, newIsNull := r.Fields[j].Type == MYSQL_TYPE_NULL, typ == MYSQL_TYPE_NULL
+ if oldIsNull && !newIsNull { // old is null, new isn't, update type info.
+ r.Fields[j].Type = typ
+ formatField(r.Fields[j], value)
+ } else if !oldIsNull && !newIsNull { // different non-null types, that's an error.
+ return nil, errors.Errorf("row types aren't consistent")
+ }
+ }
+ b, err = formatTextValue(value)
+
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ if b == nil {
+ // NULL value is encoded as 0xfb here (without additional info about length)
+ row = append(row, 0xfb)
+ } else {
+ row = append(row, PutLengthEncodedString(b)...)
+ }
+ }
+
+ r.RowDatas = append(r.RowDatas, row)
+ }
+
+ return r, nil
+}
+
+func BuildSimpleBinaryResultset(names []string, values [][]interface{}) (*Resultset, error) {
+ r := new(Resultset)
+
+ r.Fields = make([]*Field, len(names))
+
+ var b []byte
+
+ bitmapLen := ((len(names) + 7 + 2) >> 3)
+
+ for i, vs := range values {
+ if len(vs) != len(r.Fields) {
+ return nil, errors.Errorf("row %d has %d column not equal %d", i, len(vs), len(r.Fields))
+ }
+
+ var row []byte
+ nullBitmap := make([]byte, bitmapLen)
+
+ row = append(row, 0)
+ row = append(row, nullBitmap...)
+
+ for j, value := range vs {
+ typ, err := fieldType(value)
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+ if i == 0 {
+ field := &Field{Type: typ}
+ r.Fields[j] = field
+ field.Name = hack.Slice(names[j])
+
+ if err = formatField(field, value); err != nil {
+ return nil, errors.Trace(err)
+ }
+ }
+ if value == nil {
+ nullBitmap[(j+2)/8] |= 1 << (uint(j+2) % 8) // mark column j as NULL in this row's bitmap
+ continue
+ }
+
+ b, err = formatBinaryValue(value)
+
+ if err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ if r.Fields[j].Type == MYSQL_TYPE_VAR_STRING {
+ row = append(row, PutLengthEncodedString(b)...)
+ } else {
+ row = append(row, b...)
+ }
+ }
+
+ copy(row[1:], nullBitmap)
+
+ r.RowDatas = append(r.RowDatas, row)
+ }
+
+ return r, nil
+}
+
+func BuildSimpleResultset(names []string, values [][]interface{}, binary bool) (*Resultset, error) {
+ if binary {
+ return BuildSimpleBinaryResultset(names, values)
+ } else {
+ return BuildSimpleTextResultset(names, values)
+ }
+}
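
A sketch of building a resultset for a server-side handler; column types are inferred from the Go values, and the final flag only switches between the text and binary row encodings (package assumed imported as mysql).

package main

import (
	"fmt"

	mysql "github.com/siddontang/go-mysql/mysql"
)

func main() {
	names := []string{"id", "name"}
	values := [][]interface{}{
		{int64(1), "alice"},
		{int64(2), "bob"},
	}

	// false selects the text protocol encoding; true would use the binary row format.
	r, err := mysql.BuildSimpleResultset(names, values, false)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(r.Fields), len(r.RowDatas)) // 2 2
}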
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/state.go b/vendor/github.com/siddontang/go-mysql/mysql/state.go
new file mode 100644
index 000000000..568d84b60
--- /dev/null
+++ b/vendor/github.com/siddontang/go-mysql/mysql/state.go
@@ -0,0 +1,233 @@
+package mysql
+
+const (
+ DEFAULT_MYSQL_STATE = "HY000"
+)
+
+var MySQLState = map[uint16]string{
+ ER_DUP_KEY: "23000",
+ ER_OUTOFMEMORY: "HY001",
+ ER_OUT_OF_SORTMEMORY: "HY001",
+ ER_CON_COUNT_ERROR: "08004",
+ ER_BAD_HOST_ERROR: "08S01",
+ ER_HANDSHAKE_ERROR: "08S01",
+ ER_DBACCESS_DENIED_ERROR: "42000",
+ ER_ACCESS_DENIED_ERROR: "28000",
+ ER_NO_DB_ERROR: "3D000",
+ ER_UNKNOWN_COM_ERROR: "08S01",
+ ER_BAD_NULL_ERROR: "23000",
+ ER_BAD_DB_ERROR: "42000",
+ ER_TABLE_EXISTS_ERROR: "42S01",
+ ER_BAD_TABLE_ERROR: "42S02",
+ ER_NON_UNIQ_ERROR: "23000",
+ ER_SERVER_SHUTDOWN: "08S01",
+ ER_BAD_FIELD_ERROR: "42S22",
+ ER_WRONG_FIELD_WITH_GROUP: "42000",
+ ER_WRONG_SUM_SELECT: "42000",
+ ER_WRONG_GROUP_FIELD: "42000",
+ ER_WRONG_VALUE_COUNT: "21S01",
+ ER_TOO_LONG_IDENT: "42000",
+ ER_DUP_FIELDNAME: "42S21",
+ ER_DUP_KEYNAME: "42000",
+ ER_DUP_ENTRY: "23000",
+ ER_WRONG_FIELD_SPEC: "42000",
+ ER_PARSE_ERROR: "42000",
+ ER_EMPTY_QUERY: "42000",
+ ER_NONUNIQ_TABLE: "42000",
+ ER_INVALID_DEFAULT: "42000",
+ ER_MULTIPLE_PRI_KEY: "42000",
+ ER_TOO_MANY_KEYS: "42000",
+ ER_TOO_MANY_KEY_PARTS: "42000",
+ ER_TOO_LONG_KEY: "42000",
+ ER_KEY_COLUMN_DOES_NOT_EXITS: "42000",
+ ER_BLOB_USED_AS_KEY: "42000",
+ ER_TOO_BIG_FIELDLENGTH: "42000",
+ ER_WRONG_AUTO_KEY: "42000",
+ ER_FORCING_CLOSE: "08S01",
+ ER_IPSOCK_ERROR: "08S01",
+ ER_NO_SUCH_INDEX: "42S12",
+ ER_WRONG_FIELD_TERMINATORS: "42000",
+ ER_BLOBS_AND_NO_TERMINATED: "42000",
+ ER_CANT_REMOVE_ALL_FIELDS: "42000",
+ ER_CANT_DROP_FIELD_OR_KEY: "42000",
+ ER_BLOB_CANT_HAVE_DEFAULT: "42000",
+ ER_WRONG_DB_NAME: "42000",
+ ER_WRONG_TABLE_NAME: "42000",
+ ER_TOO_BIG_SELECT: "42000",
+ ER_UNKNOWN_PROCEDURE: "42000",
+ ER_WRONG_PARAMCOUNT_TO_PROCEDURE: "42000",
+ ER_UNKNOWN_TABLE: "42S02",
+ ER_FIELD_SPECIFIED_TWICE: "42000",
+ ER_UNSUPPORTED_EXTENSION: "42000",
+ ER_TABLE_MUST_HAVE_COLUMNS: "42000",
+ ER_UNKNOWN_CHARACTER_SET: "42000",
+ ER_TOO_BIG_ROWSIZE: "42000",
+ ER_WRONG_OUTER_JOIN: "42000",
+ ER_NULL_COLUMN_IN_INDEX: "42000",
+ ER_PASSWORD_ANONYMOUS_USER: "42000",
+ ER_PASSWORD_NOT_ALLOWED: "42000",
+ ER_PASSWORD_NO_MATCH: "42000",
+ ER_WRONG_VALUE_COUNT_ON_ROW: "21S01",
+ ER_INVALID_USE_OF_NULL: "22004",
+ ER_REGEXP_ERROR: "42000",
+ ER_MIX_OF_GROUP_FUNC_AND_FIELDS: "42000",
+ ER_NONEXISTING_GRANT: "42000",
+ ER_TABLEACCESS_DENIED_ERROR: "42000",
+ ER_COLUMNACCESS_DENIED_ERROR: "42000",
+ ER_ILLEGAL_GRANT_FOR_TABLE: "42000",
+ ER_GRANT_WRONG_HOST_OR_USER: "42000",
+ ER_NO_SUCH_TABLE: "42S02",
+ ER_NONEXISTING_TABLE_GRANT: "42000",
+ ER_NOT_ALLOWED_COMMAND: "42000",
+ ER_SYNTAX_ERROR: "42000",
+ ER_ABORTING_CONNECTION: "08S01",
+ ER_NET_PACKET_TOO_LARGE: "08S01",
+ ER_NET_READ_ERROR_FROM_PIPE: "08S01",
+ ER_NET_FCNTL_ERROR: "08S01",
+ ER_NET_PACKETS_OUT_OF_ORDER: "08S01",
+ ER_NET_UNCOMPRESS_ERROR: "08S01",
+ ER_NET_READ_ERROR: "08S01",
+ ER_NET_READ_INTERRUPTED: "08S01",
+ ER_NET_ERROR_ON_WRITE: "08S01",
+ ER_NET_WRITE_INTERRUPTED: "08S01",
+ ER_TOO_LONG_STRING: "42000",
+ ER_TABLE_CANT_HANDLE_BLOB: "42000",
+ ER_TABLE_CANT_HANDLE_AUTO_INCREMENT: "42000",
+ ER_WRONG_COLUMN_NAME: "42000",
+ ER_WRONG_KEY_COLUMN: "42000",
+ ER_DUP_UNIQUE: "23000",
+ ER_BLOB_KEY_WITHOUT_LENGTH: "42000",
+ ER_PRIMARY_CANT_HAVE_NULL: "42000",
+ ER_TOO_MANY_ROWS: "42000",
+ ER_REQUIRES_PRIMARY_KEY: "42000",
+ ER_KEY_DOES_NOT_EXITS: "42000",
+ ER_CHECK_NO_SUCH_TABLE: "42000",
+ ER_CHECK_NOT_IMPLEMENTED: "42000",
+ ER_CANT_DO_THIS_DURING_AN_TRANSACTION: "25000",
+ ER_NEW_ABORTING_CONNECTION: "08S01",
+ ER_MASTER_NET_READ: "08S01",
+ ER_MASTER_NET_WRITE: "08S01",
+ ER_TOO_MANY_USER_CONNECTIONS: "42000",
+ ER_READ_ONLY_TRANSACTION: "25000",
+ ER_NO_PERMISSION_TO_CREATE_USER: "42000",
+ ER_LOCK_DEADLOCK: "40001",
+ ER_NO_REFERENCED_ROW: "23000",
+ ER_ROW_IS_REFERENCED: "23000",
+ ER_CONNECT_TO_MASTER: "08S01",
+ ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT: "21000",
+ ER_USER_LIMIT_REACHED: "42000",
+ ER_SPECIFIC_ACCESS_DENIED_ERROR: "42000",
+ ER_NO_DEFAULT: "42000",
+ ER_WRONG_VALUE_FOR_VAR: "42000",
+ ER_WRONG_TYPE_FOR_VAR: "42000",
+ ER_CANT_USE_OPTION_HERE: "42000",
+ ER_NOT_SUPPORTED_YET: "42000",
+ ER_WRONG_FK_DEF: "42000",
+ ER_OPERAND_COLUMNS: "21000",
+ ER_SUBQUERY_NO_1_ROW: "21000",
+ ER_ILLEGAL_REFERENCE: "42S22",
+ ER_DERIVED_MUST_HAVE_ALIAS: "42000",
+ ER_SELECT_REDUCED: "01000",
+ ER_TABLENAME_NOT_ALLOWED_HERE: "42000",
+ ER_NOT_SUPPORTED_AUTH_MODE: "08004",
+ ER_SPATIAL_CANT_HAVE_NULL: "42000",
+ ER_COLLATION_CHARSET_MISMATCH: "42000",
+ ER_WARN_TOO_FEW_RECORDS: "01000",
+ ER_WARN_TOO_MANY_RECORDS: "01000",
+ ER_WARN_NULL_TO_NOTNULL: "22004",
+ ER_WARN_DATA_OUT_OF_RANGE: "22003",
+ WARN_DATA_TRUNCATED: "01000",
+ ER_WRONG_NAME_FOR_INDEX: "42000",
+ ER_WRONG_NAME_FOR_CATALOG: "42000",
+ ER_UNKNOWN_STORAGE_ENGINE: "42000",
+ ER_TRUNCATED_WRONG_VALUE: "22007",
+ ER_SP_NO_RECURSIVE_CREATE: "2F003",
+ ER_SP_ALREADY_EXISTS: "42000",
+ ER_SP_DOES_NOT_EXIST: "42000",
+ ER_SP_LILABEL_MISMATCH: "42000",
+ ER_SP_LABEL_REDEFINE: "42000",
+ ER_SP_LABEL_MISMATCH: "42000",
+ ER_SP_UNINIT_VAR: "01000",
+ ER_SP_BADSELECT: "0A000",
+ ER_SP_BADRETURN: "42000",
+ ER_SP_BADSTATEMENT: "0A000",
+ ER_UPDATE_LOG_DEPRECATED_IGNORED: "42000",
+ ER_UPDATE_LOG_DEPRECATED_TRANSLATED: "42000",
+ ER_QUERY_INTERRUPTED: "70100",
+ ER_SP_WRONG_NO_OF_ARGS: "42000",
+ ER_SP_COND_MISMATCH: "42000",
+ ER_SP_NORETURN: "42000",
+ ER_SP_NORETURNEND: "2F005",
+ ER_SP_BAD_CURSOR_QUERY: "42000",
+ ER_SP_BAD_CURSOR_SELECT: "42000",
+ ER_SP_CURSOR_MISMATCH: "42000",
+ ER_SP_CURSOR_ALREADY_OPEN: "24000",
+ ER_SP_CURSOR_NOT_OPEN: "24000",
+ ER_SP_UNDECLARED_VAR: "42000",
+ ER_SP_FETCH_NO_DATA: "02000",
+ ER_SP_DUP_PARAM: "42000",
+ ER_SP_DUP_VAR: "42000",
+ ER_SP_DUP_COND: "42000",
+ ER_SP_DUP_CURS: "42000",
+ ER_SP_SUBSELECT_NYI: "0A000",
+ ER_STMT_NOT_ALLOWED_IN_SF_OR_TRG: "0A000",
+ ER_SP_VARCOND_AFTER_CURSHNDLR: "42000",
+ ER_SP_CURSOR_AFTER_HANDLER: "42000",
+ ER_SP_CASE_NOT_FOUND: "20000",
+ ER_DIVISION_BY_ZERO: "22012",
+ ER_ILLEGAL_VALUE_FOR_TYPE: "22007",
+ ER_PROCACCESS_DENIED_ERROR: "42000",
+ ER_XAER_NOTA: "XAE04",
+ ER_XAER_INVAL: "XAE05",
+ ER_XAER_RMFAIL: "XAE07",
+ ER_XAER_OUTSIDE: "XAE09",
+ ER_XAER_RMERR: "XAE03",
+ ER_XA_RBROLLBACK: "XA100",
+ ER_NONEXISTING_PROC_GRANT: "42000",
+ ER_DATA_TOO_LONG: "22001",
+ ER_SP_BAD_SQLSTATE: "42000",
+ ER_CANT_CREATE_USER_WITH_GRANT: "42000",
+ ER_SP_DUP_HANDLER: "42000",
+ ER_SP_NOT_VAR_ARG: "42000",
+ ER_SP_NO_RETSET: "0A000",
+ ER_CANT_CREATE_GEOMETRY_OBJECT: "22003",
+ ER_TOO_BIG_SCALE: "42000",
+ ER_TOO_BIG_PRECISION: "42000",
+ ER_M_BIGGER_THAN_D: "42000",
+ ER_TOO_LONG_BODY: "42000",
+ ER_TOO_BIG_DISPLAYWIDTH: "42000",
+ ER_XAER_DUPID: "XAE08",
+ ER_DATETIME_FUNCTION_OVERFLOW: "22008",
+ ER_ROW_IS_REFERENCED_2: "23000",
+ ER_NO_REFERENCED_ROW_2: "23000",
+ ER_SP_BAD_VAR_SHADOW: "42000",
+ ER_SP_WRONG_NAME: "42000",
+ ER_SP_NO_AGGREGATE: "42000",
+ ER_MAX_PREPARED_STMT_COUNT_REACHED: "42000",
+ ER_NON_GROUPING_FIELD_USED: "42000",
+ ER_FOREIGN_DUPLICATE_KEY_OLD_UNUSED: "23000",
+ ER_CANT_CHANGE_TX_CHARACTERISTICS: "25001",
+ ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT: "42000",
+ ER_WRONG_PARAMETERS_TO_NATIVE_FCT: "42000",
+ ER_WRONG_PARAMETERS_TO_STORED_FCT: "42000",
+ ER_DUP_ENTRY_WITH_KEY_NAME: "23000",
+ ER_XA_RBTIMEOUT: "XA106",
+ ER_XA_RBDEADLOCK: "XA102",
+ ER_FUNC_INEXISTENT_NAME_COLLISION: "42000",
+ ER_DUP_SIGNAL_SET: "42000",
+ ER_SIGNAL_WARN: "01000",
+ ER_SIGNAL_NOT_FOUND: "02000",
+ ER_SIGNAL_EXCEPTION: "HY000",
+ ER_RESIGNAL_WITHOUT_ACTIVE_HANDLER: "0K000",
+ ER_SPATIAL_MUST_HAVE_GEOM_COL: "42000",
+ ER_DATA_OUT_OF_RANGE: "22003",
+ ER_ACCESS_DENIED_NO_PASSWORD_ERROR: "28000",
+ ER_TRUNCATE_ILLEGAL_FK: "42000",
+ ER_DA_INVALID_CONDITION_NUMBER: "35000",
+ ER_FOREIGN_DUPLICATE_KEY_WITH_CHILD_INFO: "23000",
+ ER_FOREIGN_DUPLICATE_KEY_WITHOUT_CHILD_INFO: "23000",
+ ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION: "25006",
+ ER_ALTER_OPERATION_NOT_SUPPORTED: "0A000",
+ ER_ALTER_OPERATION_NOT_SUPPORTED_REASON: "0A000",
+ ER_DUP_UNKNOWN_IN_INDEX: "23000",
+}
diff --git a/vendor/github.com/siddontang/go-mysql/mysql/util.go b/vendor/github.com/siddontang/go-mysql/mysql/util.go
new file mode 100644
index 000000000..7fe41fa21
--- /dev/null
+++ b/vendor/github.com/siddontang/go-mysql/mysql/util.go
@@ -0,0 +1,354 @@
+package mysql
+
+import (
+ "crypto/rand"
+ "crypto/sha1"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "runtime"
+ "strings"
+
+ "github.com/juju/errors"
+ "github.com/siddontang/go/hack"
+)
+
+func Pstack() string {
+ buf := make([]byte, 1024)
+ n := runtime.Stack(buf, false)
+ return string(buf[0:n])
+}
+
+func CalcPassword(scramble, password []byte) []byte {
+ if len(password) == 0 {
+ return nil
+ }
+
+ // stage1Hash = SHA1(password)
+ crypt := sha1.New()
+ crypt.Write(password)
+ stage1 := crypt.Sum(nil)
+
+ // scrambleHash = SHA1(scramble + SHA1(stage1Hash))
+ // inner Hash
+ crypt.Reset()
+ crypt.Write(stage1)
+ hash := crypt.Sum(nil)
+
+ // outer Hash
+ crypt.Reset()
+ crypt.Write(scramble)
+ crypt.Write(hash)
+ scramble = crypt.Sum(nil)
+
+ // token = scrambleHash XOR stage1Hash
+ for i := range scramble {
+ scramble[i] ^= stage1[i]
+ }
+ return scramble
+}
+
+func RandomBuf(size int) ([]byte, error) {
+ buf := make([]byte, size)
+
+ if _, err := io.ReadFull(rand.Reader, buf); err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ // avoid generating '\0'
+ for i, b := range buf {
+ if uint8(b) == 0 {
+ buf[i] = '0'
+ }
+ }
+
+ return buf, nil
+}
+
+// little endian
+func FixedLengthInt(buf []byte) uint64 {
+ var num uint64 = 0
+ for i, b := range buf {
+ num |= uint64(b) << (uint(i) * 8)
+ }
+ return num
+}
+
+// big endian
+func BFixedLengthInt(buf []byte) uint64 {
+ var num uint64 = 0
+ for i, b := range buf {
+ num |= uint64(b) << (uint(len(buf)-i-1) * 8)
+ }
+ return num
+}
+
+func LengthEncodedInt(b []byte) (num uint64, isNull bool, n int) {
+ switch b[0] {
+
+ // 251: NULL
+ case 0xfb:
+ n = 1
+ isNull = true
+ return
+
+ // 252: value of following 2
+ case 0xfc:
+ num = uint64(b[1]) | uint64(b[2])<<8
+ n = 3
+ return
+
+ // 253: value of following 3
+ case 0xfd:
+ num = uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16
+ n = 4
+ return
+
+ // 254: value of following 8
+ case 0xfe:
+ num = uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 |
+ uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 |
+ uint64(b[7])<<48 | uint64(b[8])<<56
+ n = 9
+ return
+ }
+
+ // 0-250: value of first byte
+ num = uint64(b[0])
+ n = 1
+ return
+}
+
+func PutLengthEncodedInt(n uint64) []byte {
+ switch {
+ case n <= 250:
+ return []byte{byte(n)}
+
+ case n <= 0xffff:
+ return []byte{0xfc, byte(n), byte(n >> 8)}
+
+ case n <= 0xffffff:
+ return []byte{0xfd, byte(n), byte(n >> 8), byte(n >> 16)}
+
+ case n <= 0xffffffffffffffff:
+ return []byte{0xfe, byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24),
+ byte(n >> 32), byte(n >> 40), byte(n >> 48), byte(n >> 56)}
+ }
+ return nil
+}
+
+func LengthEnodedString(b []byte) ([]byte, bool, int, error) {
+ // Get length
+ num, isNull, n := LengthEncodedInt(b)
+ if num < 1 {
+ return nil, isNull, n, nil
+ }
+
+ n += int(num)
+
+ // Check data length
+ if len(b) >= n {
+ return b[n-int(num) : n], false, n, nil
+ }
+ return nil, false, n, io.EOF
+}
+
+func SkipLengthEnodedString(b []byte) (int, error) {
+ // Get length
+ num, _, n := LengthEncodedInt(b)
+ if num < 1 {
+ return n, nil
+ }
+
+ n += int(num)
+
+ // Check data length
+ if len(b) >= n {
+ return n, nil
+ }
+ return n, io.EOF
+}
+
+func PutLengthEncodedString(b []byte) []byte {
+ data := make([]byte, 0, len(b)+9)
+ data = append(data, PutLengthEncodedInt(uint64(len(b)))...)
+ data = append(data, b...)
+ return data
+}
+
+func Uint16ToBytes(n uint16) []byte {
+ return []byte{
+ byte(n),
+ byte(n >> 8),
+ }
+}
+
+func Uint32ToBytes(n uint32) []byte {
+ return []byte{
+ byte(n),
+ byte(n >> 8),
+ byte(n >> 16),
+ byte(n >> 24),
+ }
+}
+
+func Uint64ToBytes(n uint64) []byte {
+ return []byte{
+ byte(n),
+ byte(n >> 8),
+ byte(n >> 16),
+ byte(n >> 24),
+ byte(n >> 32),
+ byte(n >> 40),
+ byte(n >> 48),
+ byte(n >> 56),
+ }
+}
+
+func FormatBinaryDate(n int, data []byte) ([]byte, error) {
+ switch n {
+ case 0:
+ return []byte("0000-00-00"), nil
+ case 4:
+ return []byte(fmt.Sprintf("%04d-%02d-%02d",
+ binary.LittleEndian.Uint16(data[:2]),
+ data[2],
+ data[3])), nil
+ default:
+ return nil, errors.Errorf("invalid date packet length %d", n)
+ }
+}
+
+func FormatBinaryDateTime(n int, data []byte) ([]byte, error) {
+ switch n {
+ case 0:
+ return []byte("0000-00-00 00:00:00"), nil
+ case 4:
+ return []byte(fmt.Sprintf("%04d-%02d-%02d 00:00:00",
+ binary.LittleEndian.Uint16(data[:2]),
+ data[2],
+ data[3])), nil
+ case 7:
+ return []byte(fmt.Sprintf(
+ "%04d-%02d-%02d %02d:%02d:%02d",
+ binary.LittleEndian.Uint16(data[:2]),
+ data[2],
+ data[3],
+ data[4],
+ data[5],
+ data[6])), nil
+ case 11:
+ return []byte(fmt.Sprintf(
+ "%04d-%02d-%02d %02d:%02d:%02d.%06d",
+ binary.LittleEndian.Uint16(data[:2]),
+ data[2],
+ data[3],
+ data[4],
+ data[5],
+ data[6],
+ binary.LittleEndian.Uint32(data[7:11]))), nil
+ default:
+ return nil, errors.Errorf("invalid datetime packet length %d", n)
+ }
+}
+
+func FormatBinaryTime(n int, data []byte) ([]byte, error) {
+ if n == 0 {
+ return []byte("0000-00-00"), nil
+ }
+
+ var sign byte
+ if data[0] == 1 {
+ sign = byte('-')
+ }
+
+ switch n {
+ case 8:
+ return []byte(fmt.Sprintf(
+ "%c%02d:%02d:%02d",
+ sign,
+ uint16(data[1])*24+uint16(data[5]),
+ data[6],
+ data[7],
+ )), nil
+ case 12:
+ return []byte(fmt.Sprintf(
+ "%c%02d:%02d:%02d.%06d",
+ sign,
+ uint16(data[1])*24+uint16(data[5]),
+ data[6],
+ data[7],
+ binary.LittleEndian.Uint32(data[8:12]),
+ )), nil
+ default:
+ return nil, errors.Errorf("invalid time packet length %d", n)
+ }
+}
+
+var (
+ DONTESCAPE = byte(255)
+
+ EncodeMap [256]byte
+)
+
+// Escape only supports utf-8 input
+func Escape(sql string) string {
+ dest := make([]byte, 0, 2*len(sql))
+
+ for _, w := range hack.Slice(sql) {
+ if c := EncodeMap[w]; c == DONTESCAPE {
+ dest = append(dest, w)
+ } else {
+ dest = append(dest, '\\', c)
+ }
+ }
+
+ return string(dest)
+}
+
+func GetNetProto(addr string) string {
+ if strings.Contains(addr, "/") {
+ return "unix"
+ } else {
+ return "tcp"
+ }
+}
+
+// ErrorEqual returns a boolean indicating whether err1 is equal to err2.
+func ErrorEqual(err1, err2 error) bool {
+ e1 := errors.Cause(err1)
+ e2 := errors.Cause(err2)
+
+ if e1 == e2 {
+ return true
+ }
+
+ if e1 == nil || e2 == nil {
+ return e1 == e2
+ }
+
+ return e1.Error() == e2.Error()
+}
+
+var encodeRef = map[byte]byte{
+ '\x00': '0',
+ '\'': '\'',
+ '"': '"',
+ '\b': 'b',
+ '\n': 'n',
+ '\r': 'r',
+ '\t': 't',
+ 26: 'Z', // ctl-Z
+ '\\': '\\',
+}
+
+func init() {
+ for i := range EncodeMap {
+ EncodeMap[i] = DONTESCAPE
+ }
+ for i := range EncodeMap {
+ if to, ok := encodeRef[byte(i)]; ok {
+ EncodeMap[byte(i)] = to
+ }
+ }
+}
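
A short round-trip sketch of the length-encoded integer and string helpers, which are the building blocks of most of the packet parsing above; the package is assumed imported as mysql, and the LengthEnodedString spelling is the package's own exported name.

package main

import (
	"fmt"

	mysql "github.com/siddontang/go-mysql/mysql"
)

func main() {
	// 300 does not fit in one byte, so it is encoded as the 0xfc prefix plus 2 LE bytes.
	b := mysql.PutLengthEncodedInt(300)
	fmt.Printf("% x\n", b) // fc 2c 01

	num, isNull, n := mysql.LengthEncodedInt(b)
	fmt.Println(num, isNull, n) // 300 false 3

	// A length-encoded string is a length-encoded int followed by that many bytes.
	s := mysql.PutLengthEncodedString([]byte("abc"))
	v, _, _, err := mysql.LengthEnodedString(s)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(v)) // abc
}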
diff --git a/vendor/github.com/siddontang/go-mysql/packet/conn.go b/vendor/github.com/siddontang/go-mysql/packet/conn.go
new file mode 100644
index 000000000..3772e1a33
--- /dev/null
+++ b/vendor/github.com/siddontang/go-mysql/packet/conn.go
@@ -0,0 +1,163 @@
+package packet
+
+import (
+ "bufio"
+ "bytes"
+ "io"
+ "net"
+
+ "github.com/juju/errors"
+ . "github.com/siddontang/go-mysql/mysql"
+)
+
+/*
+ Conn is the base connection type for handling the MySQL protocol.
+*/
+type Conn struct {
+ net.Conn
+ br *bufio.Reader
+
+ Sequence uint8
+}
+
+func NewConn(conn net.Conn) *Conn {
+ c := new(Conn)
+
+ c.br = bufio.NewReaderSize(conn, 4096)
+ c.Conn = conn
+
+ return c
+}
+
+func (c *Conn) ReadPacket() ([]byte, error) {
+ var buf bytes.Buffer
+
+ if err := c.ReadPacketTo(&buf); err != nil {
+ return nil, errors.Trace(err)
+ } else {
+ return buf.Bytes(), nil
+ }
+
+ // header := []byte{0, 0, 0, 0}
+
+ // if _, err := io.ReadFull(c.br, header); err != nil {
+ // return nil, ErrBadConn
+ // }
+
+ // length := int(uint32(header[0]) | uint32(header[1])<<8 | uint32(header[2])<<16)
+ // if length < 1 {
+ // return nil, fmt.Errorf("invalid payload length %d", length)
+ // }
+
+ // sequence := uint8(header[3])
+
+ // if sequence != c.Sequence {
+ // return nil, fmt.Errorf("invalid sequence %d != %d", sequence, c.Sequence)
+ // }
+
+ // c.Sequence++
+
+ // data := make([]byte, length)
+ // if _, err := io.ReadFull(c.br, data); err != nil {
+ // return nil, ErrBadConn
+ // } else {
+ // if length < MaxPayloadLen {
+ // return data, nil
+ // }
+
+ // var buf []byte
+ // buf, err = c.ReadPacket()
+ // if err != nil {
+ // return nil, ErrBadConn
+ // } else {
+ // return append(data, buf...), nil
+ // }
+ // }
+}
+
+func (c *Conn) ReadPacketTo(w io.Writer) error {
+ header := []byte{0, 0, 0, 0}
+
+ if _, err := io.ReadFull(c.br, header); err != nil {
+ return ErrBadConn
+ }
+
+ length := int(uint32(header[0]) | uint32(header[1])<<8 | uint32(header[2])<<16)
+ if length < 1 {
+ return errors.Errorf("invalid payload length %d", length)
+ }
+
+ sequence := uint8(header[3])
+
+ if sequence != c.Sequence {
+ return errors.Errorf("invalid sequence %d != %d", sequence, c.Sequence)
+ }
+
+ c.Sequence++
+
+ if n, err := io.CopyN(w, c.br, int64(length)); err != nil {
+ return ErrBadConn
+ } else if n != int64(length) {
+ return ErrBadConn
+ } else {
+ if length < MaxPayloadLen {
+ return nil
+ }
+
+ if err := c.ReadPacketTo(w); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// WritePacket writes data, which must already have the 4-byte packet header reserved at the front.
+// It modifies data in place.
+func (c *Conn) WritePacket(data []byte) error {
+ length := len(data) - 4
+
+ for length >= MaxPayloadLen {
+ data[0] = 0xff
+ data[1] = 0xff
+ data[2] = 0xff
+
+ data[3] = c.Sequence
+
+ if n, err := c.Write(data[:4+MaxPayloadLen]); err != nil {
+ return ErrBadConn
+ } else if n != (4 + MaxPayloadLen) {
+ return ErrBadConn
+ } else {
+ c.Sequence++
+ length -= MaxPayloadLen
+ data = data[MaxPayloadLen:]
+ }
+ }
+
+ data[0] = byte(length)
+ data[1] = byte(length >> 8)
+ data[2] = byte(length >> 16)
+ data[3] = c.Sequence
+
+ if n, err := c.Write(data); err != nil {
+ return ErrBadConn
+ } else if n != len(data) {
+ return ErrBadConn
+ } else {
+ c.Sequence++
+ return nil
+ }
+}
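+
+// Illustrative sketch (not part of the upstream library): the 4-byte header
+// written above and parsed by ReadPacketTo is a 3-byte little-endian payload
+// length followed by the sequence id. payload and seq are placeholder values.
+//
+//	header := []byte{
+//		byte(len(payload)),       // length, low byte
+//		byte(len(payload) >> 8),  // length, middle byte
+//		byte(len(payload) >> 16), // length, high byte
+//		seq,                      // sequence id, incremented per packet
+//	}
+//	packet := append(header, payload...)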
+
+func (c *Conn) ResetSequence() {
+ c.Sequence = 0
+}
+
+func (c *Conn) Close() error {
+ c.Sequence = 0
+ if c.Conn != nil {
+ return c.Conn.Close()
+ }
+ return nil
+}
diff --git a/vendor/github.com/siddontang/go-mysql/server/auth.go b/vendor/github.com/siddontang/go-mysql/server/auth.go
new file mode 100644
index 000000000..b66ea4e0c
--- /dev/null
+++ b/vendor/github.com/siddontang/go-mysql/server/auth.go
@@ -0,0 +1,119 @@
+package server
+
+import (
+ "bytes"
+ "encoding/binary"
+
+ . "github.com/siddontang/go-mysql/mysql"
+)
+
+func (c *Conn) writeInitialHandshake() error {
+ capability := CLIENT_LONG_PASSWORD | CLIENT_LONG_FLAG |
+ CLIENT_CONNECT_WITH_DB | CLIENT_PROTOCOL_41 |
+ CLIENT_TRANSACTIONS | CLIENT_SECURE_CONNECTION
+
+ data := make([]byte, 4, 128)
+
+ //protocol version 10
+ data = append(data, 10)
+
+ //server version[00]
+ data = append(data, ServerVersion...)
+ data = append(data, 0)
+
+ //connection id
+ data = append(data, byte(c.connectionID), byte(c.connectionID>>8), byte(c.connectionID>>16), byte(c.connectionID>>24))
+
+ //auth-plugin-data-part-1
+ data = append(data, c.salt[0:8]...)
+
+ //filler [00]
+ data = append(data, 0)
+
+ //capability flag lower 2 bytes, using default capability here
+ data = append(data, byte(capability), byte(capability>>8))
+
+ //charset, utf-8 default
+ data = append(data, uint8(DEFAULT_COLLATION_ID))
+
+ //status
+ data = append(data, byte(c.status), byte(c.status>>8))
+
+ //the following 13 bytes may not be used
+ //capability flag upper 2 bytes, using default capability here
+ data = append(data, byte(capability>>16), byte(capability>>24))
+
+ //filler [0x15]; wireshark dumps show the value 0x15
+ data = append(data, 0x15)
+
+ //reserved 10 [00]
+ data = append(data, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+
+ //auth-plugin-data-part-2
+ data = append(data, c.salt[8:]...)
+
+ //filler [00]
+ data = append(data, 0)
+
+ return c.WritePacket(data)
+}
+
+func (c *Conn) readHandshakeResponse(password string) error {
+ data, err := c.ReadPacket()
+
+ if err != nil {
+ return err
+ }
+
+ pos := 0
+
+ //capability
+ c.capability = binary.LittleEndian.Uint32(data[:4])
+ pos += 4
+
+ //skip max packet size
+ pos += 4
+
+ //charset; skip it here. To use another charset, issue SET NAMES
+ //c.collation = CollationId(data[pos])
+ pos++
+
+ //skip reserved 23[00]
+ pos += 23
+
+ //user name
+ user := string(data[pos : pos+bytes.IndexByte(data[pos:], 0)])
+ pos += len(user) + 1
+
+ if c.user != user {
+ return NewDefaultError(ER_NO_SUCH_USER, user, c.RemoteAddr().String())
+ }
+
+ //auth length and auth
+ authLen := int(data[pos])
+ pos++
+ auth := data[pos : pos+authLen]
+
+ checkAuth := CalcPassword(c.salt, []byte(password))
+
+ if !bytes.Equal(auth, checkAuth) {
+ return NewDefaultError(ER_ACCESS_DENIED_ERROR, c.RemoteAddr().String(), c.user, "Yes")
+ }
+
+ pos += authLen
+
+ if c.capability&CLIENT_CONNECT_WITH_DB > 0 {
+ if len(data[pos:]) == 0 {
+ return nil
+ }
+
+ db := string(data[pos : pos+bytes.IndexByte(data[pos:], 0)])
+ pos += len(db) + 1
+
+ if err = c.h.UseDB(db); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/siddontang/go-mysql/server/command.go b/vendor/github.com/siddontang/go-mysql/server/command.go
new file mode 100644
index 000000000..3bc23ac33
--- /dev/null
+++ b/vendor/github.com/siddontang/go-mysql/server/command.go
@@ -0,0 +1,160 @@
+package server
+
+import (
+ "bytes"
+ "fmt"
+
+ . "github.com/siddontang/go-mysql/mysql"
+ "github.com/siddontang/go/hack"
+)
+
+type Handler interface {
+ //handle the COM_INIT_DB command; you can check whether the dbName is valid here, among other things.
+ UseDB(dbName string) error
+ //handle COM_QUERY command, like SELECT, INSERT, UPDATE, etc...
+ //If Result has a Resultset (SELECT, SHOW, etc...), we will send it as the response; otherwise, we will send the Result directly
+ HandleQuery(query string) (*Result, error)
+ //handle COM_FIELD_LIST command
+ HandleFieldList(table string, fieldWildcard string) ([]*Field, error)
+ //handle COM_STMT_PREPARE; params is the number of parameters for this statement, columns is the number of columns
+ //context will be used later for statement execute
+ HandleStmtPrepare(query string) (params int, columns int, context interface{}, err error)
+ //handle COM_STMT_EXECUTE; context is the one previously set in prepare
+ //query is the statement's prepare query, and args holds the params for this statement
+ HandleStmtExecute(context interface{}, query string, args []interface{}) (*Result, error)
+ //handle COM_STMT_CLOSE, context is the previous one set in prepare
+ //this handler has no response
+ HandleStmtClose(context interface{}) error
+ //handle any other command that is not currently handled by the library;
+ //the default implementation of this method returns an ER_UNKNOWN_ERROR
+ HandleOtherCommand(cmd byte, data []byte) error
+}
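+
+// Illustrative sketch (not part of the upstream library): a minimal server
+// built from this interface, assuming a net.Listener created elsewhere; the
+// user/password literals are placeholders. Each accepted connection is
+// handshaked by NewConn and then served command by command via HandleCommand.
+//
+//	type minimalHandler struct{ EmptyHandler }
+//
+//	func serve(l net.Listener) error {
+//		for {
+//			nc, err := l.Accept()
+//			if err != nil {
+//				return err
+//			}
+//			conn, err := NewConn(nc, "user", "password", minimalHandler{})
+//			if err != nil {
+//				continue // handshake failed
+//			}
+//			go func() {
+//				for !conn.Closed() {
+//					if err := conn.HandleCommand(); err != nil {
+//						return
+//					}
+//				}
+//			}()
+//		}
+//	}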
+
+func (c *Conn) HandleCommand() error {
+ if c.Conn == nil {
+ return fmt.Errorf("connection closed")
+ }
+
+ data, err := c.ReadPacket()
+ if err != nil {
+ c.Close()
+ c.Conn = nil
+ return err
+ }
+
+ v := c.dispatch(data)
+
+ err = c.writeValue(v)
+
+ if c.Conn != nil {
+ c.ResetSequence()
+ }
+
+ if err != nil {
+ c.Close()
+ c.Conn = nil
+ }
+ return err
+}
+
+func (c *Conn) dispatch(data []byte) interface{} {
+ cmd := data[0]
+ data = data[1:]
+
+ switch cmd {
+ case COM_QUIT:
+ c.Close()
+ c.Conn = nil
+ return noResponse{}
+ case COM_QUERY:
+ if r, err := c.h.HandleQuery(hack.String(data)); err != nil {
+ return err
+ } else {
+ return r
+ }
+ case COM_PING:
+ return nil
+ case COM_INIT_DB:
+ if err := c.h.UseDB(hack.String(data)); err != nil {
+ return err
+ } else {
+ return nil
+ }
+ case COM_FIELD_LIST:
+ index := bytes.IndexByte(data, 0x00)
+ table := hack.String(data[0:index])
+ wildcard := hack.String(data[index+1:])
+
+ if fs, err := c.h.HandleFieldList(table, wildcard); err != nil {
+ return err
+ } else {
+ return fs
+ }
+ case COM_STMT_PREPARE:
+ c.stmtID++
+ st := new(Stmt)
+ st.ID = c.stmtID
+ st.Query = hack.String(data)
+ var err error
+ if st.Params, st.Columns, st.Context, err = c.h.HandleStmtPrepare(st.Query); err != nil {
+ return err
+ } else {
+ st.ResetParams()
+ c.stmts[c.stmtID] = st
+ return st
+ }
+ case COM_STMT_EXECUTE:
+ if r, err := c.handleStmtExecute(data); err != nil {
+ return err
+ } else {
+ return r
+ }
+ case COM_STMT_CLOSE:
+ c.handleStmtClose(data)
+ return noResponse{}
+ case COM_STMT_SEND_LONG_DATA:
+ c.handleStmtSendLongData(data)
+ return noResponse{}
+ case COM_STMT_RESET:
+ if r, err := c.handleStmtReset(data); err != nil {
+ return err
+ } else {
+ return r
+ }
+ default:
+ return c.h.HandleOtherCommand(cmd, data)
+ }
+
+ return fmt.Errorf("command %d is not handled correctly", cmd)
+}
+
+type EmptyHandler struct {
+}
+
+func (h EmptyHandler) UseDB(dbName string) error {
+ return nil
+}
+func (h EmptyHandler) HandleQuery(query string) (*Result, error) {
+ return nil, fmt.Errorf("not supported now")
+}
+
+func (h EmptyHandler) HandleFieldList(table string, fieldWildcard string) ([]*Field, error) {
+ return nil, fmt.Errorf("not supported now")
+}
+func (h EmptyHandler) HandleStmtPrepare(query string) (int, int, interface{}, error) {
+ return 0, 0, nil, fmt.Errorf("not supported now")
+}
+func (h EmptyHandler) HandleStmtExecute(context interface{}, query string, args []interface{}) (*Result, error) {
+ return nil, fmt.Errorf("not supported now")
+}
+
+func (h EmptyHandler) HandleStmtClose(context interface{}) error {
+ return nil
+}
+
+func (h EmptyHandler) HandleOtherCommand(cmd byte, data []byte) error {
+ return NewError(
+ ER_UNKNOWN_ERROR,
+ fmt.Sprintf("command %d is not supported now", cmd),
+ )
+}
diff --git a/vendor/github.com/siddontang/go-mysql/server/conn.go b/vendor/github.com/siddontang/go-mysql/server/conn.go
new file mode 100644
index 000000000..d6ea846ae
--- /dev/null
+++ b/vendor/github.com/siddontang/go-mysql/server/conn.go
@@ -0,0 +1,113 @@
+package server
+
+import (
+ "net"
+ "sync/atomic"
+
+ . "github.com/siddontang/go-mysql/mysql"
+ "github.com/siddontang/go-mysql/packet"
+ "github.com/siddontang/go/sync2"
+)
+
+/*
+ Conn acts like a MySQL server connection; you can use a MySQL client to communicate with it.
+*/
+type Conn struct {
+ *packet.Conn
+
+ capability uint32
+
+ connectionID uint32
+
+ status uint16
+
+ user string
+
+ salt []byte
+
+ h Handler
+
+ stmts map[uint32]*Stmt
+ stmtID uint32
+
+ closed sync2.AtomicBool
+}
+
+var baseConnID uint32 = 10000
+
+func NewConn(conn net.Conn, user string, password string, h Handler) (*Conn, error) {
+ c := new(Conn)
+
+ c.h = h
+
+ c.user = user
+ c.Conn = packet.NewConn(conn)
+
+ c.connectionID = atomic.AddUint32(&baseConnID, 1)
+
+ c.stmts = make(map[uint32]*Stmt)
+
+ c.salt, _ = RandomBuf(20)
+
+ c.closed.Set(false)
+
+ if err := c.handshake(password); err != nil {
+ c.Close()
+ return nil, err
+ }
+
+ return c, nil
+}
+
+func (c *Conn) handshake(password string) error {
+ if err := c.writeInitialHandshake(); err != nil {
+ return err
+ }
+
+ if err := c.readHandshakeResponse(password); err != nil {
+ c.writeError(err)
+
+ return err
+ }
+
+ if err := c.writeOK(nil); err != nil {
+ return err
+ }
+
+ c.ResetSequence()
+
+ return nil
+}
+
+func (c *Conn) Close() {
+ c.closed.Set(true)
+ c.Conn.Close()
+}
+
+func (c *Conn) Closed() bool {
+ return c.closed.Get()
+}
+
+func (c *Conn) GetUser() string {
+ return c.user
+}
+
+func (c *Conn) ConnectionID() uint32 {
+ return c.connectionID
+}
+
+func (c *Conn) IsAutoCommit() bool {
+ return c.status&SERVER_STATUS_AUTOCOMMIT > 0
+}
+
+func (c *Conn) IsInTransaction() bool {
+ return c.status&SERVER_STATUS_IN_TRANS > 0
+}
+
+func (c *Conn) SetInTransaction() {
+ c.status |= SERVER_STATUS_IN_TRANS
+}
+
+func (c *Conn) ClearInTransaction() {
+ c.status &= ^SERVER_STATUS_IN_TRANS
+}
diff --git a/vendor/github.com/siddontang/go-mysql/server/resp.go b/vendor/github.com/siddontang/go-mysql/server/resp.go
new file mode 100644
index 000000000..1123032dd
--- /dev/null
+++ b/vendor/github.com/siddontang/go-mysql/server/resp.go
@@ -0,0 +1,142 @@
+package server
+
+import (
+ "fmt"
+
+ . "github.com/siddontang/go-mysql/mysql"
+)
+
+func (c *Conn) writeOK(r *Result) error {
+ if r == nil {
+ r = &Result{}
+ }
+
+ r.Status |= c.status
+
+ data := make([]byte, 4, 32)
+
+ data = append(data, OK_HEADER)
+
+ data = append(data, PutLengthEncodedInt(r.AffectedRows)...)
+ data = append(data, PutLengthEncodedInt(r.InsertId)...)
+
+ if c.capability&CLIENT_PROTOCOL_41 > 0 {
+ data = append(data, byte(r.Status), byte(r.Status>>8))
+ data = append(data, 0, 0)
+ }
+
+ return c.WritePacket(data)
+}
+
+func (c *Conn) writeError(e error) error {
+ var m *MyError
+ var ok bool
+ if m, ok = e.(*MyError); !ok {
+ m = NewError(ER_UNKNOWN_ERROR, e.Error())
+ }
+
+ data := make([]byte, 4, 16+len(m.Message))
+
+ data = append(data, ERR_HEADER)
+ data = append(data, byte(m.Code), byte(m.Code>>8))
+
+ if c.capability&CLIENT_PROTOCOL_41 > 0 {
+ data = append(data, '#')
+ data = append(data, m.State...)
+ }
+
+ data = append(data, m.Message...)
+
+ return c.WritePacket(data)
+}
+
+func (c *Conn) writeEOF() error {
+ data := make([]byte, 4, 9)
+
+ data = append(data, EOF_HEADER)
+ if c.capability&CLIENT_PROTOCOL_41 > 0 {
+ data = append(data, 0, 0)
+ data = append(data, byte(c.status), byte(c.status>>8))
+ }
+
+ return c.WritePacket(data)
+}
+
+func (c *Conn) writeResultset(r *Resultset) error {
+ columnLen := PutLengthEncodedInt(uint64(len(r.Fields)))
+
+ data := make([]byte, 4, 1024)
+
+ data = append(data, columnLen...)
+ if err := c.WritePacket(data); err != nil {
+ return err
+ }
+
+ for _, v := range r.Fields {
+ data = data[0:4]
+ data = append(data, v.Dump()...)
+ if err := c.WritePacket(data); err != nil {
+ return err
+ }
+ }
+
+ if err := c.writeEOF(); err != nil {
+ return err
+ }
+
+ for _, v := range r.RowDatas {
+ data = data[0:4]
+ data = append(data, v...)
+ if err := c.WritePacket(data); err != nil {
+ return err
+ }
+ }
+
+ if err := c.writeEOF(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (c *Conn) writeFieldList(fs []*Field) error {
+ data := make([]byte, 4, 1024)
+
+ for _, v := range fs {
+ data = data[0:4]
+ data = append(data, v.Dump()...)
+ if err := c.WritePacket(data); err != nil {
+ return err
+ }
+ }
+
+ if err := c.writeEOF(); err != nil {
+ return err
+ }
+ return nil
+}
+
+type noResponse struct{}
+
+func (c *Conn) writeValue(value interface{}) error {
+ switch v := value.(type) {
+ case noResponse:
+ return nil
+ case error:
+ return c.writeError(v)
+ case nil:
+ return c.writeOK(nil)
+ case *Result:
+ if v != nil && v.Resultset != nil {
+ return c.writeResultset(v.Resultset)
+ } else {
+ return c.writeOK(v)
+ }
+ case []*Field:
+ return c.writeFieldList(v)
+ case *Stmt:
+ return c.writePrepare(v)
+ default:
+ return fmt.Errorf("invalid response type %T", value)
+ }
+}
diff --git a/vendor/github.com/siddontang/go-mysql/server/stmt.go b/vendor/github.com/siddontang/go-mysql/server/stmt.go
new file mode 100644
index 000000000..7a325d71e
--- /dev/null
+++ b/vendor/github.com/siddontang/go-mysql/server/stmt.go
@@ -0,0 +1,363 @@
+package server
+
+import (
+ "encoding/binary"
+ "fmt"
+ "math"
+ "strconv"
+
+ "github.com/juju/errors"
+ . "github.com/siddontang/go-mysql/mysql"
+)
+
+var paramFieldData []byte
+var columnFieldData []byte
+
+func init() {
+ var p = &Field{Name: []byte("?")}
+ var c = &Field{}
+
+ paramFieldData = p.Dump()
+ columnFieldData = c.Dump()
+}
+
+type Stmt struct {
+ ID uint32
+ Query string
+
+ Params int
+ Columns int
+
+ Args []interface{}
+
+ Context interface{}
+}
+
+func (s *Stmt) Rest(params int, columns int, context interface{}) {
+ s.Params = params
+ s.Columns = columns
+ s.Context = context
+ s.ResetParams()
+}
+
+func (s *Stmt) ResetParams() {
+ s.Args = make([]interface{}, s.Params)
+}
+
+func (c *Conn) writePrepare(s *Stmt) error {
+ data := make([]byte, 4, 128)
+
+ //status ok
+ data = append(data, 0)
+ //stmt id
+ data = append(data, Uint32ToBytes(s.ID)...)
+ //number of columns
+ data = append(data, Uint16ToBytes(uint16(s.Columns))...)
+ //number of params
+ data = append(data, Uint16ToBytes(uint16(s.Params))...)
+ //filler [00]
+ data = append(data, 0)
+ //warning count
+ data = append(data, 0, 0)
+
+ if err := c.WritePacket(data); err != nil {
+ return err
+ }
+
+ if s.Params > 0 {
+ for i := 0; i < s.Params; i++ {
+ data = data[0:4]
+ data = append(data, []byte(paramFieldData)...)
+
+ if err := c.WritePacket(data); err != nil {
+ return errors.Trace(err)
+ }
+ }
+
+ if err := c.writeEOF(); err != nil {
+ return err
+ }
+ }
+
+ if s.Columns > 0 {
+ for i := 0; i < s.Columns; i++ {
+ data = data[0:4]
+ data = append(data, []byte(columnFieldData)...)
+
+ if err := c.WritePacket(data); err != nil {
+ return errors.Trace(err)
+ }
+ }
+
+ if err := c.writeEOF(); err != nil {
+ return err
+ }
+
+ }
+ return nil
+}
+
+func (c *Conn) handleStmtExecute(data []byte) (*Result, error) {
+ if len(data) < 9 {
+ return nil, ErrMalformPacket
+ }
+
+ pos := 0
+ id := binary.LittleEndian.Uint32(data[0:4])
+ pos += 4
+
+ s, ok := c.stmts[id]
+ if !ok {
+ return nil, NewDefaultError(ER_UNKNOWN_STMT_HANDLER,
+ strconv.FormatUint(uint64(id), 10), "stmt_execute")
+ }
+
+ flag := data[pos]
+ pos++
+ //now we only support CURSOR_TYPE_NO_CURSOR flag
+ if flag != 0 {
+ return nil, NewError(ER_UNKNOWN_ERROR, fmt.Sprintf("unsupported flag %d", flag))
+ }
+
+ //skip iteration-count, always 1
+ pos += 4
+
+ var nullBitmaps []byte
+ var paramTypes []byte
+ var paramValues []byte
+
+ paramNum := s.Params
+
+ if paramNum > 0 {
+ nullBitmapLen := (s.Params + 7) >> 3
+ if len(data) < (pos + nullBitmapLen + 1) {
+ return nil, ErrMalformPacket
+ }
+ nullBitmaps = data[pos : pos+nullBitmapLen]
+ pos += nullBitmapLen
+
+ //new param bound flag
+ if data[pos] == 1 {
+ pos++
+ if len(data) < (pos + (paramNum << 1)) {
+ return nil, ErrMalformPacket
+ }
+
+ paramTypes = data[pos : pos+(paramNum<<1)]
+ pos += (paramNum << 1)
+
+ paramValues = data[pos:]
+ }
+
+ if err := c.bindStmtArgs(s, nullBitmaps, paramTypes, paramValues); err != nil {
+ return nil, errors.Trace(err)
+ }
+ }
+
+ var r *Result
+ var err error
+ if r, err = c.h.HandleStmtExecute(s.Context, s.Query, s.Args); err != nil {
+ return nil, errors.Trace(err)
+ }
+
+ s.ResetParams()
+
+ return r, nil
+}
+
+func (c *Conn) bindStmtArgs(s *Stmt, nullBitmap, paramTypes, paramValues []byte) error {
+ args := s.Args
+
+ pos := 0
+
+ var v []byte
+ var n int = 0
+ var isNull bool
+ var err error
+
+ for i := 0; i < s.Params; i++ {
+ if nullBitmap[i>>3]&(1<<(uint(i)%8)) > 0 {
+ args[i] = nil
+ continue
+ }
+
+ tp := paramTypes[i<<1]
+ isUnsigned := (paramTypes[(i<<1)+1] & 0x80) > 0
+
+ switch tp {
+ case MYSQL_TYPE_NULL:
+ args[i] = nil
+ continue
+
+ case MYSQL_TYPE_TINY:
+ if len(paramValues) < (pos + 1) {
+ return ErrMalformPacket
+ }
+
+ if isUnsigned {
+ args[i] = uint8(paramValues[pos])
+ } else {
+ args[i] = int8(paramValues[pos])
+ }
+
+ pos++
+ continue
+
+ case MYSQL_TYPE_SHORT, MYSQL_TYPE_YEAR:
+ if len(paramValues) < (pos + 2) {
+ return ErrMalformPacket
+ }
+
+ if isUnsigned {
+ args[i] = uint16(binary.LittleEndian.Uint16(paramValues[pos : pos+2]))
+ } else {
+ args[i] = int16((binary.LittleEndian.Uint16(paramValues[pos : pos+2])))
+ }
+ pos += 2
+ continue
+
+ case MYSQL_TYPE_INT24, MYSQL_TYPE_LONG:
+ if len(paramValues) < (pos + 4) {
+ return ErrMalformPacket
+ }
+
+ if isUnsigned {
+ args[i] = uint32(binary.LittleEndian.Uint32(paramValues[pos : pos+4]))
+ } else {
+ args[i] = int32(binary.LittleEndian.Uint32(paramValues[pos : pos+4]))
+ }
+ pos += 4
+ continue
+
+ case MYSQL_TYPE_LONGLONG:
+ if len(paramValues) < (pos + 8) {
+ return ErrMalformPacket
+ }
+
+ if isUnsigned {
+ args[i] = binary.LittleEndian.Uint64(paramValues[pos : pos+8])
+ } else {
+ args[i] = int64(binary.LittleEndian.Uint64(paramValues[pos : pos+8]))
+ }
+ pos += 8
+ continue
+
+ case MYSQL_TYPE_FLOAT:
+ if len(paramValues) < (pos + 4) {
+ return ErrMalformPacket
+ }
+
+ args[i] = float32(math.Float32frombits(binary.LittleEndian.Uint32(paramValues[pos : pos+4])))
+ pos += 4
+ continue
+
+ case MYSQL_TYPE_DOUBLE:
+ if len(paramValues) < (pos + 8) {
+ return ErrMalformPacket
+ }
+
+ args[i] = math.Float64frombits(binary.LittleEndian.Uint64(paramValues[pos : pos+8]))
+ pos += 8
+ continue
+
+ case MYSQL_TYPE_DECIMAL, MYSQL_TYPE_NEWDECIMAL, MYSQL_TYPE_VARCHAR,
+ MYSQL_TYPE_BIT, MYSQL_TYPE_ENUM, MYSQL_TYPE_SET, MYSQL_TYPE_TINY_BLOB,
+ MYSQL_TYPE_MEDIUM_BLOB, MYSQL_TYPE_LONG_BLOB, MYSQL_TYPE_BLOB,
+ MYSQL_TYPE_VAR_STRING, MYSQL_TYPE_STRING, MYSQL_TYPE_GEOMETRY,
+ MYSQL_TYPE_DATE, MYSQL_TYPE_NEWDATE,
+ MYSQL_TYPE_TIMESTAMP, MYSQL_TYPE_DATETIME, MYSQL_TYPE_TIME:
+ if len(paramValues) < (pos + 1) {
+ return ErrMalformPacket
+ }
+
+ v, isNull, n, err = LengthEnodedString(paramValues[pos:])
+ pos += n
+ if err != nil {
+ return errors.Trace(err)
+ }
+
+ if !isNull {
+ args[i] = v
+ continue
+ } else {
+ args[i] = nil
+ continue
+ }
+ default:
+ return errors.Errorf("Stmt Unknown FieldType %d", tp)
+ }
+ }
+ return nil
+}
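+
+// Illustrative sketch (not part of the upstream library): how the NULL bitmap
+// read above marks parameters, where parameter i is NULL when bit (i % 8) of
+// byte (i / 8) is set. The bitmap value is a made-up example.
+//
+//	nullBitmap := []byte{0x02} // 0b00000010: of three params, only the second is NULL
+//	for i := 0; i < 3; i++ {
+//		isNull := nullBitmap[i>>3]&(1<<(uint(i)%8)) > 0
+//		// i = 0 -> false, i = 1 -> true, i = 2 -> false
+//		_ = isNull
+//	}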
+
+// stmt send long data command has no response
+func (c *Conn) handleStmtSendLongData(data []byte) error {
+ if len(data) < 6 {
+ return nil
+ }
+
+ id := binary.LittleEndian.Uint32(data[0:4])
+
+ s, ok := c.stmts[id]
+ if !ok {
+ return nil
+ }
+
+ paramId := binary.LittleEndian.Uint16(data[4:6])
+ if paramId >= uint16(s.Params) {
+ return nil
+ }
+
+ if s.Args[paramId] == nil {
+ s.Args[paramId] = data[6:]
+ } else {
+ if b, ok := s.Args[paramId].([]byte); ok {
+ b = append(b, data[6:]...)
+ s.Args[paramId] = b
+ } else {
+ return nil
+ }
+ }
+
+ return nil
+}
+
+func (c *Conn) handleStmtReset(data []byte) (*Result, error) {
+ if len(data) < 4 {
+ return nil, ErrMalformPacket
+ }
+
+ id := binary.LittleEndian.Uint32(data[0:4])
+
+ s, ok := c.stmts[id]
+ if !ok {
+ return nil, NewDefaultError(ER_UNKNOWN_STMT_HANDLER,
+ strconv.FormatUint(uint64(id), 10), "stmt_reset")
+ }
+
+ s.ResetParams()
+
+ return &Result{}, nil
+}
+
+// stmt close command has no response
+func (c *Conn) handleStmtClose(data []byte) error {
+ if len(data) < 4 {
+ return nil
+ }
+
+ id := binary.LittleEndian.Uint32(data[0:4])
+
+ stmt, ok := c.stmts[id]
+ if !ok {
+ return nil
+ }
+
+ if err := c.h.HandleStmtClose(stmt.Context); err != nil {
+ return err
+ }
+
+ delete(c.stmts, id)
+
+ return nil
+}
diff --git a/vendor/github.com/siddontang/go/LICENSE b/vendor/github.com/siddontang/go/LICENSE
new file mode 100644
index 000000000..80511a0a7
--- /dev/null
+++ b/vendor/github.com/siddontang/go/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 siddontang
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/siddontang/go/bson/LICENSE b/vendor/github.com/siddontang/go/bson/LICENSE
new file mode 100644
index 000000000..890326017
--- /dev/null
+++ b/vendor/github.com/siddontang/go/bson/LICENSE
@@ -0,0 +1,25 @@
+BSON library for Go
+
+Copyright (c) 2010-2012 - Gustavo Niemeyer
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/siddontang/go/filelock/LICENSE b/vendor/github.com/siddontang/go/filelock/LICENSE
new file mode 100644
index 000000000..fec05ce12
--- /dev/null
+++ b/vendor/github.com/siddontang/go/filelock/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The LevelDB-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/siddontang/go/hack/hack.go b/vendor/github.com/siddontang/go/hack/hack.go
new file mode 100644
index 000000000..74ee83cbf
--- /dev/null
+++ b/vendor/github.com/siddontang/go/hack/hack.go
@@ -0,0 +1,27 @@
+package hack
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// String converts a byte slice to a string without copying the data.
+// Use at your own risk.
+func String(b []byte) (s string) {
+ pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+ pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
+ pstring.Data = pbytes.Data
+ pstring.Len = pbytes.Len
+ return
+}
+
+// Slice converts a string to a byte slice without copying the data.
+// Use at your own risk.
+func Slice(s string) (b []byte) {
+ pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+ pstring := (*reflect.StringHeader)(unsafe.Pointer(&s))
+ pbytes.Data = pstring.Data
+ pbytes.Len = pstring.Len
+ pbytes.Cap = pstring.Len
+ return
+}
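+
+// Illustrative sketch (not part of the upstream library): both conversions
+// alias the original memory instead of copying it, so the result must not be
+// used after the source is mutated or freed.
+//
+//	b := []byte("hello")
+//	s := String(b) // no copy; s shares b's backing array
+//	b[0] = 'H'     // s now reads "Hello" -- only safe if you own b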
diff --git a/vendor/github.com/siddontang/go/snappy/LICENSE b/vendor/github.com/siddontang/go/snappy/LICENSE
new file mode 100644
index 000000000..6050c10f4
--- /dev/null
+++ b/vendor/github.com/siddontang/go/snappy/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/siddontang/go/sync2/atomic.go b/vendor/github.com/siddontang/go/sync2/atomic.go
new file mode 100644
index 000000000..382fc20df
--- /dev/null
+++ b/vendor/github.com/siddontang/go/sync2/atomic.go
@@ -0,0 +1,146 @@
+// Copyright 2013, Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package sync2
+
+import (
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+type AtomicInt32 int32
+
+func (i *AtomicInt32) Add(n int32) int32 {
+ return atomic.AddInt32((*int32)(i), n)
+}
+
+func (i *AtomicInt32) Set(n int32) {
+ atomic.StoreInt32((*int32)(i), n)
+}
+
+func (i *AtomicInt32) Get() int32 {
+ return atomic.LoadInt32((*int32)(i))
+}
+
+func (i *AtomicInt32) CompareAndSwap(oldval, newval int32) (swapped bool) {
+ return atomic.CompareAndSwapInt32((*int32)(i), oldval, newval)
+}
+
+type AtomicUint32 uint32
+
+func (i *AtomicUint32) Add(n uint32) uint32 {
+ return atomic.AddUint32((*uint32)(i), n)
+}
+
+func (i *AtomicUint32) Set(n uint32) {
+ atomic.StoreUint32((*uint32)(i), n)
+}
+
+func (i *AtomicUint32) Get() uint32 {
+ return atomic.LoadUint32((*uint32)(i))
+}
+
+func (i *AtomicUint32) CompareAndSwap(oldval, newval uint32) (swapped bool) {
+ return atomic.CompareAndSwapUint32((*uint32)(i), oldval, newval)
+}
+
+type AtomicInt64 int64
+
+func (i *AtomicInt64) Add(n int64) int64 {
+ return atomic.AddInt64((*int64)(i), n)
+}
+
+func (i *AtomicInt64) Set(n int64) {
+ atomic.StoreInt64((*int64)(i), n)
+}
+
+func (i *AtomicInt64) Get() int64 {
+ return atomic.LoadInt64((*int64)(i))
+}
+
+func (i *AtomicInt64) CompareAndSwap(oldval, newval int64) (swapped bool) {
+ return atomic.CompareAndSwapInt64((*int64)(i), oldval, newval)
+}
+
+type AtomicUint64 uint64
+
+func (i *AtomicUint64) Add(n uint64) uint64 {
+ return atomic.AddUint64((*uint64)(i), n)
+}
+
+func (i *AtomicUint64) Set(n uint64) {
+ atomic.StoreUint64((*uint64)(i), n)
+}
+
+func (i *AtomicUint64) Get() uint64 {
+ return atomic.LoadUint64((*uint64)(i))
+}
+
+func (i *AtomicUint64) CompareAndSwap(oldval, newval uint64) (swapped bool) {
+ return atomic.CompareAndSwapUint64((*uint64)(i), oldval, newval)
+}
+
+type AtomicDuration int64
+
+func (d *AtomicDuration) Add(duration time.Duration) time.Duration {
+ return time.Duration(atomic.AddInt64((*int64)(d), int64(duration)))
+}
+
+func (d *AtomicDuration) Set(duration time.Duration) {
+ atomic.StoreInt64((*int64)(d), int64(duration))
+}
+
+func (d *AtomicDuration) Get() time.Duration {
+ return time.Duration(atomic.LoadInt64((*int64)(d)))
+}
+
+func (d *AtomicDuration) CompareAndSwap(oldval, newval time.Duration) (swapped bool) {
+ return atomic.CompareAndSwapInt64((*int64)(d), int64(oldval), int64(newval))
+}
+
+// AtomicString gives you atomic-style APIs for string, but
+// it's only a convenience wrapper that uses a mutex. So, it's
+// not as efficient as the rest of the atomic types.
+type AtomicString struct {
+ mu sync.Mutex
+ str string
+}
+
+func (s *AtomicString) Set(str string) {
+ s.mu.Lock()
+ s.str = str
+ s.mu.Unlock()
+}
+
+func (s *AtomicString) Get() string {
+ s.mu.Lock()
+ str := s.str
+ s.mu.Unlock()
+ return str
+}
+
+func (s *AtomicString) CompareAndSwap(oldval, newval string) (swapped bool) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.str == oldval {
+ s.str = newval
+ return true
+ }
+ return false
+}
+
+type AtomicBool int32
+
+func (b *AtomicBool) Set(v bool) {
+ if v {
+ atomic.StoreInt32((*int32)(b), 1)
+ } else {
+ atomic.StoreInt32((*int32)(b), 0)
+ }
+}
+
+func (b *AtomicBool) Get() bool {
+ return atomic.LoadInt32((*int32)(b)) == 1
+}
diff --git a/vendor/github.com/siddontang/go/sync2/semaphore.go b/vendor/github.com/siddontang/go/sync2/semaphore.go
new file mode 100644
index 000000000..d310da729
--- /dev/null
+++ b/vendor/github.com/siddontang/go/sync2/semaphore.go
@@ -0,0 +1,65 @@
+package sync2
+
+import (
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+func NewSemaphore(initialCount int) *Semaphore {
+ res := &Semaphore{
+ counter: int64(initialCount),
+ }
+ res.cond.L = &res.lock
+ return res
+}
+
+type Semaphore struct {
+ lock sync.Mutex
+ cond sync.Cond
+ counter int64
+}
+
+func (s *Semaphore) Release() {
+ s.lock.Lock()
+ s.counter += 1
+ if s.counter >= 0 {
+ s.cond.Signal()
+ }
+ s.lock.Unlock()
+}
+
+func (s *Semaphore) Acquire() {
+ s.lock.Lock()
+ for s.counter < 1 {
+ s.cond.Wait()
+ }
+ s.counter -= 1
+ s.lock.Unlock()
+}
+
+func (s *Semaphore) AcquireTimeout(timeout time.Duration) bool {
+ done := make(chan bool, 1)
+ // Gate used to communicate between the threads and decide what the result
+ // is. If the main thread decides, we have timed out, otherwise we succeed.
+ decided := new(int32)
+ go func() {
+ s.Acquire()
+ if atomic.SwapInt32(decided, 1) == 0 {
+ done <- true
+ } else {
+ // The result was already decided and this goroutine did not win, so undo the acquire.
+ s.Release()
+ }
+ }()
+ select {
+ case <-done:
+ return true
+ case <-time.NewTimer(timeout).C:
+ if atomic.SwapInt32(decided, 1) == 1 {
+ // The other thread already decided the result
+ return true
+ }
+ return false
+ }
+}
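+
+// Illustrative sketch (not part of the upstream library): using the Semaphore
+// above to bound concurrency. The limit of 3 and the jobs/Job names are
+// placeholders for this example.
+//
+//	sem := NewSemaphore(3) // at most 3 jobs run at once
+//	for _, job := range jobs {
+//		sem.Acquire()
+//		go func(j Job) {
+//			defer sem.Release()
+//			j.Run()
+//		}(job)
+//	}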
diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml
index 2f19b4a75..1f953bebd 100644
--- a/vendor/github.com/sirupsen/logrus/.travis.yml
+++ b/vendor/github.com/sirupsen/logrus/.travis.yml
@@ -1,13 +1,51 @@
language: go
-go:
- - 1.9.x
- - 1.10.x
env:
- GOMAXPROCS=4 GORACE=halt_on_error=1
-install:
- - go get github.com/stretchr/testify/assert
- - go get gopkg.in/gemnasium/logrus-airbrake-hook.v2
- - go get golang.org/x/sys/unix
- - go get golang.org/x/sys/windows
-script:
- - go test -race -v ./...
+matrix:
+ include:
+ - go: 1.10.x
+ install:
+ - go get github.com/stretchr/testify/assert
+ - go get golang.org/x/crypto/ssh/terminal
+ - go get golang.org/x/sys/unix
+ - go get golang.org/x/sys/windows
+ script:
+ - go test -race -v ./...
+ - go: 1.11.x
+ env: GO111MODULE=on
+ install:
+ - go mod download
+ script:
+ - go test -race -v ./...
+ - go: 1.11.x
+ env: GO111MODULE=off
+ install:
+ - go get github.com/stretchr/testify/assert
+ - go get golang.org/x/crypto/ssh/terminal
+ - go get golang.org/x/sys/unix
+ - go get golang.org/x/sys/windows
+ script:
+ - go test -race -v ./...
+ - go: 1.10.x
+ install:
+ - go get github.com/stretchr/testify/assert
+ - go get golang.org/x/crypto/ssh/terminal
+ - go get golang.org/x/sys/unix
+ - go get golang.org/x/sys/windows
+ script:
+ - go test -race -v -tags appengine ./...
+ - go: 1.11.x
+ env: GO111MODULE=on
+ install:
+ - go mod download
+ script:
+ - go test -race -v -tags appengine ./...
+ - go: 1.11.x
+ env: GO111MODULE=off
+ install:
+ - go get github.com/stretchr/testify/assert
+ - go get golang.org/x/crypto/ssh/terminal
+ - go get golang.org/x/sys/unix
+ - go get golang.org/x/sys/windows
+ script:
+ - go test -race -v -tags appengine ./...
diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
index 1bd1deb29..ff0471869 100644
--- a/vendor/github.com/sirupsen/logrus/CHANGELOG.md
+++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md
@@ -1,3 +1,38 @@
+# 1.1.1
+This is a bug fix release.
+ * fix the build break on Solaris
+ * don't drop a whole trace in JSONFormatter when a field param is a function pointer which cannot be serialized
+
+# 1.1.0
+This new release introduces:
+ * several fixes:
+ * a fix for a race condition on entry formatting
+ * proper cleanup of previously used entries before putting them back in the pool
+ * the extra new line at the end of message in text formatter has been removed
+ * a new global public API to check if a level is activated: IsLevelEnabled
+ * the following methods have been added to the Logger object
+ * IsLevelEnabled
+ * SetFormatter
+ * SetOutput
+ * ReplaceHooks
+ * introduction of go module
+ * an indent configuration for the json formatter
+ * output colour support for windows
+ * the field sort function is now configurable for text formatter
+ * the CLICOLOR and CLICOLOR\_FORCE environment variable support in the text formatter
+
+# 1.0.6
+
+This new release introduces:
+ * a new API, WithTime, which allows easily forcing the time of the log entry,
+ which is mostly useful for logger wrappers
+ * a fix reverting the immutability of the entry given as a parameter to the hooks
+ * a new configuration field of the JSON formatter in order to put all the fields
+ in a nested dictionary
+ * a new SetOutput method in the Logger
+ * a new configuration option for the text formatter to configure the names of the default keys
+ * a new configuration of the text formatter to disable the level truncation
+
# 1.0.5
* Fix hooks race (#707)
diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go
index 473bd1a0d..ca634a609 100644
--- a/vendor/github.com/sirupsen/logrus/entry.go
+++ b/vendor/github.com/sirupsen/logrus/entry.go
@@ -4,6 +4,7 @@ import (
"bytes"
"fmt"
"os"
+ "reflect"
"sync"
"time"
)
@@ -41,8 +42,11 @@ type Entry struct {
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
Message string
- // When formatter is called in entry.log(), an Buffer may be set to entry
+ // When formatter is called in entry.log(), a Buffer may be set to entry
Buffer *bytes.Buffer
+
+ // err may contain a field formatting error
+ err string
}
func NewEntry(logger *Logger) *Entry {
@@ -80,10 +84,18 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
for k, v := range entry.Data {
data[k] = v
}
+ var field_err string
for k, v := range fields {
- data[k] = v
+ if t := reflect.TypeOf(v); t != nil && t.Kind() == reflect.Func {
+ field_err = fmt.Sprintf("can not add field %q", k)
+ if entry.err != "" {
+ field_err = entry.err + ", " + field_err
+ }
+ } else {
+ data[k] = v
+ }
}
- return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time}
+ return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: field_err}
}
// Overrides the time of the Entry.
@@ -137,9 +149,9 @@ func (entry *Entry) fireHooks() {
}
func (entry *Entry) write() {
- serialized, err := entry.Logger.Formatter.Format(entry)
entry.Logger.mu.Lock()
defer entry.Logger.mu.Unlock()
+ serialized, err := entry.Logger.Formatter.Format(entry)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
} else {
@@ -151,7 +163,7 @@ func (entry *Entry) write() {
}
func (entry *Entry) Debug(args ...interface{}) {
- if entry.Logger.level() >= DebugLevel {
+ if entry.Logger.IsLevelEnabled(DebugLevel) {
entry.log(DebugLevel, fmt.Sprint(args...))
}
}
@@ -161,13 +173,13 @@ func (entry *Entry) Print(args ...interface{}) {
}
func (entry *Entry) Info(args ...interface{}) {
- if entry.Logger.level() >= InfoLevel {
+ if entry.Logger.IsLevelEnabled(InfoLevel) {
entry.log(InfoLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Warn(args ...interface{}) {
- if entry.Logger.level() >= WarnLevel {
+ if entry.Logger.IsLevelEnabled(WarnLevel) {
entry.log(WarnLevel, fmt.Sprint(args...))
}
}
@@ -177,20 +189,20 @@ func (entry *Entry) Warning(args ...interface{}) {
}
func (entry *Entry) Error(args ...interface{}) {
- if entry.Logger.level() >= ErrorLevel {
+ if entry.Logger.IsLevelEnabled(ErrorLevel) {
entry.log(ErrorLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Fatal(args ...interface{}) {
- if entry.Logger.level() >= FatalLevel {
+ if entry.Logger.IsLevelEnabled(FatalLevel) {
entry.log(FatalLevel, fmt.Sprint(args...))
}
Exit(1)
}
func (entry *Entry) Panic(args ...interface{}) {
- if entry.Logger.level() >= PanicLevel {
+ if entry.Logger.IsLevelEnabled(PanicLevel) {
entry.log(PanicLevel, fmt.Sprint(args...))
}
panic(fmt.Sprint(args...))
@@ -199,13 +211,13 @@ func (entry *Entry) Panic(args ...interface{}) {
// Entry Printf family functions
func (entry *Entry) Debugf(format string, args ...interface{}) {
- if entry.Logger.level() >= DebugLevel {
+ if entry.Logger.IsLevelEnabled(DebugLevel) {
entry.Debug(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Infof(format string, args ...interface{}) {
- if entry.Logger.level() >= InfoLevel {
+ if entry.Logger.IsLevelEnabled(InfoLevel) {
entry.Info(fmt.Sprintf(format, args...))
}
}
@@ -215,7 +227,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) {
}
func (entry *Entry) Warnf(format string, args ...interface{}) {
- if entry.Logger.level() >= WarnLevel {
+ if entry.Logger.IsLevelEnabled(WarnLevel) {
entry.Warn(fmt.Sprintf(format, args...))
}
}
@@ -225,20 +237,20 @@ func (entry *Entry) Warningf(format string, args ...interface{}) {
}
func (entry *Entry) Errorf(format string, args ...interface{}) {
- if entry.Logger.level() >= ErrorLevel {
+ if entry.Logger.IsLevelEnabled(ErrorLevel) {
entry.Error(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Fatalf(format string, args ...interface{}) {
- if entry.Logger.level() >= FatalLevel {
+ if entry.Logger.IsLevelEnabled(FatalLevel) {
entry.Fatal(fmt.Sprintf(format, args...))
}
Exit(1)
}
func (entry *Entry) Panicf(format string, args ...interface{}) {
- if entry.Logger.level() >= PanicLevel {
+ if entry.Logger.IsLevelEnabled(PanicLevel) {
entry.Panic(fmt.Sprintf(format, args...))
}
}
@@ -246,13 +258,13 @@ func (entry *Entry) Panicf(format string, args ...interface{}) {
// Entry Println family functions
func (entry *Entry) Debugln(args ...interface{}) {
- if entry.Logger.level() >= DebugLevel {
+ if entry.Logger.IsLevelEnabled(DebugLevel) {
entry.Debug(entry.sprintlnn(args...))
}
}
func (entry *Entry) Infoln(args ...interface{}) {
- if entry.Logger.level() >= InfoLevel {
+ if entry.Logger.IsLevelEnabled(InfoLevel) {
entry.Info(entry.sprintlnn(args...))
}
}
@@ -262,7 +274,7 @@ func (entry *Entry) Println(args ...interface{}) {
}
func (entry *Entry) Warnln(args ...interface{}) {
- if entry.Logger.level() >= WarnLevel {
+ if entry.Logger.IsLevelEnabled(WarnLevel) {
entry.Warn(entry.sprintlnn(args...))
}
}
@@ -272,20 +284,20 @@ func (entry *Entry) Warningln(args ...interface{}) {
}
func (entry *Entry) Errorln(args ...interface{}) {
- if entry.Logger.level() >= ErrorLevel {
+ if entry.Logger.IsLevelEnabled(ErrorLevel) {
entry.Error(entry.sprintlnn(args...))
}
}
func (entry *Entry) Fatalln(args ...interface{}) {
- if entry.Logger.level() >= FatalLevel {
+ if entry.Logger.IsLevelEnabled(FatalLevel) {
entry.Fatal(entry.sprintlnn(args...))
}
Exit(1)
}
func (entry *Entry) Panicln(args ...interface{}) {
- if entry.Logger.level() >= PanicLevel {
+ if entry.Logger.IsLevelEnabled(PanicLevel) {
entry.Panic(entry.sprintlnn(args...))
}
}
diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go
index eb612a6f3..fb2a7a1f0 100644
--- a/vendor/github.com/sirupsen/logrus/exported.go
+++ b/vendor/github.com/sirupsen/logrus/exported.go
@@ -21,30 +21,27 @@ func SetOutput(out io.Writer) {
// SetFormatter sets the standard logger formatter.
func SetFormatter(formatter Formatter) {
- std.mu.Lock()
- defer std.mu.Unlock()
- std.Formatter = formatter
+ std.SetFormatter(formatter)
}
// SetLevel sets the standard logger level.
func SetLevel(level Level) {
- std.mu.Lock()
- defer std.mu.Unlock()
std.SetLevel(level)
}
// GetLevel returns the standard logger level.
func GetLevel() Level {
- std.mu.Lock()
- defer std.mu.Unlock()
- return std.level()
+ return std.GetLevel()
+}
+
+// IsLevelEnabled checks if the log level of the standard logger is greater than or equal to the level param
+func IsLevelEnabled(level Level) bool {
+ return std.IsLevelEnabled(level)
}
// AddHook adds a hook to the standard logger hooks.
func AddHook(hook Hook) {
- std.mu.Lock()
- defer std.mu.Unlock()
- std.Hooks.Add(hook)
+ std.AddHook(hook)
}
// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go
index 83c74947b..be2f3fcee 100644
--- a/vendor/github.com/sirupsen/logrus/formatter.go
+++ b/vendor/github.com/sirupsen/logrus/formatter.go
@@ -2,7 +2,14 @@ package logrus
import "time"
-const defaultTimestampFormat = time.RFC3339
+// Default key names for the default fields
+const (
+ defaultTimestampFormat = time.RFC3339
+ FieldKeyMsg = "msg"
+ FieldKeyLevel = "level"
+ FieldKeyTime = "time"
+ FieldKeyLogrusError = "logrus_error"
+)
// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
@@ -48,4 +55,10 @@ func prefixFieldClashes(data Fields, fieldMap FieldMap) {
data["fields."+levelKey] = l
delete(data, levelKey)
}
+
+ logrusErrKey := fieldMap.resolve(FieldKeyLogrusError)
+ if l, ok := data[logrusErrKey]; ok {
+ data["fields."+logrusErrKey] = l
+ delete(data, logrusErrKey)
+ }
}
diff --git a/vendor/github.com/sirupsen/logrus/go.mod b/vendor/github.com/sirupsen/logrus/go.mod
new file mode 100644
index 000000000..f4fed02fb
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/go.mod
@@ -0,0 +1,10 @@
+module github.com/sirupsen/logrus
+
+require (
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/stretchr/testify v1.2.2
+ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793
+ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33
+)
diff --git a/vendor/github.com/sirupsen/logrus/go.sum b/vendor/github.com/sirupsen/logrus/go.sum
new file mode 100644
index 000000000..1f0d71964
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/go.sum
@@ -0,0 +1,12 @@
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs=
+github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go
index dab17610f..ef8d07460 100644
--- a/vendor/github.com/sirupsen/logrus/json_formatter.go
+++ b/vendor/github.com/sirupsen/logrus/json_formatter.go
@@ -1,6 +1,7 @@
package logrus
import (
+ "bytes"
"encoding/json"
"fmt"
)
@@ -10,13 +11,6 @@ type fieldKey string
// FieldMap allows customization of the key names for default fields.
type FieldMap map[fieldKey]string
-// Default key names for the default fields
-const (
- FieldKeyMsg = "msg"
- FieldKeyLevel = "level"
- FieldKeyTime = "time"
-)
-
func (f FieldMap) resolve(key fieldKey) string {
if k, ok := f[key]; ok {
return k
@@ -46,6 +40,9 @@ type JSONFormatter struct {
// },
// }
FieldMap FieldMap
+
+ // PrettyPrint will indent all json logs
+ PrettyPrint bool
}
// Format renders a single log entry
@@ -75,15 +72,29 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
timestampFormat = defaultTimestampFormat
}
+ if entry.err != "" {
+ data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err
+ }
if !f.DisableTimestamp {
data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
}
data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
- serialized, err := json.Marshal(data)
- if err != nil {
+ var b *bytes.Buffer
+ if entry.Buffer != nil {
+ b = entry.Buffer
+ } else {
+ b = &bytes.Buffer{}
+ }
+
+ encoder := json.NewEncoder(b)
+ if f.PrettyPrint {
+ encoder.SetIndent("", " ")
+ }
+ if err := encoder.Encode(data); err != nil {
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
}
- return append(serialized, '\n'), nil
+
+ return b.Bytes(), nil
}
diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go
index 342f7977d..b67bfcbd3 100644
--- a/vendor/github.com/sirupsen/logrus/logger.go
+++ b/vendor/github.com/sirupsen/logrus/logger.go
@@ -11,7 +11,7 @@ import (
type Logger struct {
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
// file, or leave it default which is `os.Stderr`. You can also set this to
- // something more adventorous, such as logging to Kafka.
+ // something more adventurous, such as logging to Kafka.
Out io.Writer
// Hooks for the logger instance. These allow firing events based on logging
// levels and log entries. For example, to send errors to an error tracking
@@ -85,6 +85,7 @@ func (logger *Logger) newEntry() *Entry {
}
func (logger *Logger) releaseEntry(entry *Entry) {
+ entry.Data = map[string]interface{}{}
logger.entryPool.Put(entry)
}
@@ -121,7 +122,7 @@ func (logger *Logger) WithTime(t time.Time) *Entry {
}
func (logger *Logger) Debugf(format string, args ...interface{}) {
- if logger.level() >= DebugLevel {
+ if logger.IsLevelEnabled(DebugLevel) {
entry := logger.newEntry()
entry.Debugf(format, args...)
logger.releaseEntry(entry)
@@ -129,7 +130,7 @@ func (logger *Logger) Debugf(format string, args ...interface{}) {
}
func (logger *Logger) Infof(format string, args ...interface{}) {
- if logger.level() >= InfoLevel {
+ if logger.IsLevelEnabled(InfoLevel) {
entry := logger.newEntry()
entry.Infof(format, args...)
logger.releaseEntry(entry)
@@ -143,7 +144,7 @@ func (logger *Logger) Printf(format string, args ...interface{}) {
}
func (logger *Logger) Warnf(format string, args ...interface{}) {
- if logger.level() >= WarnLevel {
+ if logger.IsLevelEnabled(WarnLevel) {
entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
@@ -151,7 +152,7 @@ func (logger *Logger) Warnf(format string, args ...interface{}) {
}
func (logger *Logger) Warningf(format string, args ...interface{}) {
- if logger.level() >= WarnLevel {
+ if logger.IsLevelEnabled(WarnLevel) {
entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
@@ -159,7 +160,7 @@ func (logger *Logger) Warningf(format string, args ...interface{}) {
}
func (logger *Logger) Errorf(format string, args ...interface{}) {
- if logger.level() >= ErrorLevel {
+ if logger.IsLevelEnabled(ErrorLevel) {
entry := logger.newEntry()
entry.Errorf(format, args...)
logger.releaseEntry(entry)
@@ -167,7 +168,7 @@ func (logger *Logger) Errorf(format string, args ...interface{}) {
}
func (logger *Logger) Fatalf(format string, args ...interface{}) {
- if logger.level() >= FatalLevel {
+ if logger.IsLevelEnabled(FatalLevel) {
entry := logger.newEntry()
entry.Fatalf(format, args...)
logger.releaseEntry(entry)
@@ -176,7 +177,7 @@ func (logger *Logger) Fatalf(format string, args ...interface{}) {
}
func (logger *Logger) Panicf(format string, args ...interface{}) {
- if logger.level() >= PanicLevel {
+ if logger.IsLevelEnabled(PanicLevel) {
entry := logger.newEntry()
entry.Panicf(format, args...)
logger.releaseEntry(entry)
@@ -184,7 +185,7 @@ func (logger *Logger) Panicf(format string, args ...interface{}) {
}
func (logger *Logger) Debug(args ...interface{}) {
- if logger.level() >= DebugLevel {
+ if logger.IsLevelEnabled(DebugLevel) {
entry := logger.newEntry()
entry.Debug(args...)
logger.releaseEntry(entry)
@@ -192,7 +193,7 @@ func (logger *Logger) Debug(args ...interface{}) {
}
func (logger *Logger) Info(args ...interface{}) {
- if logger.level() >= InfoLevel {
+ if logger.IsLevelEnabled(InfoLevel) {
entry := logger.newEntry()
entry.Info(args...)
logger.releaseEntry(entry)
@@ -206,7 +207,7 @@ func (logger *Logger) Print(args ...interface{}) {
}
func (logger *Logger) Warn(args ...interface{}) {
- if logger.level() >= WarnLevel {
+ if logger.IsLevelEnabled(WarnLevel) {
entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
@@ -214,7 +215,7 @@ func (logger *Logger) Warn(args ...interface{}) {
}
func (logger *Logger) Warning(args ...interface{}) {
- if logger.level() >= WarnLevel {
+ if logger.IsLevelEnabled(WarnLevel) {
entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
@@ -222,7 +223,7 @@ func (logger *Logger) Warning(args ...interface{}) {
}
func (logger *Logger) Error(args ...interface{}) {
- if logger.level() >= ErrorLevel {
+ if logger.IsLevelEnabled(ErrorLevel) {
entry := logger.newEntry()
entry.Error(args...)
logger.releaseEntry(entry)
@@ -230,7 +231,7 @@ func (logger *Logger) Error(args ...interface{}) {
}
func (logger *Logger) Fatal(args ...interface{}) {
- if logger.level() >= FatalLevel {
+ if logger.IsLevelEnabled(FatalLevel) {
entry := logger.newEntry()
entry.Fatal(args...)
logger.releaseEntry(entry)
@@ -239,7 +240,7 @@ func (logger *Logger) Fatal(args ...interface{}) {
}
func (logger *Logger) Panic(args ...interface{}) {
- if logger.level() >= PanicLevel {
+ if logger.IsLevelEnabled(PanicLevel) {
entry := logger.newEntry()
entry.Panic(args...)
logger.releaseEntry(entry)
@@ -247,7 +248,7 @@ func (logger *Logger) Panic(args ...interface{}) {
}
func (logger *Logger) Debugln(args ...interface{}) {
- if logger.level() >= DebugLevel {
+ if logger.IsLevelEnabled(DebugLevel) {
entry := logger.newEntry()
entry.Debugln(args...)
logger.releaseEntry(entry)
@@ -255,7 +256,7 @@ func (logger *Logger) Debugln(args ...interface{}) {
}
func (logger *Logger) Infoln(args ...interface{}) {
- if logger.level() >= InfoLevel {
+ if logger.IsLevelEnabled(InfoLevel) {
entry := logger.newEntry()
entry.Infoln(args...)
logger.releaseEntry(entry)
@@ -269,7 +270,7 @@ func (logger *Logger) Println(args ...interface{}) {
}
func (logger *Logger) Warnln(args ...interface{}) {
- if logger.level() >= WarnLevel {
+ if logger.IsLevelEnabled(WarnLevel) {
entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
@@ -277,7 +278,7 @@ func (logger *Logger) Warnln(args ...interface{}) {
}
func (logger *Logger) Warningln(args ...interface{}) {
- if logger.level() >= WarnLevel {
+ if logger.IsLevelEnabled(WarnLevel) {
entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
@@ -285,7 +286,7 @@ func (logger *Logger) Warningln(args ...interface{}) {
}
func (logger *Logger) Errorln(args ...interface{}) {
- if logger.level() >= ErrorLevel {
+ if logger.IsLevelEnabled(ErrorLevel) {
entry := logger.newEntry()
entry.Errorln(args...)
logger.releaseEntry(entry)
@@ -293,7 +294,7 @@ func (logger *Logger) Errorln(args ...interface{}) {
}
func (logger *Logger) Fatalln(args ...interface{}) {
- if logger.level() >= FatalLevel {
+ if logger.IsLevelEnabled(FatalLevel) {
entry := logger.newEntry()
entry.Fatalln(args...)
logger.releaseEntry(entry)
@@ -302,7 +303,7 @@ func (logger *Logger) Fatalln(args ...interface{}) {
}
func (logger *Logger) Panicln(args ...interface{}) {
- if logger.level() >= PanicLevel {
+ if logger.IsLevelEnabled(PanicLevel) {
entry := logger.newEntry()
entry.Panicln(args...)
logger.releaseEntry(entry)
@@ -320,18 +321,47 @@ func (logger *Logger) level() Level {
return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
}
+// SetLevel sets the logger level.
func (logger *Logger) SetLevel(level Level) {
atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
}
-func (logger *Logger) SetOutput(out io.Writer) {
- logger.mu.Lock()
- defer logger.mu.Unlock()
- logger.Out = out
+// GetLevel returns the logger level.
+func (logger *Logger) GetLevel() Level {
+ return logger.level()
}
+// AddHook adds a hook to the logger hooks.
func (logger *Logger) AddHook(hook Hook) {
logger.mu.Lock()
defer logger.mu.Unlock()
logger.Hooks.Add(hook)
}
+
+// IsLevelEnabled checks if the log level of the logger is greater than or equal to the level param
+func (logger *Logger) IsLevelEnabled(level Level) bool {
+ return logger.level() >= level
+}
+
+// SetFormatter sets the logger formatter.
+func (logger *Logger) SetFormatter(formatter Formatter) {
+ logger.mu.Lock()
+ defer logger.mu.Unlock()
+ logger.Formatter = formatter
+}
+
+// SetOutput sets the logger output.
+func (logger *Logger) SetOutput(output io.Writer) {
+ logger.mu.Lock()
+ defer logger.mu.Unlock()
+ logger.Out = output
+}
+
+// ReplaceHooks replaces the logger hooks and returns the old ones
+func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks {
+ logger.mu.Lock()
+ oldHooks := logger.Hooks
+ logger.Hooks = hooks
+ logger.mu.Unlock()
+ return oldHooks
+}
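
A minimal usage sketch of the new level-guard API introduced above (illustrative only, not part of the patch; the expensiveDump helper is invented for the example). It shows IsLevelEnabled replacing direct comparisons against the internal level() accessor.

package main

import (
	log "github.com/sirupsen/logrus"
)

func expensiveDump() string {
	// Stand-in for work that should only run when debug logging is enabled.
	return "internal state: ..."
}

func main() {
	logger := log.New()
	logger.SetLevel(log.InfoLevel)

	// Guard costly argument construction with the exported accessor.
	if logger.IsLevelEnabled(log.DebugLevel) {
		logger.Debug(expensiveDump())
	}
	logger.Info("service started")
}
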
diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go
index dd3899974..fa0b9dea8 100644
--- a/vendor/github.com/sirupsen/logrus/logrus.go
+++ b/vendor/github.com/sirupsen/logrus/logrus.go
@@ -140,4 +140,11 @@ type FieldLogger interface {
Errorln(args ...interface{})
Fatalln(args ...interface{})
Panicln(args ...interface{})
+
+ // IsDebugEnabled() bool
+ // IsInfoEnabled() bool
+ // IsWarnEnabled() bool
+ // IsErrorEnabled() bool
+ // IsFatalEnabled() bool
+ // IsPanicEnabled() bool
}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_bsd.go
deleted file mode 100644
index 4880d13d2..000000000
--- a/vendor/github.com/sirupsen/logrus/terminal_bsd.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build darwin freebsd openbsd netbsd dragonfly
-// +build !appengine,!gopherjs
-
-package logrus
-
-import "golang.org/x/sys/unix"
-
-const ioctlReadTermios = unix.TIOCGETA
-
-type Termios unix.Termios
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
index 3de08e802..2403de981 100644
--- a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
@@ -1,4 +1,4 @@
-// +build appengine gopherjs
+// +build appengine
package logrus
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_js.go b/vendor/github.com/sirupsen/logrus/terminal_check_js.go
new file mode 100644
index 000000000..0c209750a
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_js.go
@@ -0,0 +1,11 @@
+// +build js
+
+package logrus
+
+import (
+ "io"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+ return false
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
index 067047a12..cf309d6fb 100644
--- a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
@@ -1,4 +1,4 @@
-// +build !appengine,!gopherjs
+// +build !appengine,!js,!windows
package logrus
diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
new file mode 100644
index 000000000..3b9d2864c
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go
@@ -0,0 +1,20 @@
+// +build !appengine,!js,windows
+
+package logrus
+
+import (
+ "io"
+ "os"
+ "syscall"
+)
+
+func checkIfTerminal(w io.Writer) bool {
+ switch v := w.(type) {
+ case *os.File:
+ var mode uint32
+ err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode)
+ return err == nil
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_linux.go b/vendor/github.com/sirupsen/logrus/terminal_linux.go
deleted file mode 100644
index f29a0097c..000000000
--- a/vendor/github.com/sirupsen/logrus/terminal_linux.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Based on ssh/terminal:
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !appengine,!gopherjs
-
-package logrus
-
-import "golang.org/x/sys/unix"
-
-const ioctlReadTermios = unix.TCGETS
-
-type Termios unix.Termios
diff --git a/vendor/github.com/sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/sirupsen/logrus/terminal_notwindows.go
new file mode 100644
index 000000000..3dbd23720
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_notwindows.go
@@ -0,0 +1,8 @@
+// +build !windows
+
+package logrus
+
+import "io"
+
+func initTerminal(w io.Writer) {
+}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_windows.go b/vendor/github.com/sirupsen/logrus/terminal_windows.go
new file mode 100644
index 000000000..b4ef5286c
--- /dev/null
+++ b/vendor/github.com/sirupsen/logrus/terminal_windows.go
@@ -0,0 +1,18 @@
+// +build !appengine,!js,windows
+
+package logrus
+
+import (
+ "io"
+ "os"
+ "syscall"
+
+ sequences "github.com/konsorten/go-windows-terminal-sequences"
+)
+
+func initTerminal(w io.Writer) {
+ switch v := w.(type) {
+ case *os.File:
+ sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true)
+ }
+}
diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go
index 3e5504030..d4663b8c2 100644
--- a/vendor/github.com/sirupsen/logrus/text_formatter.go
+++ b/vendor/github.com/sirupsen/logrus/text_formatter.go
@@ -3,6 +3,7 @@ package logrus
import (
"bytes"
"fmt"
+ "os"
"sort"
"strings"
"sync"
@@ -35,6 +36,9 @@ type TextFormatter struct {
// Force disabling colors.
DisableColors bool
+ // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/
+ EnvironmentOverrideColors bool
+
// Disable timestamp logging. useful when output is redirected to logging
// system that already adds timestamps.
DisableTimestamp bool
@@ -51,6 +55,9 @@ type TextFormatter struct {
// be desired.
DisableSorting bool
+ // The keys sorting function; when nil, sort.Strings is used.
+ SortingFunc func([]string)
+
// Disables the truncation of the level text to 4 characters.
DisableLevelTruncation bool
@@ -69,13 +76,33 @@ type TextFormatter struct {
// FieldKeyMsg: "@message"}}
FieldMap FieldMap
- sync.Once
+ terminalInitOnce sync.Once
}
func (f *TextFormatter) init(entry *Entry) {
if entry.Logger != nil {
f.isTerminal = checkIfTerminal(entry.Logger.Out)
+
+ if f.isTerminal {
+ initTerminal(entry.Logger.Out)
+ }
+ }
+}
+
+func (f *TextFormatter) isColored() bool {
+ isColored := f.ForceColors || f.isTerminal
+
+ if f.EnvironmentOverrideColors {
+ if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" {
+ isColored = true
+ } else if ok && force == "0" {
+ isColored = false
+ } else if os.Getenv("CLICOLOR") == "0" {
+ isColored = false
+ }
}
+
+ return isColored && !f.DisableColors
}
// Format renders a single log entry
@@ -87,8 +114,32 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
keys = append(keys, k)
}
+ fixedKeys := make([]string, 0, 4+len(entry.Data))
+ if !f.DisableTimestamp {
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime))
+ }
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel))
+ if entry.Message != "" {
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg))
+ }
+ if entry.err != "" {
+ fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError))
+ }
+
if !f.DisableSorting {
- sort.Strings(keys)
+ if f.SortingFunc == nil {
+ sort.Strings(keys)
+ fixedKeys = append(fixedKeys, keys...)
+ } else {
+ if !f.isColored() {
+ fixedKeys = append(fixedKeys, keys...)
+ f.SortingFunc(fixedKeys)
+ } else {
+ f.SortingFunc(keys)
+ }
+ }
+ } else {
+ fixedKeys = append(fixedKeys, keys...)
}
var b *bytes.Buffer
@@ -98,26 +149,30 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
b = &bytes.Buffer{}
}
- f.Do(func() { f.init(entry) })
-
- isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
+ f.terminalInitOnce.Do(func() { f.init(entry) })
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
timestampFormat = defaultTimestampFormat
}
- if isColored {
+ if f.isColored() {
f.printColored(b, entry, keys, timestampFormat)
} else {
- if !f.DisableTimestamp {
- f.appendKeyValue(b, f.FieldMap.resolve(FieldKeyTime), entry.Time.Format(timestampFormat))
- }
- f.appendKeyValue(b, f.FieldMap.resolve(FieldKeyLevel), entry.Level.String())
- if entry.Message != "" {
- f.appendKeyValue(b, f.FieldMap.resolve(FieldKeyMsg), entry.Message)
- }
- for _, key := range keys {
- f.appendKeyValue(b, key, entry.Data[key])
+ for _, key := range fixedKeys {
+ var value interface{}
+ switch key {
+ case f.FieldMap.resolve(FieldKeyTime):
+ value = entry.Time.Format(timestampFormat)
+ case f.FieldMap.resolve(FieldKeyLevel):
+ value = entry.Level.String()
+ case f.FieldMap.resolve(FieldKeyMsg):
+ value = entry.Message
+ case f.FieldMap.resolve(FieldKeyLogrusError):
+ value = entry.err
+ default:
+ value = entry.Data[key]
+ }
+ f.appendKeyValue(b, key, value)
}
}
@@ -143,6 +198,10 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin
levelText = levelText[0:4]
}
+ // Remove a single newline if it already exists in the message to keep
+ // the behavior of logrus text_formatter the same as the stdlib log package
+ entry.Message = strings.TrimSuffix(entry.Message, "\n")
+
if f.DisableTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
} else if !f.FullTimestamp {
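
A short, hypothetical sketch (not part of the patch above) of the two TextFormatter options introduced in this file, EnvironmentOverrideColors and SortingFunc:

package main

import (
	"sort"

	log "github.com/sirupsen/logrus"
)

func main() {
	logger := log.New()
	logger.SetFormatter(&log.TextFormatter{
		// Let CLICOLOR / CLICOLOR_FORCE override terminal detection.
		EnvironmentOverrideColors: true,
		// Sort emitted keys in reverse-alphabetical order instead of sort.Strings.
		SortingFunc: func(keys []string) {
			sort.Sort(sort.Reverse(sort.StringSlice(keys)))
		},
	})
	logger.WithField("component", "db").Info("started")
}
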
diff --git a/vendor/github.com/syndtr/goleveldb/LICENSE b/vendor/github.com/syndtr/goleveldb/LICENSE
new file mode 100644
index 000000000..4a772d1ab
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/LICENSE
@@ -0,0 +1,24 @@
+Copyright 2012 Suryandaru Triandana
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/batch.go b/vendor/github.com/syndtr/goleveldb/leveldb/batch.go
new file mode 100644
index 000000000..225920002
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/batch.go
@@ -0,0 +1,349 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/memdb"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+// ErrBatchCorrupted records the reason of batch corruption. This error will be
+// wrapped with errors.ErrCorrupted.
+type ErrBatchCorrupted struct {
+ Reason string
+}
+
+func (e *ErrBatchCorrupted) Error() string {
+ return fmt.Sprintf("leveldb: batch corrupted: %s", e.Reason)
+}
+
+func newErrBatchCorrupted(reason string) error {
+ return errors.NewErrCorrupted(storage.FileDesc{}, &ErrBatchCorrupted{reason})
+}
+
+const (
+ batchHeaderLen = 8 + 4
+ batchGrowRec = 3000
+ batchBufioSize = 16
+)
+
+// BatchReplay wraps basic batch operations.
+type BatchReplay interface {
+ Put(key, value []byte)
+ Delete(key []byte)
+}
+
+type batchIndex struct {
+ keyType keyType
+ keyPos, keyLen int
+ valuePos, valueLen int
+}
+
+func (index batchIndex) k(data []byte) []byte {
+ return data[index.keyPos : index.keyPos+index.keyLen]
+}
+
+func (index batchIndex) v(data []byte) []byte {
+ if index.valueLen != 0 {
+ return data[index.valuePos : index.valuePos+index.valueLen]
+ }
+ return nil
+}
+
+func (index batchIndex) kv(data []byte) (key, value []byte) {
+ return index.k(data), index.v(data)
+}
+
+// Batch is a write batch.
+type Batch struct {
+ data []byte
+ index []batchIndex
+
+ // internalLen is the sum of the key/value pair lengths plus 8 bytes of internal-key overhead per record.
+ internalLen int
+}
+
+func (b *Batch) grow(n int) {
+ o := len(b.data)
+ if cap(b.data)-o < n {
+ div := 1
+ if len(b.index) > batchGrowRec {
+ div = len(b.index) / batchGrowRec
+ }
+ ndata := make([]byte, o, o+n+o/div)
+ copy(ndata, b.data)
+ b.data = ndata
+ }
+}
+
+func (b *Batch) appendRec(kt keyType, key, value []byte) {
+ n := 1 + binary.MaxVarintLen32 + len(key)
+ if kt == keyTypeVal {
+ n += binary.MaxVarintLen32 + len(value)
+ }
+ b.grow(n)
+ index := batchIndex{keyType: kt}
+ o := len(b.data)
+ data := b.data[:o+n]
+ data[o] = byte(kt)
+ o++
+ o += binary.PutUvarint(data[o:], uint64(len(key)))
+ index.keyPos = o
+ index.keyLen = len(key)
+ o += copy(data[o:], key)
+ if kt == keyTypeVal {
+ o += binary.PutUvarint(data[o:], uint64(len(value)))
+ index.valuePos = o
+ index.valueLen = len(value)
+ o += copy(data[o:], value)
+ }
+ b.data = data[:o]
+ b.index = append(b.index, index)
+ b.internalLen += index.keyLen + index.valueLen + 8
+}
+
+// Put appends 'put operation' of the given key/value pair to the batch.
+// It is safe to modify the contents of the argument after Put returns but not
+// before.
+func (b *Batch) Put(key, value []byte) {
+ b.appendRec(keyTypeVal, key, value)
+}
+
+// Delete appends 'delete operation' of the given key to the batch.
+// It is safe to modify the contents of the argument after Delete returns but
+// not before.
+func (b *Batch) Delete(key []byte) {
+ b.appendRec(keyTypeDel, key, nil)
+}
+
+// Dump dumps batch contents. The returned slice can be loaded into the
+// batch using Load method.
+// The returned slice is not its own copy, so the contents should not be
+// modified.
+func (b *Batch) Dump() []byte {
+ return b.data
+}
+
+// Load loads given slice into the batch. Previous contents of the batch
+// will be discarded.
+// The given slice will not be copied and will be used as batch buffer, so
+// it is not safe to modify the contents of the slice.
+func (b *Batch) Load(data []byte) error {
+ return b.decode(data, -1)
+}
+
+// Replay replays batch contents.
+func (b *Batch) Replay(r BatchReplay) error {
+ for _, index := range b.index {
+ switch index.keyType {
+ case keyTypeVal:
+ r.Put(index.k(b.data), index.v(b.data))
+ case keyTypeDel:
+ r.Delete(index.k(b.data))
+ }
+ }
+ return nil
+}
+
+// Len returns number of records in the batch.
+func (b *Batch) Len() int {
+ return len(b.index)
+}
+
+// Reset resets the batch.
+func (b *Batch) Reset() {
+ b.data = b.data[:0]
+ b.index = b.index[:0]
+ b.internalLen = 0
+}
+
+func (b *Batch) replayInternal(fn func(i int, kt keyType, k, v []byte) error) error {
+ for i, index := range b.index {
+ if err := fn(i, index.keyType, index.k(b.data), index.v(b.data)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (b *Batch) append(p *Batch) {
+ ob := len(b.data)
+ oi := len(b.index)
+ b.data = append(b.data, p.data...)
+ b.index = append(b.index, p.index...)
+ b.internalLen += p.internalLen
+
+ // Updating index offset.
+ if ob != 0 {
+ for ; oi < len(b.index); oi++ {
+ index := &b.index[oi]
+ index.keyPos += ob
+ if index.valueLen != 0 {
+ index.valuePos += ob
+ }
+ }
+ }
+}
+
+func (b *Batch) decode(data []byte, expectedLen int) error {
+ b.data = data
+ b.index = b.index[:0]
+ b.internalLen = 0
+ err := decodeBatch(data, func(i int, index batchIndex) error {
+ b.index = append(b.index, index)
+ b.internalLen += index.keyLen + index.valueLen + 8
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+ if expectedLen >= 0 && len(b.index) != expectedLen {
+ return newErrBatchCorrupted(fmt.Sprintf("invalid records length: %d vs %d", expectedLen, len(b.index)))
+ }
+ return nil
+}
+
+func (b *Batch) putMem(seq uint64, mdb *memdb.DB) error {
+ var ik []byte
+ for i, index := range b.index {
+ ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType)
+ if err := mdb.Put(ik, index.v(b.data)); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (b *Batch) revertMem(seq uint64, mdb *memdb.DB) error {
+ var ik []byte
+ for i, index := range b.index {
+ ik = makeInternalKey(ik, index.k(b.data), seq+uint64(i), index.keyType)
+ if err := mdb.Delete(ik); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func newBatch() interface{} {
+ return &Batch{}
+}
+
+func decodeBatch(data []byte, fn func(i int, index batchIndex) error) error {
+ var index batchIndex
+ for i, o := 0, 0; o < len(data); i++ {
+ // Key type.
+ index.keyType = keyType(data[o])
+ if index.keyType > keyTypeVal {
+ return newErrBatchCorrupted(fmt.Sprintf("bad record: invalid type %#x", uint(index.keyType)))
+ }
+ o++
+
+ // Key.
+ x, n := binary.Uvarint(data[o:])
+ o += n
+ if n <= 0 || o+int(x) > len(data) {
+ return newErrBatchCorrupted("bad record: invalid key length")
+ }
+ index.keyPos = o
+ index.keyLen = int(x)
+ o += index.keyLen
+
+ // Value.
+ if index.keyType == keyTypeVal {
+ x, n = binary.Uvarint(data[o:])
+ o += n
+ if n <= 0 || o+int(x) > len(data) {
+ return newErrBatchCorrupted("bad record: invalid value length")
+ }
+ index.valuePos = o
+ index.valueLen = int(x)
+ o += index.valueLen
+ } else {
+ index.valuePos = 0
+ index.valueLen = 0
+ }
+
+ if err := fn(i, index); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func decodeBatchToMem(data []byte, expectSeq uint64, mdb *memdb.DB) (seq uint64, batchLen int, err error) {
+ seq, batchLen, err = decodeBatchHeader(data)
+ if err != nil {
+ return 0, 0, err
+ }
+ if seq < expectSeq {
+ return 0, 0, newErrBatchCorrupted("invalid sequence number")
+ }
+ data = data[batchHeaderLen:]
+ var ik []byte
+ var decodedLen int
+ err = decodeBatch(data, func(i int, index batchIndex) error {
+ if i >= batchLen {
+ return newErrBatchCorrupted("invalid records length")
+ }
+ ik = makeInternalKey(ik, index.k(data), seq+uint64(i), index.keyType)
+ if err := mdb.Put(ik, index.v(data)); err != nil {
+ return err
+ }
+ decodedLen++
+ return nil
+ })
+ if err == nil && decodedLen != batchLen {
+ err = newErrBatchCorrupted(fmt.Sprintf("invalid records length: %d vs %d", batchLen, decodedLen))
+ }
+ return
+}
+
+func encodeBatchHeader(dst []byte, seq uint64, batchLen int) []byte {
+ dst = ensureBuffer(dst, batchHeaderLen)
+ binary.LittleEndian.PutUint64(dst, seq)
+ binary.LittleEndian.PutUint32(dst[8:], uint32(batchLen))
+ return dst
+}
+
+func decodeBatchHeader(data []byte) (seq uint64, batchLen int, err error) {
+ if len(data) < batchHeaderLen {
+ return 0, 0, newErrBatchCorrupted("too short")
+ }
+
+ seq = binary.LittleEndian.Uint64(data)
+ batchLen = int(binary.LittleEndian.Uint32(data[8:]))
+ if batchLen < 0 {
+ return 0, 0, newErrBatchCorrupted("invalid records length")
+ }
+ return
+}
+
+func batchesLen(batches []*Batch) int {
+ batchLen := 0
+ for _, batch := range batches {
+ batchLen += batch.Len()
+ }
+ return batchLen
+}
+
+func writeBatchesWithHeader(wr io.Writer, batches []*Batch, seq uint64) error {
+ if _, err := wr.Write(encodeBatchHeader(nil, seq, batchesLen(batches))); err != nil {
+ return err
+ }
+ for _, batch := range batches {
+ if _, err := wr.Write(batch.data); err != nil {
+ return err
+ }
+ }
+ return nil
+}
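
A hypothetical usage sketch (not part of the patch above) of the Batch type added in this file; the database path is illustrative, and db.Write is the usual way a batch is applied atomically:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
)

func main() {
	db, err := leveldb.OpenFile("/tmp/example-db", nil) // illustrative path
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Accumulate operations, then apply them in one atomic write.
	batch := new(leveldb.Batch)
	batch.Put([]byte("alpha"), []byte("1"))
	batch.Put([]byte("beta"), []byte("2"))
	batch.Delete([]byte("gamma"))
	if err := db.Write(batch, nil); err != nil {
		panic(err)
	}
	fmt.Println("records written:", batch.Len())
}
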
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go
new file mode 100644
index 000000000..c5940b232
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/cache/cache.go
@@ -0,0 +1,705 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package cache provides the interface and implementation of cache algorithms.
+package cache
+
+import (
+ "sync"
+ "sync/atomic"
+ "unsafe"
+
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// Cacher provides an interface for implementing caching functionality.
+// An implementation must be safe for concurrent use.
+type Cacher interface {
+ // Capacity returns cache capacity.
+ Capacity() int
+
+ // SetCapacity sets cache capacity.
+ SetCapacity(capacity int)
+
+ // Promote promotes the 'cache node'.
+ Promote(n *Node)
+
+ // Ban evicts the 'cache node' and prevents subsequent 'promote'.
+ Ban(n *Node)
+
+ // Evict evicts the 'cache node'.
+ Evict(n *Node)
+
+ // EvictNS evicts 'cache node' with the given namespace.
+ EvictNS(ns uint64)
+
+ // EvictAll evicts all 'cache node'.
+ EvictAll()
+
+ // Close closes the 'cache tree'
+ Close() error
+}
+
+// Value is a 'cacheable object'. It may implement util.Releaser; if
+// so, the Release method will be called once the object is released.
+type Value interface{}
+
+// NamespaceGetter provides convenient wrapper for namespace.
+type NamespaceGetter struct {
+ Cache *Cache
+ NS uint64
+}
+
+// Get simply calls Cache.Get() method.
+func (g *NamespaceGetter) Get(key uint64, setFunc func() (size int, value Value)) *Handle {
+ return g.Cache.Get(g.NS, key, setFunc)
+}
+
+// The hash tables implementation is based on:
+// "Dynamic-Sized Nonblocking Hash Tables", by Yujie Liu,
+// Kunlong Zhang, and Michael Spear.
+// ACM Symposium on Principles of Distributed Computing, Jul 2014.
+
+const (
+ mInitialSize = 1 << 4
+ mOverflowThreshold = 1 << 5
+ mOverflowGrowThreshold = 1 << 7
+)
+
+type mBucket struct {
+ mu sync.Mutex
+ node []*Node
+ frozen bool
+}
+
+func (b *mBucket) freeze() []*Node {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if !b.frozen {
+ b.frozen = true
+ }
+ return b.node
+}
+
+func (b *mBucket) get(r *Cache, h *mNode, hash uint32, ns, key uint64, noset bool) (done, added bool, n *Node) {
+ b.mu.Lock()
+
+ if b.frozen {
+ b.mu.Unlock()
+ return
+ }
+
+ // Scan the node.
+ for _, n := range b.node {
+ if n.hash == hash && n.ns == ns && n.key == key {
+ atomic.AddInt32(&n.ref, 1)
+ b.mu.Unlock()
+ return true, false, n
+ }
+ }
+
+ // Get only.
+ if noset {
+ b.mu.Unlock()
+ return true, false, nil
+ }
+
+ // Create node.
+ n = &Node{
+ r: r,
+ hash: hash,
+ ns: ns,
+ key: key,
+ ref: 1,
+ }
+ // Add node to bucket.
+ b.node = append(b.node, n)
+ bLen := len(b.node)
+ b.mu.Unlock()
+
+ // Update counter.
+ grow := atomic.AddInt32(&r.nodes, 1) >= h.growThreshold
+ if bLen > mOverflowThreshold {
+ grow = grow || atomic.AddInt32(&h.overflow, 1) >= mOverflowGrowThreshold
+ }
+
+ // Grow.
+ if grow && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
+ nhLen := len(h.buckets) << 1
+ nh := &mNode{
+ buckets: make([]unsafe.Pointer, nhLen),
+ mask: uint32(nhLen) - 1,
+ pred: unsafe.Pointer(h),
+ growThreshold: int32(nhLen * mOverflowThreshold),
+ shrinkThreshold: int32(nhLen >> 1),
+ }
+ ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
+ if !ok {
+ panic("BUG: failed swapping head")
+ }
+ go nh.initBuckets()
+ }
+
+ return true, true, n
+}
+
+func (b *mBucket) delete(r *Cache, h *mNode, hash uint32, ns, key uint64) (done, deleted bool) {
+ b.mu.Lock()
+
+ if b.frozen {
+ b.mu.Unlock()
+ return
+ }
+
+ // Scan the node.
+ var (
+ n *Node
+ bLen int
+ )
+ for i := range b.node {
+ n = b.node[i]
+ if n.ns == ns && n.key == key {
+ if atomic.LoadInt32(&n.ref) == 0 {
+ deleted = true
+
+ // Call releaser.
+ if n.value != nil {
+ if r, ok := n.value.(util.Releaser); ok {
+ r.Release()
+ }
+ n.value = nil
+ }
+
+ // Remove node from bucket.
+ b.node = append(b.node[:i], b.node[i+1:]...)
+ bLen = len(b.node)
+ }
+ break
+ }
+ }
+ b.mu.Unlock()
+
+ if deleted {
+ // Call OnDel.
+ for _, f := range n.onDel {
+ f()
+ }
+
+ // Update counter.
+ atomic.AddInt32(&r.size, int32(n.size)*-1)
+ shrink := atomic.AddInt32(&r.nodes, -1) < h.shrinkThreshold
+ if bLen >= mOverflowThreshold {
+ atomic.AddInt32(&h.overflow, -1)
+ }
+
+ // Shrink.
+ if shrink && len(h.buckets) > mInitialSize && atomic.CompareAndSwapInt32(&h.resizeInProgess, 0, 1) {
+ nhLen := len(h.buckets) >> 1
+ nh := &mNode{
+ buckets: make([]unsafe.Pointer, nhLen),
+ mask: uint32(nhLen) - 1,
+ pred: unsafe.Pointer(h),
+ growThreshold: int32(nhLen * mOverflowThreshold),
+ shrinkThreshold: int32(nhLen >> 1),
+ }
+ ok := atomic.CompareAndSwapPointer(&r.mHead, unsafe.Pointer(h), unsafe.Pointer(nh))
+ if !ok {
+ panic("BUG: failed swapping head")
+ }
+ go nh.initBuckets()
+ }
+ }
+
+ return true, deleted
+}
+
+type mNode struct {
+ buckets []unsafe.Pointer // []*mBucket
+ mask uint32
+ pred unsafe.Pointer // *mNode
+ resizeInProgess int32
+
+ overflow int32
+ growThreshold int32
+ shrinkThreshold int32
+}
+
+func (n *mNode) initBucket(i uint32) *mBucket {
+ if b := (*mBucket)(atomic.LoadPointer(&n.buckets[i])); b != nil {
+ return b
+ }
+
+ p := (*mNode)(atomic.LoadPointer(&n.pred))
+ if p != nil {
+ var node []*Node
+ if n.mask > p.mask {
+ // Grow.
+ pb := (*mBucket)(atomic.LoadPointer(&p.buckets[i&p.mask]))
+ if pb == nil {
+ pb = p.initBucket(i & p.mask)
+ }
+ m := pb.freeze()
+ // Split nodes.
+ for _, x := range m {
+ if x.hash&n.mask == i {
+ node = append(node, x)
+ }
+ }
+ } else {
+ // Shrink.
+ pb0 := (*mBucket)(atomic.LoadPointer(&p.buckets[i]))
+ if pb0 == nil {
+ pb0 = p.initBucket(i)
+ }
+ pb1 := (*mBucket)(atomic.LoadPointer(&p.buckets[i+uint32(len(n.buckets))]))
+ if pb1 == nil {
+ pb1 = p.initBucket(i + uint32(len(n.buckets)))
+ }
+ m0 := pb0.freeze()
+ m1 := pb1.freeze()
+ // Merge nodes.
+ node = make([]*Node, 0, len(m0)+len(m1))
+ node = append(node, m0...)
+ node = append(node, m1...)
+ }
+ b := &mBucket{node: node}
+ if atomic.CompareAndSwapPointer(&n.buckets[i], nil, unsafe.Pointer(b)) {
+ if len(node) > mOverflowThreshold {
+ atomic.AddInt32(&n.overflow, int32(len(node)-mOverflowThreshold))
+ }
+ return b
+ }
+ }
+
+ return (*mBucket)(atomic.LoadPointer(&n.buckets[i]))
+}
+
+func (n *mNode) initBuckets() {
+ for i := range n.buckets {
+ n.initBucket(uint32(i))
+ }
+ atomic.StorePointer(&n.pred, nil)
+}
+
+// Cache is a 'cache map'.
+type Cache struct {
+ mu sync.RWMutex
+ mHead unsafe.Pointer // *mNode
+ nodes int32
+ size int32
+ cacher Cacher
+ closed bool
+}
+
+// NewCache creates a new 'cache map'. The cacher is optional and
+// may be nil.
+func NewCache(cacher Cacher) *Cache {
+ h := &mNode{
+ buckets: make([]unsafe.Pointer, mInitialSize),
+ mask: mInitialSize - 1,
+ growThreshold: int32(mInitialSize * mOverflowThreshold),
+ shrinkThreshold: 0,
+ }
+ for i := range h.buckets {
+ h.buckets[i] = unsafe.Pointer(&mBucket{})
+ }
+ r := &Cache{
+ mHead: unsafe.Pointer(h),
+ cacher: cacher,
+ }
+ return r
+}
+
+func (r *Cache) getBucket(hash uint32) (*mNode, *mBucket) {
+ h := (*mNode)(atomic.LoadPointer(&r.mHead))
+ i := hash & h.mask
+ b := (*mBucket)(atomic.LoadPointer(&h.buckets[i]))
+ if b == nil {
+ b = h.initBucket(i)
+ }
+ return h, b
+}
+
+func (r *Cache) delete(n *Node) bool {
+ for {
+ h, b := r.getBucket(n.hash)
+ done, deleted := b.delete(r, h, n.hash, n.ns, n.key)
+ if done {
+ return deleted
+ }
+ }
+ return false
+}
+
+// Nodes returns the number of 'cache node' in the map.
+func (r *Cache) Nodes() int {
+ return int(atomic.LoadInt32(&r.nodes))
+}
+
+// Size returns the sum of all 'cache node' sizes in the map.
+func (r *Cache) Size() int {
+ return int(atomic.LoadInt32(&r.size))
+}
+
+// Capacity returns cache capacity.
+func (r *Cache) Capacity() int {
+ if r.cacher == nil {
+ return 0
+ }
+ return r.cacher.Capacity()
+}
+
+// SetCapacity sets cache capacity.
+func (r *Cache) SetCapacity(capacity int) {
+ if r.cacher != nil {
+ r.cacher.SetCapacity(capacity)
+ }
+}
+
+// Get gets the 'cache node' with the given namespace and key.
+// If the cache node is not found and setFunc is not nil, Get will atomically create
+// the 'cache node' by calling setFunc. Otherwise Get will return nil.
+//
+// The returned 'cache handle' should be released after use by calling Release
+// method.
+func (r *Cache) Get(ns, key uint64, setFunc func() (size int, value Value)) *Handle {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return nil
+ }
+
+ hash := murmur32(ns, key, 0xf00)
+ for {
+ h, b := r.getBucket(hash)
+ done, _, n := b.get(r, h, hash, ns, key, setFunc == nil)
+ if done {
+ if n != nil {
+ n.mu.Lock()
+ if n.value == nil {
+ if setFunc == nil {
+ n.mu.Unlock()
+ n.unref()
+ return nil
+ }
+
+ n.size, n.value = setFunc()
+ if n.value == nil {
+ n.size = 0
+ n.mu.Unlock()
+ n.unref()
+ return nil
+ }
+ atomic.AddInt32(&r.size, int32(n.size))
+ }
+ n.mu.Unlock()
+ if r.cacher != nil {
+ r.cacher.Promote(n)
+ }
+ return &Handle{unsafe.Pointer(n)}
+ }
+
+ break
+ }
+ }
+ return nil
+}
+
+// Delete removes and bans the 'cache node' with the given namespace and key.
+// A banned 'cache node' will never be inserted into the 'cache tree'. The ban
+// only applies to that particular 'cache node', so when a 'cache node'
+// is recreated it will not be banned.
+//
+// If onDel is not nil, then it will be executed if such 'cache node'
+// doesn't exist or once the 'cache node' is released.
+//
+// Delete returns true if such a 'cache node' exists.
+func (r *Cache) Delete(ns, key uint64, onDel func()) bool {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return false
+ }
+
+ hash := murmur32(ns, key, 0xf00)
+ for {
+ h, b := r.getBucket(hash)
+ done, _, n := b.get(r, h, hash, ns, key, true)
+ if done {
+ if n != nil {
+ if onDel != nil {
+ n.mu.Lock()
+ n.onDel = append(n.onDel, onDel)
+ n.mu.Unlock()
+ }
+ if r.cacher != nil {
+ r.cacher.Ban(n)
+ }
+ n.unref()
+ return true
+ }
+
+ break
+ }
+ }
+
+ if onDel != nil {
+ onDel()
+ }
+
+ return false
+}
+
+// Evict evicts 'cache node' with the given namespace and key. This will
+// simply call Cacher.Evict.
+//
+// Evict returns true if such a 'cache node' exists.
+func (r *Cache) Evict(ns, key uint64) bool {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return false
+ }
+
+ hash := murmur32(ns, key, 0xf00)
+ for {
+ h, b := r.getBucket(hash)
+ done, _, n := b.get(r, h, hash, ns, key, true)
+ if done {
+ if n != nil {
+ if r.cacher != nil {
+ r.cacher.Evict(n)
+ }
+ n.unref()
+ return true
+ }
+
+ break
+ }
+ }
+
+ return false
+}
+
+// EvictNS evicts 'cache node' with the given namespace. This will
+// simply call Cacher.EvictNS.
+func (r *Cache) EvictNS(ns uint64) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return
+ }
+
+ if r.cacher != nil {
+ r.cacher.EvictNS(ns)
+ }
+}
+
+// EvictAll evicts all 'cache node'. This will simply call Cacher.EvictAll.
+func (r *Cache) EvictAll() {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+ if r.closed {
+ return
+ }
+
+ if r.cacher != nil {
+ r.cacher.EvictAll()
+ }
+}
+
+// Close closes the 'cache map' and forcefully releases all 'cache node'.
+func (r *Cache) Close() error {
+ r.mu.Lock()
+ if !r.closed {
+ r.closed = true
+
+ h := (*mNode)(r.mHead)
+ h.initBuckets()
+
+ for i := range h.buckets {
+ b := (*mBucket)(h.buckets[i])
+ for _, n := range b.node {
+ // Call releaser.
+ if n.value != nil {
+ if r, ok := n.value.(util.Releaser); ok {
+ r.Release()
+ }
+ n.value = nil
+ }
+
+ // Call OnDel.
+ for _, f := range n.onDel {
+ f()
+ }
+ n.onDel = nil
+ }
+ }
+ }
+ r.mu.Unlock()
+
+ // Avoid deadlock.
+ if r.cacher != nil {
+ if err := r.cacher.Close(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// CloseWeak closes the 'cache map' and evicts all 'cache nodes' from the cacher, but
+// unlike Close it doesn't forcefully release the 'cache nodes'.
+func (r *Cache) CloseWeak() error {
+ r.mu.Lock()
+ if !r.closed {
+ r.closed = true
+ }
+ r.mu.Unlock()
+
+ // Avoid deadlock.
+ if r.cacher != nil {
+ r.cacher.EvictAll()
+ if err := r.cacher.Close(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Node is a 'cache node'.
+type Node struct {
+ r *Cache
+
+ hash uint32
+ ns, key uint64
+
+ mu sync.Mutex
+ size int
+ value Value
+
+ ref int32
+ onDel []func()
+
+ CacheData unsafe.Pointer
+}
+
+// NS returns this 'cache node' namespace.
+func (n *Node) NS() uint64 {
+ return n.ns
+}
+
+// Key returns this 'cache node' key.
+func (n *Node) Key() uint64 {
+ return n.key
+}
+
+// Size returns this 'cache node' size.
+func (n *Node) Size() int {
+ return n.size
+}
+
+// Value returns this 'cache node' value.
+func (n *Node) Value() Value {
+ return n.value
+}
+
+// Ref returns this 'cache node' ref counter.
+func (n *Node) Ref() int32 {
+ return atomic.LoadInt32(&n.ref)
+}
+
+// GetHandle returns a handle for this 'cache node'.
+func (n *Node) GetHandle() *Handle {
+ if atomic.AddInt32(&n.ref, 1) <= 1 {
+ panic("BUG: Node.GetHandle on zero ref")
+ }
+ return &Handle{unsafe.Pointer(n)}
+}
+
+func (n *Node) unref() {
+ if atomic.AddInt32(&n.ref, -1) == 0 {
+ n.r.delete(n)
+ }
+}
+
+func (n *Node) unrefLocked() {
+ if atomic.AddInt32(&n.ref, -1) == 0 {
+ n.r.mu.RLock()
+ if !n.r.closed {
+ n.r.delete(n)
+ }
+ n.r.mu.RUnlock()
+ }
+}
+
+// Handle is a 'cache handle' of a 'cache node'.
+type Handle struct {
+ n unsafe.Pointer // *Node
+}
+
+// Value returns the value of the 'cache node'.
+func (h *Handle) Value() Value {
+ n := (*Node)(atomic.LoadPointer(&h.n))
+ if n != nil {
+ return n.value
+ }
+ return nil
+}
+
+// Release releases this 'cache handle'.
+// It is safe to call release multiple times.
+func (h *Handle) Release() {
+ nPtr := atomic.LoadPointer(&h.n)
+ if nPtr != nil && atomic.CompareAndSwapPointer(&h.n, nPtr, nil) {
+ n := (*Node)(nPtr)
+ n.unrefLocked()
+ }
+}
+
+func murmur32(ns, key uint64, seed uint32) uint32 {
+ const (
+ m = uint32(0x5bd1e995)
+ r = 24
+ )
+
+ k1 := uint32(ns >> 32)
+ k2 := uint32(ns)
+ k3 := uint32(key >> 32)
+ k4 := uint32(key)
+
+ k1 *= m
+ k1 ^= k1 >> r
+ k1 *= m
+
+ k2 *= m
+ k2 ^= k2 >> r
+ k2 *= m
+
+ k3 *= m
+ k3 ^= k3 >> r
+ k3 *= m
+
+ k4 *= m
+ k4 ^= k4 >> r
+ k4 *= m
+
+ h := seed
+
+ h *= m
+ h ^= k1
+ h *= m
+ h ^= k2
+ h *= m
+ h ^= k3
+ h *= m
+ h ^= k4
+
+ h ^= h >> 13
+ h *= m
+ h ^= h >> 15
+
+ return h
+}
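
A hypothetical sketch (not part of the patch above) of the Cache/Handle API defined in this file; cache.NewLRU comes from the lru.go file added just below, and the block loader is invented for illustration:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/cache"
)

func main() {
	// A 'cache map' backed by an LRU cacher with a 16 MiB capacity.
	c := cache.NewCache(cache.NewLRU(16 << 20))
	defer c.Close()

	loadBlock := func() (int, cache.Value) {
		blk := []byte("block contents") // stand-in for data read from disk
		return len(blk), blk
	}

	// The first Get calls setFunc and charges the node's size; later Gets hit the cache.
	h := c.Get(0, 42, loadBlock)
	if h != nil {
		fmt.Printf("cached %d bytes\n", len(h.Value().([]byte)))
		h.Release() // every handle must be released after use
	}
}
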
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go b/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go
new file mode 100644
index 000000000..d9a84cde1
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/cache/lru.go
@@ -0,0 +1,195 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package cache
+
+import (
+ "sync"
+ "unsafe"
+)
+
+type lruNode struct {
+ n *Node
+ h *Handle
+ ban bool
+
+ next, prev *lruNode
+}
+
+func (n *lruNode) insert(at *lruNode) {
+ x := at.next
+ at.next = n
+ n.prev = at
+ n.next = x
+ x.prev = n
+}
+
+func (n *lruNode) remove() {
+ if n.prev != nil {
+ n.prev.next = n.next
+ n.next.prev = n.prev
+ n.prev = nil
+ n.next = nil
+ } else {
+ panic("BUG: removing removed node")
+ }
+}
+
+type lru struct {
+ mu sync.Mutex
+ capacity int
+ used int
+ recent lruNode
+}
+
+func (r *lru) reset() {
+ r.recent.next = &r.recent
+ r.recent.prev = &r.recent
+ r.used = 0
+}
+
+func (r *lru) Capacity() int {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+ return r.capacity
+}
+
+func (r *lru) SetCapacity(capacity int) {
+ var evicted []*lruNode
+
+ r.mu.Lock()
+ r.capacity = capacity
+ for r.used > r.capacity {
+ rn := r.recent.prev
+ if rn == nil {
+ panic("BUG: invalid LRU used or capacity counter")
+ }
+ rn.remove()
+ rn.n.CacheData = nil
+ r.used -= rn.n.Size()
+ evicted = append(evicted, rn)
+ }
+ r.mu.Unlock()
+
+ for _, rn := range evicted {
+ rn.h.Release()
+ }
+}
+
+func (r *lru) Promote(n *Node) {
+ var evicted []*lruNode
+
+ r.mu.Lock()
+ if n.CacheData == nil {
+ if n.Size() <= r.capacity {
+ rn := &lruNode{n: n, h: n.GetHandle()}
+ rn.insert(&r.recent)
+ n.CacheData = unsafe.Pointer(rn)
+ r.used += n.Size()
+
+ for r.used > r.capacity {
+ rn := r.recent.prev
+ if rn == nil {
+ panic("BUG: invalid LRU used or capacity counter")
+ }
+ rn.remove()
+ rn.n.CacheData = nil
+ r.used -= rn.n.Size()
+ evicted = append(evicted, rn)
+ }
+ }
+ } else {
+ rn := (*lruNode)(n.CacheData)
+ if !rn.ban {
+ rn.remove()
+ rn.insert(&r.recent)
+ }
+ }
+ r.mu.Unlock()
+
+ for _, rn := range evicted {
+ rn.h.Release()
+ }
+}
+
+func (r *lru) Ban(n *Node) {
+ r.mu.Lock()
+ if n.CacheData == nil {
+ n.CacheData = unsafe.Pointer(&lruNode{n: n, ban: true})
+ } else {
+ rn := (*lruNode)(n.CacheData)
+ if !rn.ban {
+ rn.remove()
+ rn.ban = true
+ r.used -= rn.n.Size()
+ r.mu.Unlock()
+
+ rn.h.Release()
+ rn.h = nil
+ return
+ }
+ }
+ r.mu.Unlock()
+}
+
+func (r *lru) Evict(n *Node) {
+ r.mu.Lock()
+ rn := (*lruNode)(n.CacheData)
+ if rn == nil || rn.ban {
+ r.mu.Unlock()
+ return
+ }
+ n.CacheData = nil
+ r.mu.Unlock()
+
+ rn.h.Release()
+}
+
+func (r *lru) EvictNS(ns uint64) {
+ var evicted []*lruNode
+
+ r.mu.Lock()
+ for e := r.recent.prev; e != &r.recent; {
+ rn := e
+ e = e.prev
+ if rn.n.NS() == ns {
+ rn.remove()
+ rn.n.CacheData = nil
+ r.used -= rn.n.Size()
+ evicted = append(evicted, rn)
+ }
+ }
+ r.mu.Unlock()
+
+ for _, rn := range evicted {
+ rn.h.Release()
+ }
+}
+
+func (r *lru) EvictAll() {
+ r.mu.Lock()
+ back := r.recent.prev
+ for rn := back; rn != &r.recent; rn = rn.prev {
+ rn.n.CacheData = nil
+ }
+ r.reset()
+ r.mu.Unlock()
+
+ for rn := back; rn != &r.recent; rn = rn.prev {
+ rn.h.Release()
+ }
+}
+
+func (r *lru) Close() error {
+ return nil
+}
+
+// NewLRU creates a new LRU-cache.
+func NewLRU(capacity int) Cacher {
+ r := &lru{capacity: capacity}
+ r.reset()
+ return r
+}
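
A short, hypothetical sketch (not part of the patch above) showing that the LRU cacher's capacity can be adjusted after creation; SetCapacity evicts least-recently-used nodes until the new budget is met:

package main

import "github.com/syndtr/goleveldb/leveldb/cache"

func main() {
	lru := cache.NewLRU(8 << 20) // 8 MiB budget
	c := cache.NewCache(lru)
	defer c.Close()

	// Shrinking the budget evicts from the cold end of the recency list.
	c.SetCapacity(4 << 20)
}
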
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go
new file mode 100644
index 000000000..448402b82
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer.go
@@ -0,0 +1,67 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/comparer"
+)
+
+type iComparer struct {
+ ucmp comparer.Comparer
+}
+
+func (icmp *iComparer) uName() string {
+ return icmp.ucmp.Name()
+}
+
+func (icmp *iComparer) uCompare(a, b []byte) int {
+ return icmp.ucmp.Compare(a, b)
+}
+
+func (icmp *iComparer) uSeparator(dst, a, b []byte) []byte {
+ return icmp.ucmp.Separator(dst, a, b)
+}
+
+func (icmp *iComparer) uSuccessor(dst, b []byte) []byte {
+ return icmp.ucmp.Successor(dst, b)
+}
+
+func (icmp *iComparer) Name() string {
+ return icmp.uName()
+}
+
+func (icmp *iComparer) Compare(a, b []byte) int {
+ x := icmp.uCompare(internalKey(a).ukey(), internalKey(b).ukey())
+ if x == 0 {
+ if m, n := internalKey(a).num(), internalKey(b).num(); m > n {
+ return -1
+ } else if m < n {
+ return 1
+ }
+ }
+ return x
+}
+
+func (icmp *iComparer) Separator(dst, a, b []byte) []byte {
+ ua, ub := internalKey(a).ukey(), internalKey(b).ukey()
+ dst = icmp.uSeparator(dst, ua, ub)
+ if dst != nil && len(dst) < len(ua) && icmp.uCompare(ua, dst) < 0 {
+ // Append earliest possible number.
+ return append(dst, keyMaxNumBytes...)
+ }
+ return nil
+}
+
+func (icmp *iComparer) Successor(dst, b []byte) []byte {
+ ub := internalKey(b).ukey()
+ dst = icmp.uSuccessor(dst, ub)
+ if dst != nil && len(dst) < len(ub) && icmp.uCompare(ub, dst) < 0 {
+ // Append earliest possible number.
+ return append(dst, keyMaxNumBytes...)
+ }
+ return nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go
new file mode 100644
index 000000000..14dddf88d
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/bytes_comparer.go
@@ -0,0 +1,51 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package comparer
+
+import "bytes"
+
+type bytesComparer struct{}
+
+func (bytesComparer) Compare(a, b []byte) int {
+ return bytes.Compare(a, b)
+}
+
+func (bytesComparer) Name() string {
+ return "leveldb.BytewiseComparator"
+}
+
+func (bytesComparer) Separator(dst, a, b []byte) []byte {
+ i, n := 0, len(a)
+ if n > len(b) {
+ n = len(b)
+ }
+ for ; i < n && a[i] == b[i]; i++ {
+ }
+ if i >= n {
+ // Do not shorten if one string is a prefix of the other
+ } else if c := a[i]; c < 0xff && c+1 < b[i] {
+ dst = append(dst, a[:i+1]...)
+ dst[i]++
+ return dst
+ }
+ return nil
+}
+
+func (bytesComparer) Successor(dst, b []byte) []byte {
+ for i, c := range b {
+ if c != 0xff {
+ dst = append(dst, b[:i+1]...)
+ dst[i]++
+ return dst
+ }
+ }
+ return nil
+}
+
+// DefaultComparer is the default implementation of the Comparer interface.
+// It uses the natural ordering, consistent with bytes.Compare.
+var DefaultComparer = bytesComparer{}
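
A worked example (not part of the patch above) of the key-shortening behaviour of DefaultComparer; the expected outputs follow from the byte-wise logic in Separator and Successor:

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/comparer"
)

func main() {
	cmp := comparer.DefaultComparer

	// Separator: a short key x with "abcdef" <= x < "abzzzz".
	fmt.Printf("%q\n", cmp.Separator(nil, []byte("abcdef"), []byte("abzzzz"))) // "abd"

	// Successor: a short key x with x >= "abc".
	fmt.Printf("%q\n", cmp.Successor(nil, []byte("abc"))) // "b"

	// No shortening when one key is a prefix of the other.
	fmt.Println(cmp.Separator(nil, []byte("ab"), []byte("abc"))) // []
}
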
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go
new file mode 100644
index 000000000..14a28f16f
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/comparer/comparer.go
@@ -0,0 +1,57 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package comparer provides interface and implementation for ordering
+// sets of data.
+package comparer
+
+// BasicComparer is the interface that wraps the basic Compare method.
+type BasicComparer interface {
+ // Compare returns -1, 0, or +1 depending on whether a is 'less than',
+ // 'equal to' or 'greater than' b. The two arguments can only be 'equal'
+ // if their contents are exactly equal. Furthermore, the empty slice
+ // must be 'less than' any non-empty slice.
+ Compare(a, b []byte) int
+}
+
+// Comparer defines a total ordering over the space of []byte keys: a 'less
+// than' relationship.
+type Comparer interface {
+ BasicComparer
+
+ // Name returns name of the comparer.
+ //
+ // The Level-DB on-disk format stores the comparer name, and opening a
+ // database with a different comparer from the one it was created with
+ // will result in an error.
+ //
+ // An implementation should switch to a new name whenever the comparer
+ // implementation changes in a way that will cause the relative ordering of
+ // any two keys to change.
+ //
+ // Names starting with "leveldb." are reserved and should not be used
+ // by any users of this package.
+ Name() string
+
+ // Below are advanced functions used to reduce the space requirements
+ // for internal data structures such as index blocks.
+
+ // Separator appends a sequence of bytes x to dst such that a <= x && x < b,
+ // where 'less than' is consistent with Compare. An implementation should
+ // return nil if x is equal to a.
+ //
+ // Neither the contents of a nor of b may be modified in any way. Doing so
+ // may corrupt the internal state.
+ Separator(dst, a, b []byte) []byte
+
+ // Successor appends a sequence of bytes x to dst such that x >= b, where
+ // 'less than' is consistent with Compare. An implementation should return
+ // nil if x is equal to b.
+ //
+ // The contents of b must not be modified in any way. Doing so may
+ // corrupt the internal state.
+ Successor(dst, b []byte) []byte
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db.go b/vendor/github.com/syndtr/goleveldb/leveldb/db.go
new file mode 100644
index 000000000..e7ac06541
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db.go
@@ -0,0 +1,1175 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "container/list"
+ "fmt"
+ "io"
+ "os"
+ "runtime"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/journal"
+ "github.com/syndtr/goleveldb/leveldb/memdb"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/table"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// DB is a LevelDB database.
+type DB struct {
+ // Need 64-bit alignment.
+ seq uint64
+
+ // Stats. Need 64-bit alignment.
+ cWriteDelay int64 // The cumulative duration of write delays
+ cWriteDelayN int32 // The cumulative number of write delays
+ inWritePaused int32 // The indicator whether write operation is paused by compaction
+ aliveSnaps, aliveIters int32
+
+ // Session.
+ s *session
+
+ // MemDB.
+ memMu sync.RWMutex
+ memPool chan *memdb.DB
+ mem, frozenMem *memDB
+ journal *journal.Writer
+ journalWriter storage.Writer
+ journalFd storage.FileDesc
+ frozenJournalFd storage.FileDesc
+ frozenSeq uint64
+
+ // Snapshot.
+ snapsMu sync.Mutex
+ snapsList *list.List
+
+ // Write.
+ batchPool sync.Pool
+ writeMergeC chan writeMerge
+ writeMergedC chan bool
+ writeLockC chan struct{}
+ writeAckC chan error
+ writeDelay time.Duration
+ writeDelayN int
+ tr *Transaction
+
+ // Compaction.
+ compCommitLk sync.Mutex
+ tcompCmdC chan cCmd
+ tcompPauseC chan chan<- struct{}
+ mcompCmdC chan cCmd
+ compErrC chan error
+ compPerErrC chan error
+ compErrSetC chan error
+ compWriteLocking bool
+ compStats cStats
+ memdbMaxLevel int // For testing.
+
+ // Close.
+ closeW sync.WaitGroup
+ closeC chan struct{}
+ closed uint32
+ closer io.Closer
+}
+
+func openDB(s *session) (*DB, error) {
+ s.log("db@open opening")
+ start := time.Now()
+ db := &DB{
+ s: s,
+ // Initial sequence
+ seq: s.stSeqNum,
+ // MemDB
+ memPool: make(chan *memdb.DB, 1),
+ // Snapshot
+ snapsList: list.New(),
+ // Write
+ batchPool: sync.Pool{New: newBatch},
+ writeMergeC: make(chan writeMerge),
+ writeMergedC: make(chan bool),
+ writeLockC: make(chan struct{}, 1),
+ writeAckC: make(chan error),
+ // Compaction
+ tcompCmdC: make(chan cCmd),
+ tcompPauseC: make(chan chan<- struct{}),
+ mcompCmdC: make(chan cCmd),
+ compErrC: make(chan error),
+ compPerErrC: make(chan error),
+ compErrSetC: make(chan error),
+ // Close
+ closeC: make(chan struct{}),
+ }
+
+ // Read-only mode.
+ readOnly := s.o.GetReadOnly()
+
+ if readOnly {
+ // Recover journals (read-only mode).
+ if err := db.recoverJournalRO(); err != nil {
+ return nil, err
+ }
+ } else {
+ // Recover journals.
+ if err := db.recoverJournal(); err != nil {
+ return nil, err
+ }
+
+ // Remove any obsolete files.
+ if err := db.checkAndCleanFiles(); err != nil {
+ // Close journal.
+ if db.journal != nil {
+ db.journal.Close()
+ db.journalWriter.Close()
+ }
+ return nil, err
+ }
+
+ }
+
+ // Doesn't need to be included in the wait group.
+ go db.compactionError()
+ go db.mpoolDrain()
+
+ if readOnly {
+ db.SetReadOnly()
+ } else {
+ db.closeW.Add(2)
+ go db.tCompaction()
+ go db.mCompaction()
+ // go db.jWriter()
+ }
+
+ s.logf("db@open done T·%v", time.Since(start))
+
+ runtime.SetFinalizer(db, (*DB).Close)
+ return db, nil
+}
+
+// Open opens or creates a DB for the given storage.
+// The DB will be created if it does not exist, unless ErrorIfMissing is true.
+// Also, if ErrorIfExist is true and the DB exists, Open will return an
+// os.ErrExist error.
+//
+// Open will return an error with type of ErrCorrupted if corruption
+// detected in the DB. Use errors.IsCorrupted to test whether an error is
+// due to corruption. Corrupted DB can be recovered with Recover function.
+//
+// The returned DB instance is safe for concurrent use.
+// The DB must be closed after use, by calling Close method.
+func Open(stor storage.Storage, o *opt.Options) (db *DB, err error) {
+ s, err := newSession(stor, o)
+ if err != nil {
+ return
+ }
+ defer func() {
+ if err != nil {
+ s.close()
+ s.release()
+ }
+ }()
+
+ err = s.recover()
+ if err != nil {
+ if !os.IsNotExist(err) || s.o.GetErrorIfMissing() {
+ return
+ }
+ err = s.create()
+ if err != nil {
+ return
+ }
+ } else if s.o.GetErrorIfExist() {
+ err = os.ErrExist
+ return
+ }
+
+ return openDB(s)
+}
+
+// OpenFile opens or creates a DB for the given path.
+// The DB will be created if it does not exist, unless ErrorIfMissing is true.
+// Also, if ErrorIfExist is true and the DB exists, OpenFile will return an
+// os.ErrExist error.
+//
+// OpenFile uses standard file-system backed storage implementation as
+// described in the leveldb/storage package.
+//
+// OpenFile will return an error with type of ErrCorrupted if corruption
+// detected in the DB. Use errors.IsCorrupted to test whether an error is
+// due to corruption. Corrupted DB can be recovered with Recover function.
+//
+// The returned DB instance is safe for concurrent use.
+// The DB must be closed after use, by calling Close method.
+func OpenFile(path string, o *opt.Options) (db *DB, err error) {
+ stor, err := storage.OpenFile(path, o.GetReadOnly())
+ if err != nil {
+ return
+ }
+ db, err = Open(stor, o)
+ if err != nil {
+ stor.Close()
+ } else {
+ db.closer = stor
+ }
+ return
+}
+
+// Recover recovers and opens a DB with missing or corrupted manifest files
+// for the given storage. It will ignore any manifest files, valid or not.
+// The DB must already exist or it will return an error.
+// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options.
+//
+// The returned DB instance is safe for concurrent use.
+// The DB must be closed after use, by calling Close method.
+func Recover(stor storage.Storage, o *opt.Options) (db *DB, err error) {
+ s, err := newSession(stor, o)
+ if err != nil {
+ return
+ }
+ defer func() {
+ if err != nil {
+ s.close()
+ s.release()
+ }
+ }()
+
+ err = recoverTable(s, o)
+ if err != nil {
+ return
+ }
+ return openDB(s)
+}
+
+// RecoverFile recovers and opens a DB with missing or corrupted manifest files
+// for the given path. It will ignore any manifest files, valid or not.
+// The DB must already exist or it will return an error.
+// Also, Recover will ignore ErrorIfMissing and ErrorIfExist options.
+//
+// RecoverFile uses standard file-system backed storage implementation as described
+// in the leveldb/storage package.
+//
+// The returned DB instance is safe for concurrent use.
+// The DB must be closed after use, by calling Close method.
+func RecoverFile(path string, o *opt.Options) (db *DB, err error) {
+ stor, err := storage.OpenFile(path, false)
+ if err != nil {
+ return
+ }
+ db, err = Recover(stor, o)
+ if err != nil {
+ stor.Close()
+ } else {
+ db.closer = stor
+ }
+ return
+}
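
A hypothetical sketch (not part of the patch above) of the recovery entry point documented here, for a database whose manifest is missing or corrupted; the path is illustrative:

package main

import "github.com/syndtr/goleveldb/leveldb"

func main() {
	// Rebuild the manifest from the table files, ignoring any existing manifest.
	db, err := leveldb.RecoverFile("/var/data/example-db", nil) // illustrative path
	if err != nil {
		panic(err)
	}
	defer db.Close()
}
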
+
+func recoverTable(s *session, o *opt.Options) error {
+ o = dupOptions(o)
+ // Mask StrictReader, let StrictRecovery do its job.
+ o.Strict &= ^opt.StrictReader
+
+ // Get all tables and sort it by file number.
+ fds, err := s.stor.List(storage.TypeTable)
+ if err != nil {
+ return err
+ }
+ sortFds(fds)
+
+ var (
+ maxSeq uint64
+ recoveredKey, goodKey, corruptedKey, corruptedBlock, droppedTable int
+
+ // We will drop corrupted table.
+ strict = o.GetStrict(opt.StrictRecovery)
+ noSync = o.GetNoSync()
+
+ rec = &sessionRecord{}
+ bpool = util.NewBufferPool(o.GetBlockSize() + 5)
+ )
+ buildTable := func(iter iterator.Iterator) (tmpFd storage.FileDesc, size int64, err error) {
+ tmpFd = s.newTemp()
+ writer, err := s.stor.Create(tmpFd)
+ if err != nil {
+ return
+ }
+ defer func() {
+ writer.Close()
+ if err != nil {
+ s.stor.Remove(tmpFd)
+ tmpFd = storage.FileDesc{}
+ }
+ }()
+
+ // Copy entries.
+ tw := table.NewWriter(writer, o)
+ for iter.Next() {
+ key := iter.Key()
+ if validInternalKey(key) {
+ err = tw.Append(key, iter.Value())
+ if err != nil {
+ return
+ }
+ }
+ }
+ err = iter.Error()
+ if err != nil && !errors.IsCorrupted(err) {
+ return
+ }
+ err = tw.Close()
+ if err != nil {
+ return
+ }
+ if !noSync {
+ err = writer.Sync()
+ if err != nil {
+ return
+ }
+ }
+ size = int64(tw.BytesLen())
+ return
+ }
+ recoverTable := func(fd storage.FileDesc) error {
+ s.logf("table@recovery recovering @%d", fd.Num)
+ reader, err := s.stor.Open(fd)
+ if err != nil {
+ return err
+ }
+ var closed bool
+ defer func() {
+ if !closed {
+ reader.Close()
+ }
+ }()
+
+ // Get file size.
+ size, err := reader.Seek(0, 2)
+ if err != nil {
+ return err
+ }
+
+ var (
+ tSeq uint64
+ tgoodKey, tcorruptedKey, tcorruptedBlock int
+ imin, imax []byte
+ )
+ tr, err := table.NewReader(reader, size, fd, nil, bpool, o)
+ if err != nil {
+ return err
+ }
+ iter := tr.NewIterator(nil, nil)
+ if itererr, ok := iter.(iterator.ErrorCallbackSetter); ok {
+ itererr.SetErrorCallback(func(err error) {
+ if errors.IsCorrupted(err) {
+ s.logf("table@recovery block corruption @%d %q", fd.Num, err)
+ tcorruptedBlock++
+ }
+ })
+ }
+
+ // Scan the table.
+ for iter.Next() {
+ key := iter.Key()
+ _, seq, _, kerr := parseInternalKey(key)
+ if kerr != nil {
+ tcorruptedKey++
+ continue
+ }
+ tgoodKey++
+ if seq > tSeq {
+ tSeq = seq
+ }
+ if imin == nil {
+ imin = append([]byte{}, key...)
+ }
+ imax = append(imax[:0], key...)
+ }
+ if err := iter.Error(); err != nil && !errors.IsCorrupted(err) {
+ iter.Release()
+ return err
+ }
+ iter.Release()
+
+ goodKey += tgoodKey
+ corruptedKey += tcorruptedKey
+ corruptedBlock += tcorruptedBlock
+
+ if strict && (tcorruptedKey > 0 || tcorruptedBlock > 0) {
+ droppedTable++
+ s.logf("table@recovery dropped @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
+ return nil
+ }
+
+ if tgoodKey > 0 {
+ if tcorruptedKey > 0 || tcorruptedBlock > 0 {
+ // Rebuild the table.
+ s.logf("table@recovery rebuilding @%d", fd.Num)
+ iter := tr.NewIterator(nil, nil)
+ tmpFd, newSize, err := buildTable(iter)
+ iter.Release()
+ if err != nil {
+ return err
+ }
+ closed = true
+ reader.Close()
+ if err := s.stor.Rename(tmpFd, fd); err != nil {
+ return err
+ }
+ size = newSize
+ }
+ if tSeq > maxSeq {
+ maxSeq = tSeq
+ }
+ recoveredKey += tgoodKey
+ // Add table to level 0.
+ rec.addTable(0, fd.Num, size, imin, imax)
+ s.logf("table@recovery recovered @%d Gk·%d Ck·%d Cb·%d S·%d Q·%d", fd.Num, tgoodKey, tcorruptedKey, tcorruptedBlock, size, tSeq)
+ } else {
+ droppedTable++
+ s.logf("table@recovery unrecoverable @%d Ck·%d Cb·%d S·%d", fd.Num, tcorruptedKey, tcorruptedBlock, size)
+ }
+
+ return nil
+ }
+
+ // Recover all tables.
+ if len(fds) > 0 {
+ s.logf("table@recovery F·%d", len(fds))
+
+ // Mark file number as used.
+ s.markFileNum(fds[len(fds)-1].Num)
+
+ for _, fd := range fds {
+ if err := recoverTable(fd); err != nil {
+ return err
+ }
+ }
+
+ s.logf("table@recovery recovered F·%d N·%d Gk·%d Ck·%d Q·%d", len(fds), recoveredKey, goodKey, corruptedKey, maxSeq)
+ }
+
+ // Set sequence number.
+ rec.setSeqNum(maxSeq)
+
+ // Create new manifest.
+ if err := s.create(); err != nil {
+ return err
+ }
+
+ // Commit.
+ return s.commit(rec)
+}
+
+func (db *DB) recoverJournal() error {
+ // Get all journals and sort it by file number.
+ rawFds, err := db.s.stor.List(storage.TypeJournal)
+ if err != nil {
+ return err
+ }
+ sortFds(rawFds)
+
+ // Journals that will be recovered.
+ var fds []storage.FileDesc
+ for _, fd := range rawFds {
+ if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum {
+ fds = append(fds, fd)
+ }
+ }
+
+ var (
+ ofd storage.FileDesc // Obsolete file.
+ rec = &sessionRecord{}
+ )
+
+ // Recover journals.
+ if len(fds) > 0 {
+ db.logf("journal@recovery F·%d", len(fds))
+
+ // Mark file number as used.
+ db.s.markFileNum(fds[len(fds)-1].Num)
+
+ var (
+ // Options.
+ strict = db.s.o.GetStrict(opt.StrictJournal)
+ checksum = db.s.o.GetStrict(opt.StrictJournalChecksum)
+ writeBuffer = db.s.o.GetWriteBuffer()
+
+ jr *journal.Reader
+ mdb = memdb.New(db.s.icmp, writeBuffer)
+ buf = &util.Buffer{}
+ batchSeq uint64
+ batchLen int
+ )
+
+ for _, fd := range fds {
+ db.logf("journal@recovery recovering @%d", fd.Num)
+
+ fr, err := db.s.stor.Open(fd)
+ if err != nil {
+ return err
+ }
+
+ // Create or reset journal reader instance.
+ if jr == nil {
+ jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum)
+ } else {
+ jr.Reset(fr, dropper{db.s, fd}, strict, checksum)
+ }
+
+ // Flush memdb and remove obsolete journal file.
+ if !ofd.Zero() {
+ if mdb.Len() > 0 {
+ if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
+ fr.Close()
+ return err
+ }
+ }
+
+ rec.setJournalNum(fd.Num)
+ rec.setSeqNum(db.seq)
+ if err := db.s.commit(rec); err != nil {
+ fr.Close()
+ return err
+ }
+ rec.resetAddedTables()
+
+ db.s.stor.Remove(ofd)
+ ofd = storage.FileDesc{}
+ }
+
+ // Replay journal to memdb.
+ mdb.Reset()
+ for {
+ r, err := jr.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+
+ fr.Close()
+ return errors.SetFd(err, fd)
+ }
+
+ buf.Reset()
+ if _, err := buf.ReadFrom(r); err != nil {
+ if err == io.ErrUnexpectedEOF {
+ // This is error returned due to corruption, with strict == false.
+ continue
+ }
+
+ fr.Close()
+ return errors.SetFd(err, fd)
+ }
+ batchSeq, batchLen, err = decodeBatchToMem(buf.Bytes(), db.seq, mdb)
+ if err != nil {
+ if !strict && errors.IsCorrupted(err) {
+ db.s.logf("journal error: %v (skipped)", err)
+ // We won't apply sequence number as it might be corrupted.
+ continue
+ }
+
+ fr.Close()
+ return errors.SetFd(err, fd)
+ }
+
+ // Save sequence number.
+ db.seq = batchSeq + uint64(batchLen)
+
+ // Flush it if large enough.
+ if mdb.Size() >= writeBuffer {
+ if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
+ fr.Close()
+ return err
+ }
+
+ mdb.Reset()
+ }
+ }
+
+ fr.Close()
+ ofd = fd
+ }
+
+ // Flush the last memdb.
+ if mdb.Len() > 0 {
+ if _, err := db.s.flushMemdb(rec, mdb, 0); err != nil {
+ return err
+ }
+ }
+ }
+
+ // Create a new journal.
+ if _, err := db.newMem(0); err != nil {
+ return err
+ }
+
+ // Commit.
+ rec.setJournalNum(db.journalFd.Num)
+ rec.setSeqNum(db.seq)
+ if err := db.s.commit(rec); err != nil {
+ // Close journal on error.
+ if db.journal != nil {
+ db.journal.Close()
+ db.journalWriter.Close()
+ }
+ return err
+ }
+
+ // Remove the last obsolete journal file.
+ if !ofd.Zero() {
+ db.s.stor.Remove(ofd)
+ }
+
+ return nil
+}
+
+func (db *DB) recoverJournalRO() error {
+ // Get all journals and sort them by file number.
+ rawFds, err := db.s.stor.List(storage.TypeJournal)
+ if err != nil {
+ return err
+ }
+ sortFds(rawFds)
+
+ // Journals that will be recovered.
+ var fds []storage.FileDesc
+ for _, fd := range rawFds {
+ if fd.Num >= db.s.stJournalNum || fd.Num == db.s.stPrevJournalNum {
+ fds = append(fds, fd)
+ }
+ }
+
+ var (
+ // Options.
+ strict = db.s.o.GetStrict(opt.StrictJournal)
+ checksum = db.s.o.GetStrict(opt.StrictJournalChecksum)
+ writeBuffer = db.s.o.GetWriteBuffer()
+
+ mdb = memdb.New(db.s.icmp, writeBuffer)
+ )
+
+ // Recover journals.
+ if len(fds) > 0 {
+ db.logf("journal@recovery RO·Mode F·%d", len(fds))
+
+ var (
+ jr *journal.Reader
+ buf = &util.Buffer{}
+ batchSeq uint64
+ batchLen int
+ )
+
+ for _, fd := range fds {
+ db.logf("journal@recovery recovering @%d", fd.Num)
+
+ fr, err := db.s.stor.Open(fd)
+ if err != nil {
+ return err
+ }
+
+ // Create or reset journal reader instance.
+ if jr == nil {
+ jr = journal.NewReader(fr, dropper{db.s, fd}, strict, checksum)
+ } else {
+ jr.Reset(fr, dropper{db.s, fd}, strict, checksum)
+ }
+
+ // Replay journal to memdb.
+ for {
+ r, err := jr.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+
+ fr.Close()
+ return errors.SetFd(err, fd)
+ }
+
+ buf.Reset()
+ if _, err := buf.ReadFrom(r); err != nil {
+ if err == io.ErrUnexpectedEOF {
+ // This is the error returned due to corruption when strict == false.
+ continue
+ }
+
+ fr.Close()
+ return errors.SetFd(err, fd)
+ }
+ batchSeq, batchLen, err = decodeBatchToMem(buf.Bytes(), db.seq, mdb)
+ if err != nil {
+ if !strict && errors.IsCorrupted(err) {
+ db.s.logf("journal error: %v (skipped)", err)
+ // We won't apply sequence number as it might be corrupted.
+ continue
+ }
+
+ fr.Close()
+ return errors.SetFd(err, fd)
+ }
+
+ // Save sequence number.
+ db.seq = batchSeq + uint64(batchLen)
+ }
+
+ fr.Close()
+ }
+ }
+
+ // Set memDB.
+ db.mem = &memDB{db: db, DB: mdb, ref: 1}
+
+ return nil
+}
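
// Editor's sketch (not part of the vendored source): recoverJournalRO is the
// read-only counterpart of recoverJournal; it replays the journals into a memdb
// without writing a new journal or manifest. Assuming the opt.Options ReadOnly
// flag and a hypothetical path, a caller reaches this path with:
//
//	db, err := leveldb.OpenFile("/tmp/example-ro.db", &opt.Options{ReadOnly: true})
//	if err != nil {
//		// handle error
//	}
//	defer db.Close()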
+
+func memGet(mdb *memdb.DB, ikey internalKey, icmp *iComparer) (ok bool, mv []byte, err error) {
+ mk, mv, err := mdb.Find(ikey)
+ if err == nil {
+ ukey, _, kt, kerr := parseInternalKey(mk)
+ if kerr != nil {
+ // Shouldn't have happened.
+ panic(kerr)
+ }
+ if icmp.uCompare(ukey, ikey.ukey()) == 0 {
+ if kt == keyTypeDel {
+ return true, nil, ErrNotFound
+ }
+ return true, mv, nil
+
+ }
+ } else if err != ErrNotFound {
+ return true, nil, err
+ }
+ return
+}
+
+func (db *DB) get(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (value []byte, err error) {
+ ikey := makeInternalKey(nil, key, seq, keyTypeSeek)
+
+ if auxm != nil {
+ if ok, mv, me := memGet(auxm, ikey, db.s.icmp); ok {
+ return append([]byte{}, mv...), me
+ }
+ }
+
+ em, fm := db.getMems()
+ for _, m := range [...]*memDB{em, fm} {
+ if m == nil {
+ continue
+ }
+ defer m.decref()
+
+ if ok, mv, me := memGet(m.DB, ikey, db.s.icmp); ok {
+ return append([]byte{}, mv...), me
+ }
+ }
+
+ v := db.s.version()
+ value, cSched, err := v.get(auxt, ikey, ro, false)
+ v.release()
+ if cSched {
+ // Trigger table compaction.
+ db.compTrigger(db.tcompCmdC)
+ }
+ return
+}
+
+func nilIfNotFound(err error) error {
+ if err == ErrNotFound {
+ return nil
+ }
+ return err
+}
+
+func (db *DB) has(auxm *memdb.DB, auxt tFiles, key []byte, seq uint64, ro *opt.ReadOptions) (ret bool, err error) {
+ ikey := makeInternalKey(nil, key, seq, keyTypeSeek)
+
+ if auxm != nil {
+ if ok, _, me := memGet(auxm, ikey, db.s.icmp); ok {
+ return me == nil, nilIfNotFound(me)
+ }
+ }
+
+ em, fm := db.getMems()
+ for _, m := range [...]*memDB{em, fm} {
+ if m == nil {
+ continue
+ }
+ defer m.decref()
+
+ if ok, _, me := memGet(m.DB, ikey, db.s.icmp); ok {
+ return me == nil, nilIfNotFound(me)
+ }
+ }
+
+ v := db.s.version()
+ _, cSched, err := v.get(auxt, ikey, ro, true)
+ v.release()
+ if cSched {
+ // Trigger table compaction.
+ db.compTrigger(db.tcompCmdC)
+ }
+ if err == nil {
+ ret = true
+ } else if err == ErrNotFound {
+ err = nil
+ }
+ return
+}
+
+// Get gets the value for the given key. It returns ErrNotFound if the
+// DB does not contain the key.
+//
+// The returned slice is its own copy; it is safe to modify the contents
+// of the returned slice.
+// It is safe to modify the contents of the argument after Get returns.
+func (db *DB) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+ err = db.ok()
+ if err != nil {
+ return
+ }
+
+ se := db.acquireSnapshot()
+ defer db.releaseSnapshot(se)
+ return db.get(nil, nil, key, se.seq, ro)
+}
+
+// Has returns true if the DB does contain the given key.
+//
+// It is safe to modify the contents of the argument after Has returns.
+func (db *DB) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
+ err = db.ok()
+ if err != nil {
+ return
+ }
+
+ se := db.acquireSnapshot()
+ defer db.releaseSnapshot(se)
+ return db.has(nil, nil, key, se.seq, ro)
+}
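
// Editor's sketch (not part of the vendored source): the typical read path through
// the public API documented above. The path and key are hypothetical; error
// handling is abbreviated.
//
//	db, err := leveldb.OpenFile("/tmp/example.db", nil)
//	if err != nil {
//		// handle error
//	}
//	defer db.Close()
//
//	value, err := db.Get([]byte("some-key"), nil)
//	if err == leveldb.ErrNotFound {
//		// key is absent
//	}
//	found, err := db.Has([]byte("some-key"), nil)
//	_, _ = value, found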
+
+// NewIterator returns an iterator for the latest snapshot of the
+// underlying DB.
+// The returned iterator is not safe for concurrent use, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently with modifying its
+// underlying DB. The resultant key/value pairs are guaranteed to be
+// consistent.
+//
+// Slice allows slicing the iterator to only contain keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB. And a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// The iterator must be released after use, by calling Release method.
+//
+// Also read Iterator documentation of the leveldb/iterator package.
+func (db *DB) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+ if err := db.ok(); err != nil {
+ return iterator.NewEmptyIterator(err)
+ }
+
+ se := db.acquireSnapshot()
+ defer db.releaseSnapshot(se)
+ // The iterator holds a 'version' lock; 'version' is immutable, so the snapshot
+ // can be released after the iterator is created.
+ return db.newIterator(nil, nil, se.seq, slice, ro)
+}
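
// Editor's sketch (not part of the vendored source): how the iterator returned by
// NewIterator is typically consumed and released. util.BytesPrefix and the prefix
// value are illustrative.
//
//	iter := db.NewIterator(util.BytesPrefix([]byte("user-")), nil)
//	for iter.Next() {
//		key := iter.Key()     // only valid until the next call to Next
//		value := iter.Value() // only valid until the next call to Next
//		_, _ = key, value
//	}
//	iter.Release()
//	if err := iter.Error(); err != nil {
//		// handle iteration error
//	}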
+
+// GetSnapshot returns the latest snapshot of the underlying DB. A snapshot
+// is a frozen view of the DB state at a particular point in time. The
+// contents of a snapshot are guaranteed to be consistent.
+//
+// The snapshot must be released after use, by calling Release method.
+func (db *DB) GetSnapshot() (*Snapshot, error) {
+ if err := db.ok(); err != nil {
+ return nil, err
+ }
+
+ return db.newSnapshot(), nil
+}
+
+// GetProperty returns value of the given property name.
+//
+// Property names:
+// leveldb.num-files-at-level{n}
+// Returns the number of files at level 'n'.
+// leveldb.stats
+// Returns statistics of the underlying DB.
+// leveldb.iostats
+// Returns statistics of effective disk read and write.
+// leveldb.writedelay
+// Returns cumulative write delay caused by compaction.
+// leveldb.sstables
+// Returns sstables list for each level.
+// leveldb.blockpool
+// Returns block pool stats.
+// leveldb.cachedblock
+// Returns size of cached block.
+// leveldb.openedtables
+// Returns number of opened tables.
+// leveldb.alivesnaps
+// Returns number of alive snapshots.
+// leveldb.aliveiters
+// Returns number of alive iterators.
+func (db *DB) GetProperty(name string) (value string, err error) {
+ err = db.ok()
+ if err != nil {
+ return
+ }
+
+ const prefix = "leveldb."
+ if !strings.HasPrefix(name, prefix) {
+ return "", ErrNotFound
+ }
+ p := name[len(prefix):]
+
+ v := db.s.version()
+ defer v.release()
+
+ numFilesPrefix := "num-files-at-level"
+ switch {
+ case strings.HasPrefix(p, numFilesPrefix):
+ var level uint
+ var rest string
+ n, _ := fmt.Sscanf(p[len(numFilesPrefix):], "%d%s", &level, &rest)
+ if n != 1 {
+ err = ErrNotFound
+ } else {
+ value = fmt.Sprint(v.tLen(int(level)))
+ }
+ case p == "stats":
+ value = "Compactions\n" +
+ " Level | Tables | Size(MB) | Time(sec) | Read(MB) | Write(MB)\n" +
+ "-------+------------+---------------+---------------+---------------+---------------\n"
+ for level, tables := range v.levels {
+ duration, read, write := db.compStats.getStat(level)
+ if len(tables) == 0 && duration == 0 {
+ continue
+ }
+ value += fmt.Sprintf(" %3d | %10d | %13.5f | %13.5f | %13.5f | %13.5f\n",
+ level, len(tables), float64(tables.size())/1048576.0, duration.Seconds(),
+ float64(read)/1048576.0, float64(write)/1048576.0)
+ }
+ case p == "iostats":
+ value = fmt.Sprintf("Read(MB):%.5f Write(MB):%.5f",
+ float64(db.s.stor.reads())/1048576.0,
+ float64(db.s.stor.writes())/1048576.0)
+ case p == "writedelay":
+ writeDelayN, writeDelay := atomic.LoadInt32(&db.cWriteDelayN), time.Duration(atomic.LoadInt64(&db.cWriteDelay))
+ paused := atomic.LoadInt32(&db.inWritePaused) == 1
+ value = fmt.Sprintf("DelayN:%d Delay:%s Paused:%t", writeDelayN, writeDelay, paused)
+ case p == "sstables":
+ for level, tables := range v.levels {
+ value += fmt.Sprintf("--- level %d ---\n", level)
+ for _, t := range tables {
+ value += fmt.Sprintf("%d:%d[%q .. %q]\n", t.fd.Num, t.size, t.imin, t.imax)
+ }
+ }
+ case p == "blockpool":
+ value = fmt.Sprintf("%v", db.s.tops.bpool)
+ case p == "cachedblock":
+ if db.s.tops.bcache != nil {
+ value = fmt.Sprintf("%d", db.s.tops.bcache.Size())
+ } else {
+ value = ""
+ }
+ case p == "openedtables":
+ value = fmt.Sprintf("%d", db.s.tops.cache.Size())
+ case p == "alivesnaps":
+ value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveSnaps))
+ case p == "aliveiters":
+ value = fmt.Sprintf("%d", atomic.LoadInt32(&db.aliveIters))
+ default:
+ err = ErrNotFound
+ }
+
+ return
+}
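
// Editor's sketch (not part of the vendored source): querying two of the property
// names listed above. The level number is illustrative.
//
//	stats, err := db.GetProperty("leveldb.stats")
//	if err != nil {
//		// handle error
//	}
//	n, _ := db.GetProperty("leveldb.num-files-at-level0")
//	_, _ = stats, n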
+
+// DBStats is database statistics.
+type DBStats struct {
+ WriteDelayCount int32
+ WriteDelayDuration time.Duration
+ WritePaused bool
+
+ AliveSnapshots int32
+ AliveIterators int32
+
+ IOWrite uint64
+ IORead uint64
+
+ BlockCacheSize int
+ OpenedTablesCount int
+
+ LevelSizes []int64
+ LevelTablesCounts []int
+ LevelRead []int64
+ LevelWrite []int64
+ LevelDurations []time.Duration
+}
+
+// Stats populates s with database statistics.
+func (db *DB) Stats(s *DBStats) error {
+ err := db.ok()
+ if err != nil {
+ return err
+ }
+
+ s.IORead = db.s.stor.reads()
+ s.IOWrite = db.s.stor.writes()
+ s.WriteDelayCount = atomic.LoadInt32(&db.cWriteDelayN)
+ s.WriteDelayDuration = time.Duration(atomic.LoadInt64(&db.cWriteDelay))
+ s.WritePaused = atomic.LoadInt32(&db.inWritePaused) == 1
+
+ s.OpenedTablesCount = db.s.tops.cache.Size()
+ if db.s.tops.bcache != nil {
+ s.BlockCacheSize = db.s.tops.bcache.Size()
+ } else {
+ s.BlockCacheSize = 0
+ }
+
+ s.AliveIterators = atomic.LoadInt32(&db.aliveIters)
+ s.AliveSnapshots = atomic.LoadInt32(&db.aliveSnaps)
+
+ s.LevelDurations = s.LevelDurations[:0]
+ s.LevelRead = s.LevelRead[:0]
+ s.LevelWrite = s.LevelWrite[:0]
+ s.LevelSizes = s.LevelSizes[:0]
+ s.LevelTablesCounts = s.LevelTablesCounts[:0]
+
+ v := db.s.version()
+ defer v.release()
+
+ for level, tables := range v.levels {
+ duration, read, write := db.compStats.getStat(level)
+ if len(tables) == 0 && duration == 0 {
+ continue
+ }
+ s.LevelDurations = append(s.LevelDurations, duration)
+ s.LevelRead = append(s.LevelRead, read)
+ s.LevelWrite = append(s.LevelWrite, write)
+ s.LevelSizes = append(s.LevelSizes, tables.size())
+ s.LevelTablesCounts = append(s.LevelTablesCounts, len(tables))
+ }
+
+ return nil
+}
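
// Editor's sketch (not part of the vendored source): Stats reuses the caller's
// slices, so a single DBStats value can be polled repeatedly without reallocating.
//
//	var st leveldb.DBStats
//	if err := db.Stats(&st); err != nil {
//		// handle error
//	}
//	fmt.Printf("write delay: %d (%v), open tables: %d\n",
//		st.WriteDelayCount, st.WriteDelayDuration, st.OpenedTablesCount)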
+
+// SizeOf calculates approximate sizes of the given key ranges.
+// The length of the returned sizes is equal to the length of the given
+// ranges. The returned sizes measure storage space usage, so if the user
+// data compresses by a factor of ten, the returned sizes will be one-tenth
+// the size of the corresponding user data size.
+// The results may not include the sizes of recently written data.
+func (db *DB) SizeOf(ranges []util.Range) (Sizes, error) {
+ if err := db.ok(); err != nil {
+ return nil, err
+ }
+
+ v := db.s.version()
+ defer v.release()
+
+ sizes := make(Sizes, 0, len(ranges))
+ for _, r := range ranges {
+ imin := makeInternalKey(nil, r.Start, keyMaxSeq, keyTypeSeek)
+ imax := makeInternalKey(nil, r.Limit, keyMaxSeq, keyTypeSeek)
+ start, err := v.offsetOf(imin)
+ if err != nil {
+ return nil, err
+ }
+ limit, err := v.offsetOf(imax)
+ if err != nil {
+ return nil, err
+ }
+ var size int64
+ if limit >= start {
+ size = limit - start
+ }
+ sizes = append(sizes, size)
+ }
+
+ return sizes, nil
+}
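
// Editor's sketch (not part of the vendored source): estimating the on-disk size of
// a key range. The range bounds are hypothetical; recently written data still in
// the memdb is not counted, as noted above.
//
//	sizes, err := db.SizeOf([]util.Range{
//		{Start: []byte("a"), Limit: []byte("m")},
//	})
//	if err != nil {
//		// handle error
//	}
//	approx := sizes[0] // approximate bytes used by keys in ["a", "m")
//	_ = approx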
+
+// Close closes the DB. This will also release any outstanding snapshots,
+// abort any in-flight compactions and discard any open transaction.
+//
+// It is not safe to close a DB until all outstanding iterators are released.
+// It is valid to call Close multiple times. Other methods should not be
+// called after the DB has been closed.
+func (db *DB) Close() error {
+ if !db.setClosed() {
+ return ErrClosed
+ }
+
+ start := time.Now()
+ db.log("db@close closing")
+
+ // Clear the finalizer.
+ runtime.SetFinalizer(db, nil)
+
+ // Get compaction error.
+ var err error
+ select {
+ case err = <-db.compErrC:
+ if err == ErrReadOnly {
+ err = nil
+ }
+ default:
+ }
+
+ // Signal all goroutines.
+ close(db.closeC)
+
+ // Discard open transaction.
+ if db.tr != nil {
+ db.tr.Discard()
+ }
+
+ // Acquire writer lock.
+ db.writeLockC <- struct{}{}
+
+ // Wait for all goroutines to exit.
+ db.closeW.Wait()
+
+ // Closes journal.
+ if db.journal != nil {
+ db.journal.Close()
+ db.journalWriter.Close()
+ db.journal = nil
+ db.journalWriter = nil
+ }
+
+ if db.writeDelayN > 0 {
+ db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
+ }
+
+ // Close session.
+ db.s.close()
+ db.logf("db@close done T·%v", time.Since(start))
+ db.s.release()
+
+ if db.closer != nil {
+ if err1 := db.closer.Close(); err == nil {
+ err = err1
+ }
+ db.closer = nil
+ }
+
+ // Clear memdbs.
+ db.clearMems()
+
+ return err
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go
new file mode 100644
index 000000000..28e50906a
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_compaction.go
@@ -0,0 +1,860 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "sync"
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+var (
+ errCompactionTransactExiting = errors.New("leveldb: compaction transact exiting")
+)
+
+type cStat struct {
+ duration time.Duration
+ read int64
+ write int64
+}
+
+func (p *cStat) add(n *cStatStaging) {
+ p.duration += n.duration
+ p.read += n.read
+ p.write += n.write
+}
+
+func (p *cStat) get() (duration time.Duration, read, write int64) {
+ return p.duration, p.read, p.write
+}
+
+type cStatStaging struct {
+ start time.Time
+ duration time.Duration
+ on bool
+ read int64
+ write int64
+}
+
+func (p *cStatStaging) startTimer() {
+ if !p.on {
+ p.start = time.Now()
+ p.on = true
+ }
+}
+
+func (p *cStatStaging) stopTimer() {
+ if p.on {
+ p.duration += time.Since(p.start)
+ p.on = false
+ }
+}
+
+type cStats struct {
+ lk sync.Mutex
+ stats []cStat
+}
+
+func (p *cStats) addStat(level int, n *cStatStaging) {
+ p.lk.Lock()
+ if level >= len(p.stats) {
+ newStats := make([]cStat, level+1)
+ copy(newStats, p.stats)
+ p.stats = newStats
+ }
+ p.stats[level].add(n)
+ p.lk.Unlock()
+}
+
+func (p *cStats) getStat(level int) (duration time.Duration, read, write int64) {
+ p.lk.Lock()
+ defer p.lk.Unlock()
+ if level < len(p.stats) {
+ return p.stats[level].get()
+ }
+ return
+}
+
+func (db *DB) compactionError() {
+ var err error
+noerr:
+ // No error.
+ for {
+ select {
+ case err = <-db.compErrSetC:
+ switch {
+ case err == nil:
+ case err == ErrReadOnly, errors.IsCorrupted(err):
+ goto hasperr
+ default:
+ goto haserr
+ }
+ case <-db.closeC:
+ return
+ }
+ }
+haserr:
+ // Transient error.
+ for {
+ select {
+ case db.compErrC <- err:
+ case err = <-db.compErrSetC:
+ switch {
+ case err == nil:
+ goto noerr
+ case err == ErrReadOnly, errors.IsCorrupted(err):
+ goto hasperr
+ default:
+ }
+ case <-db.closeC:
+ return
+ }
+ }
+hasperr:
+ // Persistent error.
+ for {
+ select {
+ case db.compErrC <- err:
+ case db.compPerErrC <- err:
+ case db.writeLockC <- struct{}{}:
+ // Hold write lock, so that write won't pass-through.
+ db.compWriteLocking = true
+ case <-db.closeC:
+ if db.compWriteLocking {
+ // We should release the lock or Close will hang.
+ <-db.writeLockC
+ }
+ return
+ }
+ }
+}
+
+type compactionTransactCounter int
+
+func (cnt *compactionTransactCounter) incr() {
+ *cnt++
+}
+
+type compactionTransactInterface interface {
+ run(cnt *compactionTransactCounter) error
+ revert() error
+}
+
+func (db *DB) compactionTransact(name string, t compactionTransactInterface) {
+ defer func() {
+ if x := recover(); x != nil {
+ if x == errCompactionTransactExiting {
+ if err := t.revert(); err != nil {
+ db.logf("%s revert error %q", name, err)
+ }
+ }
+ panic(x)
+ }
+ }()
+
+ const (
+ backoffMin = 1 * time.Second
+ backoffMax = 8 * time.Second
+ backoffMul = 2 * time.Second
+ )
+ var (
+ backoff = backoffMin
+ backoffT = time.NewTimer(backoff)
+ lastCnt = compactionTransactCounter(0)
+
+ disableBackoff = db.s.o.GetDisableCompactionBackoff()
+ )
+ for n := 0; ; n++ {
+ // Check whether the DB is closed.
+ if db.isClosed() {
+ db.logf("%s exiting", name)
+ db.compactionExitTransact()
+ } else if n > 0 {
+ db.logf("%s retrying N·%d", name, n)
+ }
+
+ // Execute.
+ cnt := compactionTransactCounter(0)
+ err := t.run(&cnt)
+ if err != nil {
+ db.logf("%s error I·%d %q", name, cnt, err)
+ }
+
+ // Set compaction error status.
+ select {
+ case db.compErrSetC <- err:
+ case perr := <-db.compPerErrC:
+ if err != nil {
+ db.logf("%s exiting (persistent error %q)", name, perr)
+ db.compactionExitTransact()
+ }
+ case <-db.closeC:
+ db.logf("%s exiting", name)
+ db.compactionExitTransact()
+ }
+ if err == nil {
+ return
+ }
+ if errors.IsCorrupted(err) {
+ db.logf("%s exiting (corruption detected)", name)
+ db.compactionExitTransact()
+ }
+
+ if !disableBackoff {
+ // Reset backoff duration if counter is advancing.
+ if cnt > lastCnt {
+ backoff = backoffMin
+ lastCnt = cnt
+ }
+
+ // Backoff.
+ backoffT.Reset(backoff)
+ if backoff < backoffMax {
+ backoff *= backoffMul
+ if backoff > backoffMax {
+ backoff = backoffMax
+ }
+ }
+ select {
+ case <-backoffT.C:
+ case <-db.closeC:
+ db.logf("%s exiting", name)
+ db.compactionExitTransact()
+ }
+ }
+ }
+}
+
+type compactionTransactFunc struct {
+ runFunc func(cnt *compactionTransactCounter) error
+ revertFunc func() error
+}
+
+func (t *compactionTransactFunc) run(cnt *compactionTransactCounter) error {
+ return t.runFunc(cnt)
+}
+
+func (t *compactionTransactFunc) revert() error {
+ if t.revertFunc != nil {
+ return t.revertFunc()
+ }
+ return nil
+}
+
+func (db *DB) compactionTransactFunc(name string, run func(cnt *compactionTransactCounter) error, revert func() error) {
+ db.compactionTransact(name, &compactionTransactFunc{run, revert})
+}
+
+func (db *DB) compactionExitTransact() {
+ panic(errCompactionTransactExiting)
+}
+
+func (db *DB) compactionCommit(name string, rec *sessionRecord) {
+ db.compCommitLk.Lock()
+ defer db.compCommitLk.Unlock() // Defer is necessary.
+ db.compactionTransactFunc(name+"@commit", func(cnt *compactionTransactCounter) error {
+ return db.s.commit(rec)
+ }, nil)
+}
+
+func (db *DB) memCompaction() {
+ mdb := db.getFrozenMem()
+ if mdb == nil {
+ return
+ }
+ defer mdb.decref()
+
+ db.logf("memdb@flush N·%d S·%s", mdb.Len(), shortenb(mdb.Size()))
+
+ // Don't compact empty memdb.
+ if mdb.Len() == 0 {
+ db.logf("memdb@flush skipping")
+ // drop frozen memdb
+ db.dropFrozenMem()
+ return
+ }
+
+ // Pause table compaction.
+ resumeC := make(chan struct{})
+ select {
+ case db.tcompPauseC <- (chan<- struct{})(resumeC):
+ case <-db.compPerErrC:
+ close(resumeC)
+ resumeC = nil
+ case <-db.closeC:
+ db.compactionExitTransact()
+ }
+
+ var (
+ rec = &sessionRecord{}
+ stats = &cStatStaging{}
+ flushLevel int
+ )
+
+ // Generate tables.
+ db.compactionTransactFunc("memdb@flush", func(cnt *compactionTransactCounter) (err error) {
+ stats.startTimer()
+ flushLevel, err = db.s.flushMemdb(rec, mdb.DB, db.memdbMaxLevel)
+ stats.stopTimer()
+ return
+ }, func() error {
+ for _, r := range rec.addedTables {
+ db.logf("memdb@flush revert @%d", r.num)
+ if err := db.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: r.num}); err != nil {
+ return err
+ }
+ }
+ return nil
+ })
+
+ rec.setJournalNum(db.journalFd.Num)
+ rec.setSeqNum(db.frozenSeq)
+
+ // Commit.
+ stats.startTimer()
+ db.compactionCommit("memdb", rec)
+ stats.stopTimer()
+
+ db.logf("memdb@flush committed F·%d T·%v", len(rec.addedTables), stats.duration)
+
+ for _, r := range rec.addedTables {
+ stats.write += r.size
+ }
+ db.compStats.addStat(flushLevel, stats)
+
+ // Drop frozen memdb.
+ db.dropFrozenMem()
+
+ // Resume table compaction.
+ if resumeC != nil {
+ select {
+ case <-resumeC:
+ close(resumeC)
+ case <-db.closeC:
+ db.compactionExitTransact()
+ }
+ }
+
+ // Trigger table compaction.
+ db.compTrigger(db.tcompCmdC)
+}
+
+type tableCompactionBuilder struct {
+ db *DB
+ s *session
+ c *compaction
+ rec *sessionRecord
+ stat0, stat1 *cStatStaging
+
+ snapHasLastUkey bool
+ snapLastUkey []byte
+ snapLastSeq uint64
+ snapIter int
+ snapKerrCnt int
+ snapDropCnt int
+
+ kerrCnt int
+ dropCnt int
+
+ minSeq uint64
+ strict bool
+ tableSize int
+
+ tw *tWriter
+}
+
+func (b *tableCompactionBuilder) appendKV(key, value []byte) error {
+ // Create new table if not already.
+ if b.tw == nil {
+ // Check for pause event.
+ if b.db != nil {
+ select {
+ case ch := <-b.db.tcompPauseC:
+ b.db.pauseCompaction(ch)
+ case <-b.db.closeC:
+ b.db.compactionExitTransact()
+ default:
+ }
+ }
+
+ // Create new table.
+ var err error
+ b.tw, err = b.s.tops.create()
+ if err != nil {
+ return err
+ }
+ }
+
+ // Write key/value into table.
+ return b.tw.append(key, value)
+}
+
+func (b *tableCompactionBuilder) needFlush() bool {
+ return b.tw.tw.BytesLen() >= b.tableSize
+}
+
+func (b *tableCompactionBuilder) flush() error {
+ t, err := b.tw.finish()
+ if err != nil {
+ return err
+ }
+ b.rec.addTableFile(b.c.sourceLevel+1, t)
+ b.stat1.write += t.size
+ b.s.logf("table@build created L%d@%d N·%d S·%s %q:%q", b.c.sourceLevel+1, t.fd.Num, b.tw.tw.EntriesLen(), shortenb(int(t.size)), t.imin, t.imax)
+ b.tw = nil
+ return nil
+}
+
+func (b *tableCompactionBuilder) cleanup() {
+ if b.tw != nil {
+ b.tw.drop()
+ b.tw = nil
+ }
+}
+
+func (b *tableCompactionBuilder) run(cnt *compactionTransactCounter) error {
+ snapResumed := b.snapIter > 0
+ hasLastUkey := b.snapHasLastUkey // The key might have zero length, so this is necessary.
+ lastUkey := append([]byte{}, b.snapLastUkey...)
+ lastSeq := b.snapLastSeq
+ b.kerrCnt = b.snapKerrCnt
+ b.dropCnt = b.snapDropCnt
+ // Restore compaction state.
+ b.c.restore()
+
+ defer b.cleanup()
+
+ b.stat1.startTimer()
+ defer b.stat1.stopTimer()
+
+ iter := b.c.newIterator()
+ defer iter.Release()
+ for i := 0; iter.Next(); i++ {
+ // Incr transact counter.
+ cnt.incr()
+
+ // Skip until last state.
+ if i < b.snapIter {
+ continue
+ }
+
+ resumed := false
+ if snapResumed {
+ resumed = true
+ snapResumed = false
+ }
+
+ ikey := iter.Key()
+ ukey, seq, kt, kerr := parseInternalKey(ikey)
+
+ if kerr == nil {
+ shouldStop := !resumed && b.c.shouldStopBefore(ikey)
+
+ if !hasLastUkey || b.s.icmp.uCompare(lastUkey, ukey) != 0 {
+ // First occurrence of this user key.
+
+ // Only rotate tables if ukey doesn't hop across.
+ if b.tw != nil && (shouldStop || b.needFlush()) {
+ if err := b.flush(); err != nil {
+ return err
+ }
+
+ // Creates snapshot of the state.
+ b.c.save()
+ b.snapHasLastUkey = hasLastUkey
+ b.snapLastUkey = append(b.snapLastUkey[:0], lastUkey...)
+ b.snapLastSeq = lastSeq
+ b.snapIter = i
+ b.snapKerrCnt = b.kerrCnt
+ b.snapDropCnt = b.dropCnt
+ }
+
+ hasLastUkey = true
+ lastUkey = append(lastUkey[:0], ukey...)
+ lastSeq = keyMaxSeq
+ }
+
+ switch {
+ case lastSeq <= b.minSeq:
+ // Dropped because a newer entry for the same user key exists.
+ fallthrough // (A)
+ case kt == keyTypeDel && seq <= b.minSeq && b.c.baseLevelForKey(lastUkey):
+ // For this user key:
+ // (1) there is no data in higher levels
+ // (2) data in lower levels will have larger seq numbers
+ // (3) data in layers that are being compacted here and have
+ // smaller seq numbers will be dropped in the next
+ // few iterations of this loop (by rule (A) above).
+ // Therefore this deletion marker is obsolete and can be dropped.
+ lastSeq = seq
+ b.dropCnt++
+ continue
+ default:
+ lastSeq = seq
+ }
+ } else {
+ if b.strict {
+ return kerr
+ }
+
+ // Don't drop corrupted keys.
+ hasLastUkey = false
+ lastUkey = lastUkey[:0]
+ lastSeq = keyMaxSeq
+ b.kerrCnt++
+ }
+
+ if err := b.appendKV(ikey, iter.Value()); err != nil {
+ return err
+ }
+ }
+
+ if err := iter.Error(); err != nil {
+ return err
+ }
+
+ // Finish last table.
+ if b.tw != nil && !b.tw.empty() {
+ return b.flush()
+ }
+ return nil
+}
+
+func (b *tableCompactionBuilder) revert() error {
+ for _, at := range b.rec.addedTables {
+ b.s.logf("table@build revert @%d", at.num)
+ if err := b.s.stor.Remove(storage.FileDesc{Type: storage.TypeTable, Num: at.num}); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (db *DB) tableCompaction(c *compaction, noTrivial bool) {
+ defer c.release()
+
+ rec := &sessionRecord{}
+ rec.addCompPtr(c.sourceLevel, c.imax)
+
+ if !noTrivial && c.trivial() {
+ t := c.levels[0][0]
+ db.logf("table@move L%d@%d -> L%d", c.sourceLevel, t.fd.Num, c.sourceLevel+1)
+ rec.delTable(c.sourceLevel, t.fd.Num)
+ rec.addTableFile(c.sourceLevel+1, t)
+ db.compactionCommit("table-move", rec)
+ return
+ }
+
+ var stats [2]cStatStaging
+ for i, tables := range c.levels {
+ for _, t := range tables {
+ stats[i].read += t.size
+ // Insert deleted tables into record
+ rec.delTable(c.sourceLevel+i, t.fd.Num)
+ }
+ }
+ sourceSize := int(stats[0].read + stats[1].read)
+ minSeq := db.minSeq()
+ db.logf("table@compaction L%d·%d -> L%d·%d S·%s Q·%d", c.sourceLevel, len(c.levels[0]), c.sourceLevel+1, len(c.levels[1]), shortenb(sourceSize), minSeq)
+
+ b := &tableCompactionBuilder{
+ db: db,
+ s: db.s,
+ c: c,
+ rec: rec,
+ stat1: &stats[1],
+ minSeq: minSeq,
+ strict: db.s.o.GetStrict(opt.StrictCompaction),
+ tableSize: db.s.o.GetCompactionTableSize(c.sourceLevel + 1),
+ }
+ db.compactionTransact("table@build", b)
+
+ // Commit.
+ stats[1].startTimer()
+ db.compactionCommit("table", rec)
+ stats[1].stopTimer()
+
+ resultSize := int(stats[1].write)
+ db.logf("table@compaction committed F%s S%s Ke·%d D·%d T·%v", sint(len(rec.addedTables)-len(rec.deletedTables)), sshortenb(resultSize-sourceSize), b.kerrCnt, b.dropCnt, stats[1].duration)
+
+ // Save compaction stats
+ for i := range stats {
+ db.compStats.addStat(c.sourceLevel+1, &stats[i])
+ }
+}
+
+func (db *DB) tableRangeCompaction(level int, umin, umax []byte) error {
+ db.logf("table@compaction range L%d %q:%q", level, umin, umax)
+ if level >= 0 {
+ if c := db.s.getCompactionRange(level, umin, umax, true); c != nil {
+ db.tableCompaction(c, true)
+ }
+ } else {
+ // Retry until nothing to compact.
+ for {
+ compacted := false
+
+ // Scan for maximum level with overlapped tables.
+ v := db.s.version()
+ m := 1
+ for i := m; i < len(v.levels); i++ {
+ tables := v.levels[i]
+ if tables.overlaps(db.s.icmp, umin, umax, false) {
+ m = i
+ }
+ }
+ v.release()
+
+ for level := 0; level < m; level++ {
+ if c := db.s.getCompactionRange(level, umin, umax, false); c != nil {
+ db.tableCompaction(c, true)
+ compacted = true
+ }
+ }
+
+ if !compacted {
+ break
+ }
+ }
+ }
+
+ return nil
+}
+
+func (db *DB) tableAutoCompaction() {
+ if c := db.s.pickCompaction(); c != nil {
+ db.tableCompaction(c, false)
+ }
+}
+
+func (db *DB) tableNeedCompaction() bool {
+ v := db.s.version()
+ defer v.release()
+ return v.needCompaction()
+}
+
+// resumeWrite reports whether write operations should be resumed once enough level-0 files have been compacted.
+func (db *DB) resumeWrite() bool {
+ v := db.s.version()
+ defer v.release()
+ if v.tLen(0) < db.s.o.GetWriteL0PauseTrigger() {
+ return true
+ }
+ return false
+}
+
+func (db *DB) pauseCompaction(ch chan<- struct{}) {
+ select {
+ case ch <- struct{}{}:
+ case <-db.closeC:
+ db.compactionExitTransact()
+ }
+}
+
+type cCmd interface {
+ ack(err error)
+}
+
+type cAuto struct {
+ // Note: for table compaction, an empty ackC indicates a compaction-waiting command.
+ ackC chan<- error
+}
+
+func (r cAuto) ack(err error) {
+ if r.ackC != nil {
+ defer func() {
+ recover()
+ }()
+ r.ackC <- err
+ }
+}
+
+type cRange struct {
+ level int
+ min, max []byte
+ ackC chan<- error
+}
+
+func (r cRange) ack(err error) {
+ if r.ackC != nil {
+ defer func() {
+ recover()
+ }()
+ r.ackC <- err
+ }
+}
+
+// This will trigger auto compaction but will not wait for it.
+func (db *DB) compTrigger(compC chan<- cCmd) {
+ select {
+ case compC <- cAuto{}:
+ default:
+ }
+}
+
+// This will trigger auto compaction and/or wait for all compaction to be done.
+func (db *DB) compTriggerWait(compC chan<- cCmd) (err error) {
+ ch := make(chan error)
+ defer close(ch)
+ // Send cmd.
+ select {
+ case compC <- cAuto{ch}:
+ case err = <-db.compErrC:
+ return
+ case <-db.closeC:
+ return ErrClosed
+ }
+ // Wait cmd.
+ select {
+ case err = <-ch:
+ case err = <-db.compErrC:
+ case <-db.closeC:
+ return ErrClosed
+ }
+ return err
+}
+
+// Send range compaction request.
+func (db *DB) compTriggerRange(compC chan<- cCmd, level int, min, max []byte) (err error) {
+ ch := make(chan error)
+ defer close(ch)
+ // Send cmd.
+ select {
+ case compC <- cRange{level, min, max, ch}:
+ case err := <-db.compErrC:
+ return err
+ case <-db.closeC:
+ return ErrClosed
+ }
+ // Wait cmd.
+ select {
+ case err = <-ch:
+ case err = <-db.compErrC:
+ case <-db.closeC:
+ return ErrClosed
+ }
+ return err
+}
+
+func (db *DB) mCompaction() {
+ var x cCmd
+
+ defer func() {
+ if x := recover(); x != nil {
+ if x != errCompactionTransactExiting {
+ panic(x)
+ }
+ }
+ if x != nil {
+ x.ack(ErrClosed)
+ }
+ db.closeW.Done()
+ }()
+
+ for {
+ select {
+ case x = <-db.mcompCmdC:
+ switch x.(type) {
+ case cAuto:
+ db.memCompaction()
+ x.ack(nil)
+ x = nil
+ default:
+ panic("leveldb: unknown command")
+ }
+ case <-db.closeC:
+ return
+ }
+ }
+}
+
+func (db *DB) tCompaction() {
+ var (
+ x cCmd
+ ackQ, waitQ []cCmd
+ )
+
+ defer func() {
+ if x := recover(); x != nil {
+ if x != errCompactionTransactExiting {
+ panic(x)
+ }
+ }
+ for i := range ackQ {
+ ackQ[i].ack(ErrClosed)
+ ackQ[i] = nil
+ }
+ for i := range waitQ {
+ waitQ[i].ack(ErrClosed)
+ waitQ[i] = nil
+ }
+ if x != nil {
+ x.ack(ErrClosed)
+ }
+ db.closeW.Done()
+ }()
+
+ for {
+ if db.tableNeedCompaction() {
+ select {
+ case x = <-db.tcompCmdC:
+ case ch := <-db.tcompPauseC:
+ db.pauseCompaction(ch)
+ continue
+ case <-db.closeC:
+ return
+ default:
+ }
+ // Resume write operation as soon as possible.
+ if len(waitQ) > 0 && db.resumeWrite() {
+ for i := range waitQ {
+ waitQ[i].ack(nil)
+ waitQ[i] = nil
+ }
+ waitQ = waitQ[:0]
+ }
+ } else {
+ for i := range ackQ {
+ ackQ[i].ack(nil)
+ ackQ[i] = nil
+ }
+ ackQ = ackQ[:0]
+ for i := range waitQ {
+ waitQ[i].ack(nil)
+ waitQ[i] = nil
+ }
+ waitQ = waitQ[:0]
+ select {
+ case x = <-db.tcompCmdC:
+ case ch := <-db.tcompPauseC:
+ db.pauseCompaction(ch)
+ continue
+ case <-db.closeC:
+ return
+ }
+ }
+ if x != nil {
+ switch cmd := x.(type) {
+ case cAuto:
+ if cmd.ackC != nil {
+ waitQ = append(waitQ, x)
+ } else {
+ ackQ = append(ackQ, x)
+ }
+ case cRange:
+ x.ack(db.tableRangeCompaction(cmd.level, cmd.min, cmd.max))
+ default:
+ panic("leveldb: unknown command")
+ }
+ x = nil
+ }
+ db.tableAutoCompaction()
+ }
+}
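
// Editor's sketch (not part of the vendored source): the range-compaction machinery
// above (tableRangeCompaction driven via compTriggerRange) is normally reached
// through the public CompactRange method. The bounds shown are hypothetical; a
// zero-value range compacts the whole key space.
//
//	if err := db.CompactRange(util.Range{Start: []byte("a"), Limit: []byte("z")}); err != nil {
//		// handle error
//	}
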
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go
new file mode 100644
index 000000000..03c24cdab
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_iter.go
@@ -0,0 +1,360 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "errors"
+ "math/rand"
+ "runtime"
+ "sync"
+ "sync/atomic"
+
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var (
+ errInvalidInternalKey = errors.New("leveldb: Iterator: invalid internal key")
+)
+
+type memdbReleaser struct {
+ once sync.Once
+ m *memDB
+}
+
+func (mr *memdbReleaser) Release() {
+ mr.once.Do(func() {
+ mr.m.decref()
+ })
+}
+
+func (db *DB) newRawIterator(auxm *memDB, auxt tFiles, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+ strict := opt.GetStrict(db.s.o.Options, ro, opt.StrictReader)
+ em, fm := db.getMems()
+ v := db.s.version()
+
+ tableIts := v.getIterators(slice, ro)
+ n := len(tableIts) + len(auxt) + 3
+ its := make([]iterator.Iterator, 0, n)
+
+ if auxm != nil {
+ ami := auxm.NewIterator(slice)
+ ami.SetReleaser(&memdbReleaser{m: auxm})
+ its = append(its, ami)
+ }
+ for _, t := range auxt {
+ its = append(its, v.s.tops.newIterator(t, slice, ro))
+ }
+
+ emi := em.NewIterator(slice)
+ emi.SetReleaser(&memdbReleaser{m: em})
+ its = append(its, emi)
+ if fm != nil {
+ fmi := fm.NewIterator(slice)
+ fmi.SetReleaser(&memdbReleaser{m: fm})
+ its = append(its, fmi)
+ }
+ its = append(its, tableIts...)
+ mi := iterator.NewMergedIterator(its, db.s.icmp, strict)
+ mi.SetReleaser(&versionReleaser{v: v})
+ return mi
+}
+
+func (db *DB) newIterator(auxm *memDB, auxt tFiles, seq uint64, slice *util.Range, ro *opt.ReadOptions) *dbIter {
+ var islice *util.Range
+ if slice != nil {
+ islice = &util.Range{}
+ if slice.Start != nil {
+ islice.Start = makeInternalKey(nil, slice.Start, keyMaxSeq, keyTypeSeek)
+ }
+ if slice.Limit != nil {
+ islice.Limit = makeInternalKey(nil, slice.Limit, keyMaxSeq, keyTypeSeek)
+ }
+ }
+ rawIter := db.newRawIterator(auxm, auxt, islice, ro)
+ iter := &dbIter{
+ db: db,
+ icmp: db.s.icmp,
+ iter: rawIter,
+ seq: seq,
+ strict: opt.GetStrict(db.s.o.Options, ro, opt.StrictReader),
+ key: make([]byte, 0),
+ value: make([]byte, 0),
+ }
+ atomic.AddInt32(&db.aliveIters, 1)
+ runtime.SetFinalizer(iter, (*dbIter).Release)
+ return iter
+}
+
+func (db *DB) iterSamplingRate() int {
+ return rand.Intn(2 * db.s.o.GetIteratorSamplingRate())
+}
+
+type dir int
+
+const (
+ dirReleased dir = iota - 1
+ dirSOI
+ dirEOI
+ dirBackward
+ dirForward
+)
+
+// dbIter represents an iterator over a database session.
+type dbIter struct {
+ db *DB
+ icmp *iComparer
+ iter iterator.Iterator
+ seq uint64
+ strict bool
+
+ smaplingGap int
+ dir dir
+ key []byte
+ value []byte
+ err error
+ releaser util.Releaser
+}
+
+func (i *dbIter) sampleSeek() {
+ ikey := i.iter.Key()
+ i.smaplingGap -= len(ikey) + len(i.iter.Value())
+ for i.smaplingGap < 0 {
+ i.smaplingGap += i.db.iterSamplingRate()
+ i.db.sampleSeek(ikey)
+ }
+}
+
+func (i *dbIter) setErr(err error) {
+ i.err = err
+ i.key = nil
+ i.value = nil
+}
+
+func (i *dbIter) iterErr() {
+ if err := i.iter.Error(); err != nil {
+ i.setErr(err)
+ }
+}
+
+func (i *dbIter) Valid() bool {
+ return i.err == nil && i.dir > dirEOI
+}
+
+func (i *dbIter) First() bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.iter.First() {
+ i.dir = dirSOI
+ return i.next()
+ }
+ i.dir = dirEOI
+ i.iterErr()
+ return false
+}
+
+func (i *dbIter) Last() bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.iter.Last() {
+ return i.prev()
+ }
+ i.dir = dirSOI
+ i.iterErr()
+ return false
+}
+
+func (i *dbIter) Seek(key []byte) bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ ikey := makeInternalKey(nil, key, i.seq, keyTypeSeek)
+ if i.iter.Seek(ikey) {
+ i.dir = dirSOI
+ return i.next()
+ }
+ i.dir = dirEOI
+ i.iterErr()
+ return false
+}
+
+func (i *dbIter) next() bool {
+ for {
+ if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
+ i.sampleSeek()
+ if seq <= i.seq {
+ switch kt {
+ case keyTypeDel:
+ // Skip deleted key.
+ i.key = append(i.key[:0], ukey...)
+ i.dir = dirForward
+ case keyTypeVal:
+ if i.dir == dirSOI || i.icmp.uCompare(ukey, i.key) > 0 {
+ i.key = append(i.key[:0], ukey...)
+ i.value = append(i.value[:0], i.iter.Value()...)
+ i.dir = dirForward
+ return true
+ }
+ }
+ }
+ } else if i.strict {
+ i.setErr(kerr)
+ break
+ }
+ if !i.iter.Next() {
+ i.dir = dirEOI
+ i.iterErr()
+ break
+ }
+ }
+ return false
+}
+
+func (i *dbIter) Next() bool {
+ if i.dir == dirEOI || i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if !i.iter.Next() || (i.dir == dirBackward && !i.iter.Next()) {
+ i.dir = dirEOI
+ i.iterErr()
+ return false
+ }
+ return i.next()
+}
+
+func (i *dbIter) prev() bool {
+ i.dir = dirBackward
+ del := true
+ if i.iter.Valid() {
+ for {
+ if ukey, seq, kt, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
+ i.sampleSeek()
+ if seq <= i.seq {
+ if !del && i.icmp.uCompare(ukey, i.key) < 0 {
+ return true
+ }
+ del = (kt == keyTypeDel)
+ if !del {
+ i.key = append(i.key[:0], ukey...)
+ i.value = append(i.value[:0], i.iter.Value()...)
+ }
+ }
+ } else if i.strict {
+ i.setErr(kerr)
+ return false
+ }
+ if !i.iter.Prev() {
+ break
+ }
+ }
+ }
+ if del {
+ i.dir = dirSOI
+ i.iterErr()
+ return false
+ }
+ return true
+}
+
+func (i *dbIter) Prev() bool {
+ if i.dir == dirSOI || i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ switch i.dir {
+ case dirEOI:
+ return i.Last()
+ case dirForward:
+ for i.iter.Prev() {
+ if ukey, _, _, kerr := parseInternalKey(i.iter.Key()); kerr == nil {
+ i.sampleSeek()
+ if i.icmp.uCompare(ukey, i.key) < 0 {
+ goto cont
+ }
+ } else if i.strict {
+ i.setErr(kerr)
+ return false
+ }
+ }
+ i.dir = dirSOI
+ i.iterErr()
+ return false
+ }
+
+cont:
+ return i.prev()
+}
+
+func (i *dbIter) Key() []byte {
+ if i.err != nil || i.dir <= dirEOI {
+ return nil
+ }
+ return i.key
+}
+
+func (i *dbIter) Value() []byte {
+ if i.err != nil || i.dir <= dirEOI {
+ return nil
+ }
+ return i.value
+}
+
+func (i *dbIter) Release() {
+ if i.dir != dirReleased {
+ // Clear the finalizer.
+ runtime.SetFinalizer(i, nil)
+
+ if i.releaser != nil {
+ i.releaser.Release()
+ i.releaser = nil
+ }
+
+ i.dir = dirReleased
+ i.key = nil
+ i.value = nil
+ i.iter.Release()
+ i.iter = nil
+ atomic.AddInt32(&i.db.aliveIters, -1)
+ i.db = nil
+ }
+}
+
+func (i *dbIter) SetReleaser(releaser util.Releaser) {
+ if i.dir == dirReleased {
+ panic(util.ErrReleased)
+ }
+ if i.releaser != nil && releaser != nil {
+ panic(util.ErrHasReleaser)
+ }
+ i.releaser = releaser
+}
+
+func (i *dbIter) Error() error {
+ return i.err
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go
new file mode 100644
index 000000000..2c69d2e53
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_snapshot.go
@@ -0,0 +1,183 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "container/list"
+ "fmt"
+ "runtime"
+ "sync"
+ "sync/atomic"
+
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+type snapshotElement struct {
+ seq uint64
+ ref int
+ e *list.Element
+}
+
+// Acquires a snapshot, based on latest sequence.
+func (db *DB) acquireSnapshot() *snapshotElement {
+ db.snapsMu.Lock()
+ defer db.snapsMu.Unlock()
+
+ seq := db.getSeq()
+
+ if e := db.snapsList.Back(); e != nil {
+ se := e.Value.(*snapshotElement)
+ if se.seq == seq {
+ se.ref++
+ return se
+ } else if seq < se.seq {
+ panic("leveldb: sequence number is not increasing")
+ }
+ }
+ se := &snapshotElement{seq: seq, ref: 1}
+ se.e = db.snapsList.PushBack(se)
+ return se
+}
+
+// Releases given snapshot element.
+func (db *DB) releaseSnapshot(se *snapshotElement) {
+ db.snapsMu.Lock()
+ defer db.snapsMu.Unlock()
+
+ se.ref--
+ if se.ref == 0 {
+ db.snapsList.Remove(se.e)
+ se.e = nil
+ } else if se.ref < 0 {
+ panic("leveldb: Snapshot: negative element reference")
+ }
+}
+
+// Gets the minimum sequence number that is not being snapshotted.
+func (db *DB) minSeq() uint64 {
+ db.snapsMu.Lock()
+ defer db.snapsMu.Unlock()
+
+ if e := db.snapsList.Front(); e != nil {
+ return e.Value.(*snapshotElement).seq
+ }
+
+ return db.getSeq()
+}
+
+// Snapshot is a DB snapshot.
+type Snapshot struct {
+ db *DB
+ elem *snapshotElement
+ mu sync.RWMutex
+ released bool
+}
+
+// Creates new snapshot object.
+func (db *DB) newSnapshot() *Snapshot {
+ snap := &Snapshot{
+ db: db,
+ elem: db.acquireSnapshot(),
+ }
+ atomic.AddInt32(&db.aliveSnaps, 1)
+ runtime.SetFinalizer(snap, (*Snapshot).Release)
+ return snap
+}
+
+func (snap *Snapshot) String() string {
+ return fmt.Sprintf("leveldb.Snapshot{%d}", snap.elem.seq)
+}
+
+// Get gets the value for the given key. It returns ErrNotFound if
+// the DB does not contain the key.
+//
+// The caller should not modify the contents of the returned slice, but
+// it is safe to modify the contents of the argument after Get returns.
+func (snap *Snapshot) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+ err = snap.db.ok()
+ if err != nil {
+ return
+ }
+ snap.mu.RLock()
+ defer snap.mu.RUnlock()
+ if snap.released {
+ err = ErrSnapshotReleased
+ return
+ }
+ return snap.db.get(nil, nil, key, snap.elem.seq, ro)
+}
+
+// Has returns true if the DB does contain the given key.
+//
+// It is safe to modify the contents of the argument after Has returns.
+func (snap *Snapshot) Has(key []byte, ro *opt.ReadOptions) (ret bool, err error) {
+ err = snap.db.ok()
+ if err != nil {
+ return
+ }
+ snap.mu.RLock()
+ defer snap.mu.RUnlock()
+ if snap.released {
+ err = ErrSnapshotReleased
+ return
+ }
+ return snap.db.has(nil, nil, key, snap.elem.seq, ro)
+}
+
+// NewIterator returns an iterator for the snapshot of the underlying DB.
+// The returned iterator is not safe for concurrent use, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently with modifying its
+// underlying DB. The resultant key/value pairs are guaranteed to be
+// consistent.
+//
+// Slice allows slicing the iterator to only contain keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB. And a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// The iterator must be released after use, by calling Release method.
+// Releasing the snapshot does not release the iterator; the
+// iterator remains valid until it is released.
+//
+// Also read Iterator documentation of the leveldb/iterator package.
+func (snap *Snapshot) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+ if err := snap.db.ok(); err != nil {
+ return iterator.NewEmptyIterator(err)
+ }
+ snap.mu.Lock()
+ defer snap.mu.Unlock()
+ if snap.released {
+ return iterator.NewEmptyIterator(ErrSnapshotReleased)
+ }
+ // Since the iterator already holds a version ref, it doesn't need to
+ // hold a snapshot ref.
+ return snap.db.newIterator(nil, nil, snap.elem.seq, slice, ro)
+}
+
+// Release releases the snapshot. This will not release any returned
+// iterators; the iterators remain valid until released or until the
+// underlying DB is closed.
+//
+// Other methods should not be called after the snapshot has been released.
+func (snap *Snapshot) Release() {
+ snap.mu.Lock()
+ defer snap.mu.Unlock()
+
+ if !snap.released {
+ // Clear the finalizer.
+ runtime.SetFinalizer(snap, nil)
+
+ snap.released = true
+ snap.db.releaseSnapshot(snap.elem)
+ atomic.AddInt32(&snap.db.aliveSnaps, -1)
+ snap.db = nil
+ snap.elem = nil
+ }
+}
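
// Editor's sketch (not part of the vendored source): the typical snapshot
// lifecycle. Reads through the snapshot observe the sequence number captured at
// GetSnapshot time, regardless of later writes. The key is hypothetical.
//
//	snap, err := db.GetSnapshot()
//	if err != nil {
//		// handle error
//	}
//	defer snap.Release()
//
//	value, err := snap.Get([]byte("some-key"), nil)
//	_, _ = value, err
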
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go
new file mode 100644
index 000000000..65e1c54bb
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_state.go
@@ -0,0 +1,239 @@
+// Copyright (c) 2013, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "errors"
+ "sync/atomic"
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb/journal"
+ "github.com/syndtr/goleveldb/leveldb/memdb"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+var (
+ errHasFrozenMem = errors.New("has frozen mem")
+)
+
+type memDB struct {
+ db *DB
+ *memdb.DB
+ ref int32
+}
+
+func (m *memDB) getref() int32 {
+ return atomic.LoadInt32(&m.ref)
+}
+
+func (m *memDB) incref() {
+ atomic.AddInt32(&m.ref, 1)
+}
+
+func (m *memDB) decref() {
+ if ref := atomic.AddInt32(&m.ref, -1); ref == 0 {
+ // Only put back memdb with std capacity.
+ if m.Capacity() == m.db.s.o.GetWriteBuffer() {
+ m.Reset()
+ m.db.mpoolPut(m.DB)
+ }
+ m.db = nil
+ m.DB = nil
+ } else if ref < 0 {
+ panic("negative memdb ref")
+ }
+}
+
+// Get latest sequence number.
+func (db *DB) getSeq() uint64 {
+ return atomic.LoadUint64(&db.seq)
+}
+
+// Atomically adds delta to seq.
+func (db *DB) addSeq(delta uint64) {
+ atomic.AddUint64(&db.seq, delta)
+}
+
+func (db *DB) setSeq(seq uint64) {
+ atomic.StoreUint64(&db.seq, seq)
+}
+
+func (db *DB) sampleSeek(ikey internalKey) {
+ v := db.s.version()
+ if v.sampleSeek(ikey) {
+ // Trigger table compaction.
+ db.compTrigger(db.tcompCmdC)
+ }
+ v.release()
+}
+
+func (db *DB) mpoolPut(mem *memdb.DB) {
+ if !db.isClosed() {
+ select {
+ case db.memPool <- mem:
+ default:
+ }
+ }
+}
+
+func (db *DB) mpoolGet(n int) *memDB {
+ var mdb *memdb.DB
+ select {
+ case mdb = <-db.memPool:
+ default:
+ }
+ if mdb == nil || mdb.Capacity() < n {
+ mdb = memdb.New(db.s.icmp, maxInt(db.s.o.GetWriteBuffer(), n))
+ }
+ return &memDB{
+ db: db,
+ DB: mdb,
+ }
+}
+
+func (db *DB) mpoolDrain() {
+ ticker := time.NewTicker(30 * time.Second)
+ for {
+ select {
+ case <-ticker.C:
+ select {
+ case <-db.memPool:
+ default:
+ }
+ case <-db.closeC:
+ ticker.Stop()
+ // Make sure the pool is drained.
+ select {
+ case <-db.memPool:
+ case <-time.After(time.Second):
+ }
+ close(db.memPool)
+ return
+ }
+ }
+}
+
+// Create a new memdb and freeze the old one; needs external synchronization.
+// newMem is only called synchronously by the writer.
+func (db *DB) newMem(n int) (mem *memDB, err error) {
+ fd := storage.FileDesc{Type: storage.TypeJournal, Num: db.s.allocFileNum()}
+ w, err := db.s.stor.Create(fd)
+ if err != nil {
+ db.s.reuseFileNum(fd.Num)
+ return
+ }
+
+ db.memMu.Lock()
+ defer db.memMu.Unlock()
+
+ if db.frozenMem != nil {
+ return nil, errHasFrozenMem
+ }
+
+ if db.journal == nil {
+ db.journal = journal.NewWriter(w)
+ } else {
+ db.journal.Reset(w)
+ db.journalWriter.Close()
+ db.frozenJournalFd = db.journalFd
+ }
+ db.journalWriter = w
+ db.journalFd = fd
+ db.frozenMem = db.mem
+ mem = db.mpoolGet(n)
+ mem.incref() // for self
+ mem.incref() // for caller
+ db.mem = mem
+ // The seq is only incremented by the writer, and whoever called newMem
+ // should hold the write lock, so no additional synchronization is needed here.
+ db.frozenSeq = db.seq
+ return
+}
+
+// Get all memdbs.
+func (db *DB) getMems() (e, f *memDB) {
+ db.memMu.RLock()
+ defer db.memMu.RUnlock()
+ if db.mem != nil {
+ db.mem.incref()
+ } else if !db.isClosed() {
+ panic("nil effective mem")
+ }
+ if db.frozenMem != nil {
+ db.frozenMem.incref()
+ }
+ return db.mem, db.frozenMem
+}
+
+// Get effective memdb.
+func (db *DB) getEffectiveMem() *memDB {
+ db.memMu.RLock()
+ defer db.memMu.RUnlock()
+ if db.mem != nil {
+ db.mem.incref()
+ } else if !db.isClosed() {
+ panic("nil effective mem")
+ }
+ return db.mem
+}
+
+// Check whether we have a frozen memdb.
+func (db *DB) hasFrozenMem() bool {
+ db.memMu.RLock()
+ defer db.memMu.RUnlock()
+ return db.frozenMem != nil
+}
+
+// Get frozen memdb.
+func (db *DB) getFrozenMem() *memDB {
+ db.memMu.RLock()
+ defer db.memMu.RUnlock()
+ if db.frozenMem != nil {
+ db.frozenMem.incref()
+ }
+ return db.frozenMem
+}
+
+// Drop frozen memdb; assume that frozen memdb isn't nil.
+func (db *DB) dropFrozenMem() {
+ db.memMu.Lock()
+ if err := db.s.stor.Remove(db.frozenJournalFd); err != nil {
+ db.logf("journal@remove removing @%d %q", db.frozenJournalFd.Num, err)
+ } else {
+ db.logf("journal@remove removed @%d", db.frozenJournalFd.Num)
+ }
+ db.frozenJournalFd = storage.FileDesc{}
+ db.frozenMem.decref()
+ db.frozenMem = nil
+ db.memMu.Unlock()
+}
+
+// Clear mems ptr; used by DB.Close().
+func (db *DB) clearMems() {
+ db.memMu.Lock()
+ db.mem = nil
+ db.frozenMem = nil
+ db.memMu.Unlock()
+}
+
+// Set closed flag; return true if not already closed.
+func (db *DB) setClosed() bool {
+ return atomic.CompareAndSwapUint32(&db.closed, 0, 1)
+}
+
+// Check whether DB was closed.
+func (db *DB) isClosed() bool {
+ return atomic.LoadUint32(&db.closed) != 0
+}
+
+// Check read ok status.
+func (db *DB) ok() error {
+ if db.isClosed() {
+ return ErrClosed
+ }
+ return nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go
new file mode 100644
index 000000000..b8f7e7d21
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_transaction.go
@@ -0,0 +1,325 @@
+// Copyright (c) 2016, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "errors"
+ "sync"
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var errTransactionDone = errors.New("leveldb: transaction already closed")
+
+// Transaction is the transaction handle.
+type Transaction struct {
+ db *DB
+ lk sync.RWMutex
+ seq uint64
+ mem *memDB
+ tables tFiles
+ ikScratch []byte
+ rec sessionRecord
+ stats cStatStaging
+ closed bool
+}
+
+// Get gets the value for the given key. It returns ErrNotFound if the
+// DB does not contain the key.
+//
+// The returned slice is its own copy; it is safe to modify the contents
+// of the returned slice.
+// It is safe to modify the contents of the argument after Get returns.
+func (tr *Transaction) Get(key []byte, ro *opt.ReadOptions) ([]byte, error) {
+ tr.lk.RLock()
+ defer tr.lk.RUnlock()
+ if tr.closed {
+ return nil, errTransactionDone
+ }
+ return tr.db.get(tr.mem.DB, tr.tables, key, tr.seq, ro)
+}
+
+// Has returns true if the DB does contain the given key.
+//
+// It is safe to modify the contents of the argument after Has returns.
+func (tr *Transaction) Has(key []byte, ro *opt.ReadOptions) (bool, error) {
+ tr.lk.RLock()
+ defer tr.lk.RUnlock()
+ if tr.closed {
+ return false, errTransactionDone
+ }
+ return tr.db.has(tr.mem.DB, tr.tables, key, tr.seq, ro)
+}
+
+// NewIterator returns an iterator for the latest snapshot of the transaction.
+// The returned iterator is not safe for concurrent use, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently with writes to the
+// transaction. The resultant key/value pairs are guaranteed to be consistent.
+//
+// Slice allows slicing the iterator to only contain keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB. And a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// The iterator must be released after use, by calling Release method.
+//
+// Also read Iterator documentation of the leveldb/iterator package.
+func (tr *Transaction) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+ tr.lk.RLock()
+ defer tr.lk.RUnlock()
+ if tr.closed {
+ return iterator.NewEmptyIterator(errTransactionDone)
+ }
+ tr.mem.incref()
+ return tr.db.newIterator(tr.mem, tr.tables, tr.seq, slice, ro)
+}
+
+func (tr *Transaction) flush() error {
+ // Flush memdb.
+ if tr.mem.Len() != 0 {
+ tr.stats.startTimer()
+ iter := tr.mem.NewIterator(nil)
+ t, n, err := tr.db.s.tops.createFrom(iter)
+ iter.Release()
+ tr.stats.stopTimer()
+ if err != nil {
+ return err
+ }
+ if tr.mem.getref() == 1 {
+ tr.mem.Reset()
+ } else {
+ tr.mem.decref()
+ tr.mem = tr.db.mpoolGet(0)
+ tr.mem.incref()
+ }
+ tr.tables = append(tr.tables, t)
+ tr.rec.addTableFile(0, t)
+ tr.stats.write += t.size
+ tr.db.logf("transaction@flush created L0@%d N·%d S·%s %q:%q", t.fd.Num, n, shortenb(int(t.size)), t.imin, t.imax)
+ }
+ return nil
+}
+
+func (tr *Transaction) put(kt keyType, key, value []byte) error {
+ tr.ikScratch = makeInternalKey(tr.ikScratch, key, tr.seq+1, kt)
+ if tr.mem.Free() < len(tr.ikScratch)+len(value) {
+ if err := tr.flush(); err != nil {
+ return err
+ }
+ }
+ if err := tr.mem.Put(tr.ikScratch, value); err != nil {
+ return err
+ }
+ tr.seq++
+ return nil
+}
+
+// Put sets the value for the given key. It overwrites any previous value
+// for that key; a DB is not a multi-map.
+// Please note that the transaction is not compacted until committed, so if you
+// write the same key 10 times, all 10 entries remain in the transaction.
+//
+// It is safe to modify the contents of the arguments after Put returns.
+func (tr *Transaction) Put(key, value []byte, wo *opt.WriteOptions) error {
+ tr.lk.Lock()
+ defer tr.lk.Unlock()
+ if tr.closed {
+ return errTransactionDone
+ }
+ return tr.put(keyTypeVal, key, value)
+}
+
+// Delete deletes the value for the given key.
+// Please note that the transaction is not compacted until committed, so if you
+// write the same key 10 times, all 10 entries remain in the transaction.
+//
+// It is safe to modify the contents of the arguments after Delete returns.
+func (tr *Transaction) Delete(key []byte, wo *opt.WriteOptions) error {
+ tr.lk.Lock()
+ defer tr.lk.Unlock()
+ if tr.closed {
+ return errTransactionDone
+ }
+ return tr.put(keyTypeDel, key, nil)
+}
+
+// Write applies the given batch to the transaction. The batch will be applied
+// sequentially.
+// Please note that the transaction is not compacted until committed, so if you
+// write the same key 10 times, all 10 entries remain in the transaction.
+//
+// It is safe to modify the contents of the arguments after Write returns.
+func (tr *Transaction) Write(b *Batch, wo *opt.WriteOptions) error {
+ if b == nil || b.Len() == 0 {
+ return nil
+ }
+
+ tr.lk.Lock()
+ defer tr.lk.Unlock()
+ if tr.closed {
+ return errTransactionDone
+ }
+ return b.replayInternal(func(i int, kt keyType, k, v []byte) error {
+ return tr.put(kt, k, v)
+ })
+}
+
+func (tr *Transaction) setDone() {
+ tr.closed = true
+ tr.db.tr = nil
+ tr.mem.decref()
+ <-tr.db.writeLockC
+}
+
+// Commit commits the transaction. If error is not nil, then the transaction is
+// not committed, it can then either be retried or discarded.
+//
+// Other methods should not be called after transaction has been committed.
+func (tr *Transaction) Commit() error {
+ if err := tr.db.ok(); err != nil {
+ return err
+ }
+
+ tr.lk.Lock()
+ defer tr.lk.Unlock()
+ if tr.closed {
+ return errTransactionDone
+ }
+ if err := tr.flush(); err != nil {
+ // Return the error and let the user decide whether to retry or discard the
+ // transaction.
+ return err
+ }
+ if len(tr.tables) != 0 {
+ // Committing transaction.
+ tr.rec.setSeqNum(tr.seq)
+ tr.db.compCommitLk.Lock()
+ tr.stats.startTimer()
+ var cerr error
+ for retry := 0; retry < 3; retry++ {
+ cerr = tr.db.s.commit(&tr.rec)
+ if cerr != nil {
+ tr.db.logf("transaction@commit error R·%d %q", retry, cerr)
+ select {
+ case <-time.After(time.Second):
+ case <-tr.db.closeC:
+ tr.db.logf("transaction@commit exiting")
+ tr.db.compCommitLk.Unlock()
+ return cerr
+ }
+ } else {
+ // Success. Set db.seq.
+ tr.db.setSeq(tr.seq)
+ break
+ }
+ }
+ tr.stats.stopTimer()
+ if cerr != nil {
+ // Return the error and let the user decide whether to retry or
+ // discard the transaction.
+ return cerr
+ }
+
+ // Update compaction stats. This is safe as long as we hold compCommitLk.
+ tr.db.compStats.addStat(0, &tr.stats)
+
+ // Trigger table auto-compaction.
+ tr.db.compTrigger(tr.db.tcompCmdC)
+ tr.db.compCommitLk.Unlock()
+
+ // Additionally, wait for compaction when a certain threshold is reached.
+ // Ignore the error; Commit returns an error only if the transaction can't be committed.
+ tr.db.waitCompaction()
+ }
+ // Only mark as done if transaction committed successfully.
+ tr.setDone()
+ return nil
+}
+
+func (tr *Transaction) discard() {
+ // Discard transaction.
+ for _, t := range tr.tables {
+ tr.db.logf("transaction@discard @%d", t.fd.Num)
+ if err1 := tr.db.s.stor.Remove(t.fd); err1 == nil {
+ tr.db.s.reuseFileNum(t.fd.Num)
+ }
+ }
+}
+
+// Discard discards the transaction.
+//
+// Other methods should not be called after the transaction has been discarded.
+func (tr *Transaction) Discard() {
+ tr.lk.Lock()
+ if !tr.closed {
+ tr.discard()
+ tr.setDone()
+ }
+ tr.lk.Unlock()
+}
+
+func (db *DB) waitCompaction() error {
+ if db.s.tLen(0) >= db.s.o.GetWriteL0PauseTrigger() {
+ return db.compTriggerWait(db.tcompCmdC)
+ }
+ return nil
+}
+
+// OpenTransaction opens an atomic DB transaction. Only one transaction can be
+// opened at a time. Subsequent calls to Write and OpenTransaction will be
+// blocked until the in-flight transaction is committed or discarded.
+// The returned transaction handle is safe for concurrent use.
+//
+// Transactions are expensive and can overwhelm compaction, especially if the
+// transaction size is small. Use with caution.
+//
+// The transaction must be closed once done, either by committing or discarding
+// the transaction.
+// Closing the DB will discard any open transaction.
+func (db *DB) OpenTransaction() (*Transaction, error) {
+ if err := db.ok(); err != nil {
+ return nil, err
+ }
+
+ // The write happens synchronously.
+ select {
+ case db.writeLockC <- struct{}{}:
+ case err := <-db.compPerErrC:
+ return nil, err
+ case <-db.closeC:
+ return nil, ErrClosed
+ }
+
+ if db.tr != nil {
+ panic("leveldb: has open transaction")
+ }
+
+ // Flush current memdb.
+ if db.mem != nil && db.mem.Len() != 0 {
+ if _, err := db.rotateMem(0, true); err != nil {
+ return nil, err
+ }
+ }
+
+ // Wait compaction when certain threshold reached.
+ if err := db.waitCompaction(); err != nil {
+ return nil, err
+ }
+
+ tr := &Transaction{
+ db: db,
+ seq: db.seq,
+ mem: db.mpoolGet(0),
+ }
+ tr.mem.incref()
+ db.tr = tr
+ return tr, nil
+}
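For orientation, a rough usage sketch of the transaction API added above. The populate helper, the key format, and the error handling are illustrative only and not part of the library; the sketch simply exercises OpenTransaction, Put, Commit, and Discard as documented.

package example

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb"
)

// populate writes n keys through a single transaction. If any step fails the
// transaction is discarded, which drops the staged tables and releases the
// write lock; otherwise Commit installs the staged tables atomically.
func populate(db *leveldb.DB, n int) error {
	tr, err := db.OpenTransaction()
	if err != nil {
		return err
	}
	for i := 0; i < n; i++ {
		key := []byte(fmt.Sprintf("key-%06d", i))
		if err := tr.Put(key, []byte("value"), nil); err != nil {
			tr.Discard()
			return err
		}
	}
	// On failure Commit leaves the transaction open; discard it here rather
	// than retrying.
	if err := tr.Commit(); err != nil {
		tr.Discard()
		return err
	}
	return nil
}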
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go
new file mode 100644
index 000000000..7ecd960d2
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_util.go
@@ -0,0 +1,102 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// Reader is the interface that wraps the basic Get and NewIterator methods.
+// This interface is implemented by both DB and Snapshot.
+type Reader interface {
+ Get(key []byte, ro *opt.ReadOptions) (value []byte, err error)
+ NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator
+}
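Because both *leveldb.DB and *leveldb.Snapshot satisfy this Reader interface, read-only helpers can be written once and reused for live and snapshot reads. A minimal sketch; countPrefix is a hypothetical helper, and util.BytesPrefix is the range constructor used elsewhere in the package documentation.

package example

import (
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

// countPrefix counts the keys under prefix through any Reader, so the same
// code serves a *leveldb.DB and a *leveldb.Snapshot.
func countPrefix(r leveldb.Reader, prefix []byte) (int, error) {
	iter := r.NewIterator(util.BytesPrefix(prefix), nil)
	defer iter.Release()
	n := 0
	for iter.Next() {
		n++
	}
	return n, iter.Error()
}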
+
+// Sizes is a list of sizes.
+type Sizes []int64
+
+// Sum returns the sum of the sizes.
+func (sizes Sizes) Sum() int64 {
+ var sum int64
+ for _, size := range sizes {
+ sum += size
+ }
+ return sum
+}
+
+// Logging.
+func (db *DB) log(v ...interface{}) { db.s.log(v...) }
+func (db *DB) logf(format string, v ...interface{}) { db.s.logf(format, v...) }
+
+// Check and clean files.
+func (db *DB) checkAndCleanFiles() error {
+ v := db.s.version()
+ defer v.release()
+
+ tmap := make(map[int64]bool)
+ for _, tables := range v.levels {
+ for _, t := range tables {
+ tmap[t.fd.Num] = false
+ }
+ }
+
+ fds, err := db.s.stor.List(storage.TypeAll)
+ if err != nil {
+ return err
+ }
+
+ var nt int
+ var rem []storage.FileDesc
+ for _, fd := range fds {
+ keep := true
+ switch fd.Type {
+ case storage.TypeManifest:
+ keep = fd.Num >= db.s.manifestFd.Num
+ case storage.TypeJournal:
+ if !db.frozenJournalFd.Zero() {
+ keep = fd.Num >= db.frozenJournalFd.Num
+ } else {
+ keep = fd.Num >= db.journalFd.Num
+ }
+ case storage.TypeTable:
+ _, keep = tmap[fd.Num]
+ if keep {
+ tmap[fd.Num] = true
+ nt++
+ }
+ }
+
+ if !keep {
+ rem = append(rem, fd)
+ }
+ }
+
+ if nt != len(tmap) {
+ var mfds []storage.FileDesc
+ for num, present := range tmap {
+ if !present {
+ mfds = append(mfds, storage.FileDesc{storage.TypeTable, num})
+ db.logf("db@janitor table missing @%d", num)
+ }
+ }
+ return errors.NewErrCorrupted(storage.FileDesc{}, &errors.ErrMissingFiles{Fds: mfds})
+ }
+
+ db.logf("db@janitor F·%d G·%d", len(fds), len(rem))
+ for _, fd := range rem {
+ db.logf("db@janitor removing %s-%d", fd.Type, fd.Num)
+ if err := db.s.stor.Remove(fd); err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go b/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go
new file mode 100644
index 000000000..db0c1bece
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/db_write.go
@@ -0,0 +1,464 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "sync/atomic"
+ "time"
+
+ "github.com/syndtr/goleveldb/leveldb/memdb"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+func (db *DB) writeJournal(batches []*Batch, seq uint64, sync bool) error {
+ wr, err := db.journal.Next()
+ if err != nil {
+ return err
+ }
+ if err := writeBatchesWithHeader(wr, batches, seq); err != nil {
+ return err
+ }
+ if err := db.journal.Flush(); err != nil {
+ return err
+ }
+ if sync {
+ return db.journalWriter.Sync()
+ }
+ return nil
+}
+
+func (db *DB) rotateMem(n int, wait bool) (mem *memDB, err error) {
+ retryLimit := 3
+retry:
+ // Wait for pending memdb compaction.
+ err = db.compTriggerWait(db.mcompCmdC)
+ if err != nil {
+ return
+ }
+ retryLimit--
+
+ // Create new memdb and journal.
+ mem, err = db.newMem(n)
+ if err != nil {
+ if err == errHasFrozenMem {
+ if retryLimit <= 0 {
+ panic("BUG: still has frozen memdb")
+ }
+ goto retry
+ }
+ return
+ }
+
+ // Schedule memdb compaction.
+ if wait {
+ err = db.compTriggerWait(db.mcompCmdC)
+ } else {
+ db.compTrigger(db.mcompCmdC)
+ }
+ return
+}
+
+func (db *DB) flush(n int) (mdb *memDB, mdbFree int, err error) {
+ delayed := false
+ slowdownTrigger := db.s.o.GetWriteL0SlowdownTrigger()
+ pauseTrigger := db.s.o.GetWriteL0PauseTrigger()
+ flush := func() (retry bool) {
+ mdb = db.getEffectiveMem()
+ if mdb == nil {
+ err = ErrClosed
+ return false
+ }
+ defer func() {
+ if retry {
+ mdb.decref()
+ mdb = nil
+ }
+ }()
+ tLen := db.s.tLen(0)
+ mdbFree = mdb.Free()
+ switch {
+ case tLen >= slowdownTrigger && !delayed:
+ delayed = true
+ time.Sleep(time.Millisecond)
+ case mdbFree >= n:
+ return false
+ case tLen >= pauseTrigger:
+ delayed = true
+ // Set the write paused flag explicitly.
+ atomic.StoreInt32(&db.inWritePaused, 1)
+ err = db.compTriggerWait(db.tcompCmdC)
+ // Unset the write paused flag.
+ atomic.StoreInt32(&db.inWritePaused, 0)
+ if err != nil {
+ return false
+ }
+ default:
+ // Allow memdb to grow if it has no entry.
+ if mdb.Len() == 0 {
+ mdbFree = n
+ } else {
+ mdb.decref()
+ mdb, err = db.rotateMem(n, false)
+ if err == nil {
+ mdbFree = mdb.Free()
+ } else {
+ mdbFree = 0
+ }
+ }
+ return false
+ }
+ return true
+ }
+ start := time.Now()
+ for flush() {
+ }
+ if delayed {
+ db.writeDelay += time.Since(start)
+ db.writeDelayN++
+ } else if db.writeDelayN > 0 {
+ db.logf("db@write was delayed N·%d T·%v", db.writeDelayN, db.writeDelay)
+ atomic.AddInt32(&db.cWriteDelayN, int32(db.writeDelayN))
+ atomic.AddInt64(&db.cWriteDelay, int64(db.writeDelay))
+ db.writeDelay = 0
+ db.writeDelayN = 0
+ }
+ return
+}
+
+type writeMerge struct {
+ sync bool
+ batch *Batch
+ keyType keyType
+ key, value []byte
+}
+
+func (db *DB) unlockWrite(overflow bool, merged int, err error) {
+ for i := 0; i < merged; i++ {
+ db.writeAckC <- err
+ }
+ if overflow {
+ // Pass lock to the next write (that failed to merge).
+ db.writeMergedC <- false
+ } else {
+ // Release lock.
+ <-db.writeLockC
+ }
+}
+
+// ourBatch is a batch that we can modify.
+func (db *DB) writeLocked(batch, ourBatch *Batch, merge, sync bool) error {
+ // Try to flush memdb. This method will also try to throttle writes if
+ // they are too fast and compaction cannot catch up.
+ mdb, mdbFree, err := db.flush(batch.internalLen)
+ if err != nil {
+ db.unlockWrite(false, 0, err)
+ return err
+ }
+ defer mdb.decref()
+
+ var (
+ overflow bool
+ merged int
+ batches = []*Batch{batch}
+ )
+
+ if merge {
+ // Merge limit.
+ var mergeLimit int
+ if batch.internalLen > 128<<10 {
+ mergeLimit = (1 << 20) - batch.internalLen
+ } else {
+ mergeLimit = 128 << 10
+ }
+ mergeCap := mdbFree - batch.internalLen
+ if mergeLimit > mergeCap {
+ mergeLimit = mergeCap
+ }
+
+ merge:
+ for mergeLimit > 0 {
+ select {
+ case incoming := <-db.writeMergeC:
+ if incoming.batch != nil {
+ // Merge batch.
+ if incoming.batch.internalLen > mergeLimit {
+ overflow = true
+ break merge
+ }
+ batches = append(batches, incoming.batch)
+ mergeLimit -= incoming.batch.internalLen
+ } else {
+ // Merge put.
+ internalLen := len(incoming.key) + len(incoming.value) + 8
+ if internalLen > mergeLimit {
+ overflow = true
+ break merge
+ }
+ if ourBatch == nil {
+ ourBatch = db.batchPool.Get().(*Batch)
+ ourBatch.Reset()
+ batches = append(batches, ourBatch)
+ }
+ // We can use the same batch since concurrent writes don't
+ // guarantee write order.
+ ourBatch.appendRec(incoming.keyType, incoming.key, incoming.value)
+ mergeLimit -= internalLen
+ }
+ sync = sync || incoming.sync
+ merged++
+ db.writeMergedC <- true
+
+ default:
+ break merge
+ }
+ }
+ }
+
+ // Release ourBatch if any.
+ if ourBatch != nil {
+ defer db.batchPool.Put(ourBatch)
+ }
+
+ // Seq number.
+ seq := db.seq + 1
+
+ // Write journal.
+ if err := db.writeJournal(batches, seq, sync); err != nil {
+ db.unlockWrite(overflow, merged, err)
+ return err
+ }
+
+ // Put batches.
+ for _, batch := range batches {
+ if err := batch.putMem(seq, mdb.DB); err != nil {
+ panic(err)
+ }
+ seq += uint64(batch.Len())
+ }
+
+ // Incr seq number.
+ db.addSeq(uint64(batchesLen(batches)))
+
+ // Rotate memdb if it has reached the threshold.
+ if batch.internalLen >= mdbFree {
+ db.rotateMem(0, false)
+ }
+
+ db.unlockWrite(overflow, merged, nil)
+ return nil
+}
+
+// Write applies the given batch to the DB. The batch records will be applied
+// sequentially. Write may be used concurrently; when used concurrently and the
+// batch is small enough, Write will try to merge the batches. Set the
+// NoWriteMerge option to true to disable write merging.
+//
+// It is safe to modify the contents of the arguments after Write returns but
+// not before. Write will not modify the content of the batch.
+func (db *DB) Write(batch *Batch, wo *opt.WriteOptions) error {
+ if err := db.ok(); err != nil || batch == nil || batch.Len() == 0 {
+ return err
+ }
+
+ // If the batch size is larger than the write buffer, it may be justified to
+ // write using a transaction instead. Using a transaction, the batch will be
+ // written into tables directly, skipping the journal.
+ if batch.internalLen > db.s.o.GetWriteBuffer() && !db.s.o.GetDisableLargeBatchTransaction() {
+ tr, err := db.OpenTransaction()
+ if err != nil {
+ return err
+ }
+ if err := tr.Write(batch, wo); err != nil {
+ tr.Discard()
+ return err
+ }
+ return tr.Commit()
+ }
+
+ merge := !wo.GetNoWriteMerge() && !db.s.o.GetNoWriteMerge()
+ sync := wo.GetSync() && !db.s.o.GetNoSync()
+
+ // Acquire write lock.
+ if merge {
+ select {
+ case db.writeMergeC <- writeMerge{sync: sync, batch: batch}:
+ if <-db.writeMergedC {
+ // Write is merged.
+ return <-db.writeAckC
+ }
+ // Write is not merged, the write lock is handed to us. Continue.
+ case db.writeLockC <- struct{}{}:
+ // Write lock acquired.
+ case err := <-db.compPerErrC:
+ // Compaction error.
+ return err
+ case <-db.closeC:
+ // Closed
+ return ErrClosed
+ }
+ } else {
+ select {
+ case db.writeLockC <- struct{}{}:
+ // Write lock acquired.
+ case err := <-db.compPerErrC:
+ // Compaction error.
+ return err
+ case <-db.closeC:
+ // Closed
+ return ErrClosed
+ }
+ }
+
+ return db.writeLocked(batch, nil, merge, sync)
+}
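A rough sketch of how the merge path above is exercised: several goroutines issue small Puts, and writes arriving while another write holds the lock may be merged into a single journal entry. The helper is hypothetical, and the WriteOptions.NoWriteMerge field is assumed here to be the setter behind the GetNoWriteMerge accessor used above.

package example

import (
	"fmt"
	"sync"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

// concurrentPuts fires small writes from several goroutines; candidates for
// merging queue up while one goroutine holds the write lock.
func concurrentPuts(db *leveldb.DB) error {
	var wg sync.WaitGroup
	errc := make(chan error, 8)
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			errc <- db.Put([]byte(fmt.Sprintf("k-%d", i)), []byte("v"), nil)
		}(i)
	}
	wg.Wait()
	close(errc)
	for err := range errc {
		if err != nil {
			return err
		}
	}
	// Opt a single write out of merging (assumed field, see lead-in).
	return db.Put([]byte("solo"), []byte("v"), &opt.WriteOptions{NoWriteMerge: true})
}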
+
+func (db *DB) putRec(kt keyType, key, value []byte, wo *opt.WriteOptions) error {
+ if err := db.ok(); err != nil {
+ return err
+ }
+
+ merge := !wo.GetNoWriteMerge() && !db.s.o.GetNoWriteMerge()
+ sync := wo.GetSync() && !db.s.o.GetNoSync()
+
+ // Acquire write lock.
+ if merge {
+ select {
+ case db.writeMergeC <- writeMerge{sync: sync, keyType: kt, key: key, value: value}:
+ if <-db.writeMergedC {
+ // Write is merged.
+ return <-db.writeAckC
+ }
+ // Write is not merged, the write lock is handed to us. Continue.
+ case db.writeLockC <- struct{}{}:
+ // Write lock acquired.
+ case err := <-db.compPerErrC:
+ // Compaction error.
+ return err
+ case <-db.closeC:
+ // Closed
+ return ErrClosed
+ }
+ } else {
+ select {
+ case db.writeLockC <- struct{}{}:
+ // Write lock acquired.
+ case err := <-db.compPerErrC:
+ // Compaction error.
+ return err
+ case <-db.closeC:
+ // Closed
+ return ErrClosed
+ }
+ }
+
+ batch := db.batchPool.Get().(*Batch)
+ batch.Reset()
+ batch.appendRec(kt, key, value)
+ return db.writeLocked(batch, batch, merge, sync)
+}
+
+// Put sets the value for the given key. It overwrites any previous value
+// for that key; a DB is not a multi-map. Write merging also applies to Put;
+// see Write.
+//
+// It is safe to modify the contents of the arguments after Put returns but not
+// before.
+func (db *DB) Put(key, value []byte, wo *opt.WriteOptions) error {
+ return db.putRec(keyTypeVal, key, value, wo)
+}
+
+// Delete deletes the value for the given key. Delete does not return an error
+// if the key doesn't exist. Write merging also applies to Delete; see Write.
+//
+// It is safe to modify the contents of the arguments after Delete returns but
+// not before.
+func (db *DB) Delete(key []byte, wo *opt.WriteOptions) error {
+ return db.putRec(keyTypeDel, key, nil, wo)
+}
+
+func isMemOverlaps(icmp *iComparer, mem *memdb.DB, min, max []byte) bool {
+ iter := mem.NewIterator(nil)
+ defer iter.Release()
+ return (max == nil || (iter.First() && icmp.uCompare(max, internalKey(iter.Key()).ukey()) >= 0)) &&
+ (min == nil || (iter.Last() && icmp.uCompare(min, internalKey(iter.Key()).ukey()) <= 0))
+}
+
+// CompactRange compacts the underlying DB for the given key range.
+// In particular, deleted and overwritten versions are discarded,
+// and the data is rearranged to reduce the cost of operations
+// needed to access the data. This operation should typically only
+// be invoked by users who understand the underlying implementation.
+//
+// A nil Range.Start is treated as a key before all keys in the DB,
+// and a nil Range.Limit is treated as a key after all keys in the DB.
+// Therefore, if both are nil, the entire DB will be compacted.
+func (db *DB) CompactRange(r util.Range) error {
+ if err := db.ok(); err != nil {
+ return err
+ }
+
+ // Lock writer.
+ select {
+ case db.writeLockC <- struct{}{}:
+ case err := <-db.compPerErrC:
+ return err
+ case <-db.closeC:
+ return ErrClosed
+ }
+
+ // Check for overlaps in memdb.
+ mdb := db.getEffectiveMem()
+ if mdb == nil {
+ return ErrClosed
+ }
+ defer mdb.decref()
+ if isMemOverlaps(db.s.icmp, mdb.DB, r.Start, r.Limit) {
+ // Memdb compaction.
+ if _, err := db.rotateMem(0, false); err != nil {
+ <-db.writeLockC
+ return err
+ }
+ <-db.writeLockC
+ if err := db.compTriggerWait(db.mcompCmdC); err != nil {
+ return err
+ }
+ } else {
+ <-db.writeLockC
+ }
+
+ // Table compaction.
+ return db.compTriggerRange(db.tcompCmdC, -1, r.Start, r.Limit)
+}
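A brief sketch of calling CompactRange as documented above. compactAll and compactPrefix are hypothetical helpers; util.BytesPrefix is assumed to build the prefix range as in the package documentation.

package example

import (
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

// compactAll compacts the whole key space: a zero util.Range has nil Start
// and nil Limit, which the comment above treats as "before all keys" and
// "after all keys".
func compactAll(db *leveldb.DB) error {
	return db.CompactRange(util.Range{})
}

// compactPrefix compacts only the keys under the given prefix.
func compactPrefix(db *leveldb.DB, prefix []byte) error {
	return db.CompactRange(*util.BytesPrefix(prefix))
}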
+
+// SetReadOnly makes DB read-only. It will stay read-only until reopened.
+func (db *DB) SetReadOnly() error {
+ if err := db.ok(); err != nil {
+ return err
+ }
+
+ // Lock writer.
+ select {
+ case db.writeLockC <- struct{}{}:
+ db.compWriteLocking = true
+ case err := <-db.compPerErrC:
+ return err
+ case <-db.closeC:
+ return ErrClosed
+ }
+
+ // Set compaction read-only.
+ select {
+ case db.compErrSetC <- ErrReadOnly:
+ case perr := <-db.compPerErrC:
+ return perr
+ case <-db.closeC:
+ return ErrClosed
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/doc.go b/vendor/github.com/syndtr/goleveldb/leveldb/doc.go
new file mode 100644
index 000000000..be768e573
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/doc.go
@@ -0,0 +1,92 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package leveldb provides an implementation of the LevelDB key/value database.
+//
+// Create or open a database:
+//
+// // The returned DB instance is safe for concurrent use, which means that all
+// // of the DB's methods may be called concurrently from multiple goroutines.
+// db, err := leveldb.OpenFile("path/to/db", nil)
+// ...
+// defer db.Close()
+// ...
+//
+// Read or modify the database content:
+//
+// // Remember that the contents of the returned slice should not be modified.
+// data, err := db.Get([]byte("key"), nil)
+// ...
+// err = db.Put([]byte("key"), []byte("value"), nil)
+// ...
+// err = db.Delete([]byte("key"), nil)
+// ...
+//
+// Iterate over database content:
+//
+// iter := db.NewIterator(nil, nil)
+// for iter.Next() {
+// // Remember that the contents of the returned slice should not be modified, and
+// // only valid until the next call to Next.
+// key := iter.Key()
+// value := iter.Value()
+// ...
+// }
+// iter.Release()
+// err = iter.Error()
+// ...
+//
+// Iterate over subset of database content with a particular prefix:
+//
+// iter := db.NewIterator(util.BytesPrefix([]byte("foo-")), nil)
+// for iter.Next() {
+// // Use key/value.
+// ...
+// }
+// iter.Release()
+// err = iter.Error()
+// ...
+//
+// Seek-then-Iterate:
+//
+// iter := db.NewIterator(nil, nil)
+// for ok := iter.Seek(key); ok; ok = iter.Next() {
+// // Use key/value.
+// ...
+// }
+// iter.Release()
+// err = iter.Error()
+// ...
+//
+// Iterate over subset of database content:
+//
+// iter := db.NewIterator(&util.Range{Start: []byte("foo"), Limit: []byte("xoo")}, nil)
+// for iter.Next() {
+// // Use key/value.
+// ...
+// }
+// iter.Release()
+// err = iter.Error()
+// ...
+//
+// Batch writes:
+//
+// batch := new(leveldb.Batch)
+// batch.Put([]byte("foo"), []byte("value"))
+// batch.Put([]byte("bar"), []byte("another value"))
+// batch.Delete([]byte("baz"))
+// err = db.Write(batch, nil)
+// ...
+//
+// Use bloom filter:
+//
+// o := &opt.Options{
+// Filter: filter.NewBloomFilter(10),
+// }
+// db, err := leveldb.OpenFile("path/to/db", o)
+// ...
+// defer db.Close()
+// ...
+package leveldb
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/errors.go b/vendor/github.com/syndtr/goleveldb/leveldb/errors.go
new file mode 100644
index 000000000..de2649812
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/errors.go
@@ -0,0 +1,20 @@
+// Copyright (c) 2014, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/errors"
+)
+
+// Common errors.
+var (
+ ErrNotFound = errors.ErrNotFound
+ ErrReadOnly = errors.New("leveldb: read-only mode")
+ ErrSnapshotReleased = errors.New("leveldb: snapshot released")
+ ErrIterReleased = errors.New("leveldb: iterator released")
+ ErrClosed = errors.New("leveldb: closed")
+)
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go b/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go
new file mode 100644
index 000000000..8d6146b6f
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/errors/errors.go
@@ -0,0 +1,78 @@
+// Copyright (c) 2014, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package errors provides common error types used throughout leveldb.
+package errors
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// Common errors.
+var (
+ ErrNotFound = New("leveldb: not found")
+ ErrReleased = util.ErrReleased
+ ErrHasReleaser = util.ErrHasReleaser
+)
+
+// New returns an error that formats as the given text.
+func New(text string) error {
+ return errors.New(text)
+}
+
+// ErrCorrupted is the type that wraps errors that indicate corruption in
+// the database.
+type ErrCorrupted struct {
+ Fd storage.FileDesc
+ Err error
+}
+
+func (e *ErrCorrupted) Error() string {
+ if !e.Fd.Zero() {
+ return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd)
+ }
+ return e.Err.Error()
+}
+
+// NewErrCorrupted creates new ErrCorrupted error.
+func NewErrCorrupted(fd storage.FileDesc, err error) error {
+ return &ErrCorrupted{fd, err}
+}
+
+// IsCorrupted returns a boolean indicating whether the error indicates
+// a corruption.
+func IsCorrupted(err error) bool {
+ switch err.(type) {
+ case *ErrCorrupted:
+ return true
+ case *storage.ErrCorrupted:
+ return true
+ }
+ return false
+}
+
+// ErrMissingFiles is the type indicating a corruption due to missing
+// files. ErrMissingFiles is always wrapped in ErrCorrupted.
+type ErrMissingFiles struct {
+ Fds []storage.FileDesc
+}
+
+func (e *ErrMissingFiles) Error() string { return "file missing" }
+
+// SetFd sets the 'file info' of the given error to the given file.
+// Currently only ErrCorrupted is supported; otherwise this does nothing.
+func SetFd(err error, fd storage.FileDesc) error {
+ switch x := err.(type) {
+ case *ErrCorrupted:
+ x.Fd = fd
+ return x
+ }
+ return err
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter.go
new file mode 100644
index 000000000..e961e420d
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/filter.go
@@ -0,0 +1,31 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/filter"
+)
+
+type iFilter struct {
+ filter.Filter
+}
+
+func (f iFilter) Contains(filter, key []byte) bool {
+ return f.Filter.Contains(filter, internalKey(key).ukey())
+}
+
+func (f iFilter) NewGenerator() filter.FilterGenerator {
+ return iFilterGenerator{f.Filter.NewGenerator()}
+}
+
+type iFilterGenerator struct {
+ filter.FilterGenerator
+}
+
+func (g iFilterGenerator) Add(key []byte) {
+ g.FilterGenerator.Add(internalKey(key).ukey())
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go
new file mode 100644
index 000000000..bab0e9970
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/filter/bloom.go
@@ -0,0 +1,116 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package filter
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+func bloomHash(key []byte) uint32 {
+ return util.Hash(key, 0xbc9f1d34)
+}
+
+type bloomFilter int
+
+// The bloom filter serializes its parameters and is backward compatible
+// with respect to them. Therefore, its parameters are not added to its
+// name.
+func (bloomFilter) Name() string {
+ return "leveldb.BuiltinBloomFilter"
+}
+
+func (f bloomFilter) Contains(filter, key []byte) bool {
+ nBytes := len(filter) - 1
+ if nBytes < 1 {
+ return false
+ }
+ nBits := uint32(nBytes * 8)
+
+ // Use the encoded k so that we can read filters generated by
+ // bloom filters created using different parameters.
+ k := filter[nBytes]
+ if k > 30 {
+ // Reserved for potentially new encodings for short bloom filters.
+ // Consider it a match.
+ return true
+ }
+
+ kh := bloomHash(key)
+ delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits
+ for j := uint8(0); j < k; j++ {
+ bitpos := kh % nBits
+ if (uint32(filter[bitpos/8]) & (1 << (bitpos % 8))) == 0 {
+ return false
+ }
+ kh += delta
+ }
+ return true
+}
+
+func (f bloomFilter) NewGenerator() FilterGenerator {
+ // Round down to reduce probing cost a little bit.
+ k := uint8(f * 69 / 100) // 0.69 =~ ln(2)
+ if k < 1 {
+ k = 1
+ } else if k > 30 {
+ k = 30
+ }
+ return &bloomFilterGenerator{
+ n: int(f),
+ k: k,
+ }
+}
+
+type bloomFilterGenerator struct {
+ n int
+ k uint8
+
+ keyHashes []uint32
+}
+
+func (g *bloomFilterGenerator) Add(key []byte) {
+ // Use double-hashing to generate a sequence of hash values.
+ // See analysis in [Kirsch,Mitzenmacher 2006].
+ g.keyHashes = append(g.keyHashes, bloomHash(key))
+}
+
+func (g *bloomFilterGenerator) Generate(b Buffer) {
+ // Compute bloom filter size (in both bits and bytes)
+ nBits := uint32(len(g.keyHashes) * g.n)
+ // For small n, we can see a very high false positive rate. Fix it
+ // by enforcing a minimum bloom filter length.
+ if nBits < 64 {
+ nBits = 64
+ }
+ nBytes := (nBits + 7) / 8
+ nBits = nBytes * 8
+
+ dest := b.Alloc(int(nBytes) + 1)
+ dest[nBytes] = g.k
+ for _, kh := range g.keyHashes {
+ delta := (kh >> 17) | (kh << 15) // Rotate right 17 bits
+ for j := uint8(0); j < g.k; j++ {
+ bitpos := kh % nBits
+ dest[bitpos/8] |= (1 << (bitpos % 8))
+ kh += delta
+ }
+ }
+
+ g.keyHashes = g.keyHashes[:0]
+}
+
+// NewBloomFilter creates a new initialized bloom filter for given
+// bitsPerKey.
+//
+// Since bitsPerKey is persisted individually for each bloom filter
+// serialization, bloom filters are backwards compatible with respect to
+// changing bitsPerKey. This means that no big performance penalty will
+// be experienced when changing the parameter. See documentation for
+// opt.Options.Filter for more information.
+func NewBloomFilter(bitsPerKey int) Filter {
+ return bloomFilter(bitsPerKey)
+}
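For intuition, a standalone sketch of the double-hashing probe sequence used by Contains and Generate above: one base hash kh, a delta obtained by rotating kh right by 17 bits, and k probe positions taken modulo the filter size. FNV-1a stands in for the package's util.Hash here, so the positions will not match a real filter; only the structure is the point.

package main

import (
	"fmt"
	"hash/fnv"
)

// probePositions returns the k bit positions that the bloom filter above
// would probe for key, given a filter of nBits bits.
func probePositions(key []byte, k uint8, nBits uint32) []uint32 {
	h := fnv.New32a() // stand-in for util.Hash(key, 0xbc9f1d34)
	h.Write(key)
	kh := h.Sum32()
	delta := (kh >> 17) | (kh << 15) // rotate right 17 bits, as in Contains/Generate
	pos := make([]uint32, 0, k)
	for j := uint8(0); j < k; j++ {
		pos = append(pos, kh%nBits)
		kh += delta
	}
	return pos
}

func main() {
	// With 10 bits per key and a single key, Generate clamps the filter to
	// 64 bits and uses k = 10*69/100 = 6 probes.
	fmt.Println(probePositions([]byte("foo"), 6, 64))
}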
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go b/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go
new file mode 100644
index 000000000..7a925c5a8
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/filter/filter.go
@@ -0,0 +1,60 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package filter provides the interface and implementation of a probabilistic
+// data structure.
+//
+// The filter is responsible for creating a small filter from a set of keys.
+// These filters will then be used to test whether a key is a member of the set.
+// In many cases, a filter can cut down the number of disk seeks from a
+// handful to a single disk seek per DB.Get call.
+package filter
+
+// Buffer is the interface that wraps basic Alloc, Write and WriteByte methods.
+type Buffer interface {
+ // Alloc allocates an n-byte slice from the buffer. This also advances
+ // the write offset.
+ Alloc(n int) []byte
+
+ // Write appends the contents of p to the buffer.
+ Write(p []byte) (n int, err error)
+
+ // WriteByte appends the byte c to the buffer.
+ WriteByte(c byte) error
+}
+
+// Filter is the filter.
+type Filter interface {
+ // Name returns the name of this policy.
+ //
+ // Note that if the filter encoding changes in an incompatible way,
+ // the name returned by this method must be changed. Otherwise, old
+ // incompatible filters may be passed to methods of this type.
+ Name() string
+
+ // NewGenerator creates a new filter generator.
+ NewGenerator() FilterGenerator
+
+ // Contains returns true if the filter contains the given key.
+ //
+ // The filter argument is a filter generated by the filter generator.
+ Contains(filter, key []byte) bool
+}
+
+// FilterGenerator is the filter generator.
+type FilterGenerator interface {
+ // Add adds a key to the filter generator.
+ //
+ // The key may become invalid after the call to this method ends, therefore
+ // the key must be copied if the implementation requires keeping it for
+ // later use. The key should not be modified directly; doing so may cause
+ // undefined results.
+ Add(key []byte)
+
+ // Generate generates filters based on the keys passed so far. After a call
+ // to Generate, the filter generator may be reset, depending on the implementation.
+ Generate(b Buffer)
+}
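A minimal usage sketch of the Filter/FilterGenerator/Buffer contract above: a toy Buffer implementation collects the generated bytes, which are then queried with Contains. sliceBuffer is illustrative only and not part of the package.

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/filter"
)

// sliceBuffer is a minimal filter.Buffer backed by a byte slice.
type sliceBuffer struct{ b []byte }

func (s *sliceBuffer) Alloc(n int) []byte {
	off := len(s.b)
	s.b = append(s.b, make([]byte, n)...)
	return s.b[off:]
}

func (s *sliceBuffer) Write(p []byte) (int, error) { s.b = append(s.b, p...); return len(p), nil }
func (s *sliceBuffer) WriteByte(c byte) error      { s.b = append(s.b, c); return nil }

func main() {
	f := filter.NewBloomFilter(10)
	g := f.NewGenerator()
	g.Add([]byte("foo"))
	g.Add([]byte("bar"))

	var buf sliceBuffer
	g.Generate(&buf)

	fmt.Println(f.Contains(buf.b, []byte("foo"))) // true
	fmt.Println(f.Contains(buf.b, []byte("xyz"))) // false, barring a false positive
}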
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go
new file mode 100644
index 000000000..a23ab05f7
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/array_iter.go
@@ -0,0 +1,184 @@
+// Copyright (c) 2014, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package iterator
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// BasicArray is the interface that wraps the basic Len and Search methods.
+type BasicArray interface {
+ // Len returns the length of the array.
+ Len() int
+
+ // Search finds the smallest index that points to a key greater
+ // than or equal to the given key.
+ Search(key []byte) int
+}
+
+// Array is the interface that wraps BasicArray and the basic Index method.
+type Array interface {
+ BasicArray
+
+ // Index returns the key/value pair at index i.
+ Index(i int) (key, value []byte)
+}
+
+// ArrayIndexer is the interface that wraps BasicArray and the basic Get method.
+type ArrayIndexer interface {
+ BasicArray
+
+ // Get returns a new data iterator for index i.
+ Get(i int) Iterator
+}
+
+type basicArrayIterator struct {
+ util.BasicReleaser
+ array BasicArray
+ pos int
+ err error
+}
+
+func (i *basicArrayIterator) Valid() bool {
+ return i.pos >= 0 && i.pos < i.array.Len() && !i.Released()
+}
+
+func (i *basicArrayIterator) First() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.array.Len() == 0 {
+ i.pos = -1
+ return false
+ }
+ i.pos = 0
+ return true
+}
+
+func (i *basicArrayIterator) Last() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ n := i.array.Len()
+ if n == 0 {
+ i.pos = 0
+ return false
+ }
+ i.pos = n - 1
+ return true
+}
+
+func (i *basicArrayIterator) Seek(key []byte) bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ n := i.array.Len()
+ if n == 0 {
+ i.pos = 0
+ return false
+ }
+ i.pos = i.array.Search(key)
+ if i.pos >= n {
+ return false
+ }
+ return true
+}
+
+func (i *basicArrayIterator) Next() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ i.pos++
+ if n := i.array.Len(); i.pos >= n {
+ i.pos = n
+ return false
+ }
+ return true
+}
+
+func (i *basicArrayIterator) Prev() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ i.pos--
+ if i.pos < 0 {
+ i.pos = -1
+ return false
+ }
+ return true
+}
+
+func (i *basicArrayIterator) Error() error { return i.err }
+
+type arrayIterator struct {
+ basicArrayIterator
+ array Array
+ pos int
+ key, value []byte
+}
+
+func (i *arrayIterator) updateKV() {
+ if i.pos == i.basicArrayIterator.pos {
+ return
+ }
+ i.pos = i.basicArrayIterator.pos
+ if i.Valid() {
+ i.key, i.value = i.array.Index(i.pos)
+ } else {
+ i.key = nil
+ i.value = nil
+ }
+}
+
+func (i *arrayIterator) Key() []byte {
+ i.updateKV()
+ return i.key
+}
+
+func (i *arrayIterator) Value() []byte {
+ i.updateKV()
+ return i.value
+}
+
+type arrayIteratorIndexer struct {
+ basicArrayIterator
+ array ArrayIndexer
+}
+
+func (i *arrayIteratorIndexer) Get() Iterator {
+ if i.Valid() {
+ return i.array.Get(i.basicArrayIterator.pos)
+ }
+ return nil
+}
+
+// NewArrayIterator returns an iterator from the given array.
+func NewArrayIterator(array Array) Iterator {
+ return &arrayIterator{
+ basicArrayIterator: basicArrayIterator{array: array, pos: -1},
+ array: array,
+ pos: -1,
+ }
+}
+
+// NewArrayIndexer returns an index iterator from the given array.
+func NewArrayIndexer(array ArrayIndexer) IteratorIndexer {
+ return &arrayIteratorIndexer{
+ basicArrayIterator: basicArrayIterator{array: array, pos: -1},
+ array: array,
+ }
+}
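A short sketch of plugging a sorted in-memory slice into NewArrayIterator via the Array interface above. kvArray is a hypothetical adapter, not part of the package.

package main

import (
	"bytes"
	"fmt"
	"sort"

	"github.com/syndtr/goleveldb/leveldb/iterator"
)

// kvArray adapts parallel, already-sorted key and value slices to iterator.Array.
type kvArray struct {
	keys, values [][]byte
}

func (a *kvArray) Len() int { return len(a.keys) }

// Search returns the smallest index whose key is >= the given key.
func (a *kvArray) Search(key []byte) int {
	return sort.Search(len(a.keys), func(i int) bool {
		return bytes.Compare(a.keys[i], key) >= 0
	})
}

func (a *kvArray) Index(i int) (key, value []byte) { return a.keys[i], a.values[i] }

func main() {
	a := &kvArray{
		keys:   [][]byte{[]byte("a"), []byte("b"), []byte("c")},
		values: [][]byte{[]byte("1"), []byte("2"), []byte("3")},
	}
	it := iterator.NewArrayIterator(a)
	defer it.Release()
	// Seek-then-iterate, as in the package-level examples.
	for ok := it.Seek([]byte("b")); ok; ok = it.Next() {
		fmt.Printf("%s=%s\n", it.Key(), it.Value())
	}
}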
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go
new file mode 100644
index 000000000..939adbb93
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/indexed_iter.go
@@ -0,0 +1,242 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package iterator
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// IteratorIndexer is the interface that wraps CommonIterator and the basic Get
+// method. IteratorIndexer provides the index for an indexed iterator.
+type IteratorIndexer interface {
+ CommonIterator
+
+ // Get returns a new data iterator for the current position, or nil if
+ // done.
+ Get() Iterator
+}
+
+type indexedIterator struct {
+ util.BasicReleaser
+ index IteratorIndexer
+ strict bool
+
+ data Iterator
+ err error
+ errf func(err error)
+ closed bool
+}
+
+func (i *indexedIterator) setData() {
+ if i.data != nil {
+ i.data.Release()
+ }
+ i.data = i.index.Get()
+}
+
+func (i *indexedIterator) clearData() {
+ if i.data != nil {
+ i.data.Release()
+ }
+ i.data = nil
+}
+
+func (i *indexedIterator) indexErr() {
+ if err := i.index.Error(); err != nil {
+ if i.errf != nil {
+ i.errf(err)
+ }
+ i.err = err
+ }
+}
+
+func (i *indexedIterator) dataErr() bool {
+ if err := i.data.Error(); err != nil {
+ if i.errf != nil {
+ i.errf(err)
+ }
+ if i.strict || !errors.IsCorrupted(err) {
+ i.err = err
+ return true
+ }
+ }
+ return false
+}
+
+func (i *indexedIterator) Valid() bool {
+ return i.data != nil && i.data.Valid()
+}
+
+func (i *indexedIterator) First() bool {
+ if i.err != nil {
+ return false
+ } else if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if !i.index.First() {
+ i.indexErr()
+ i.clearData()
+ return false
+ }
+ i.setData()
+ return i.Next()
+}
+
+func (i *indexedIterator) Last() bool {
+ if i.err != nil {
+ return false
+ } else if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if !i.index.Last() {
+ i.indexErr()
+ i.clearData()
+ return false
+ }
+ i.setData()
+ if !i.data.Last() {
+ if i.dataErr() {
+ return false
+ }
+ i.clearData()
+ return i.Prev()
+ }
+ return true
+}
+
+func (i *indexedIterator) Seek(key []byte) bool {
+ if i.err != nil {
+ return false
+ } else if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if !i.index.Seek(key) {
+ i.indexErr()
+ i.clearData()
+ return false
+ }
+ i.setData()
+ if !i.data.Seek(key) {
+ if i.dataErr() {
+ return false
+ }
+ i.clearData()
+ return i.Next()
+ }
+ return true
+}
+
+func (i *indexedIterator) Next() bool {
+ if i.err != nil {
+ return false
+ } else if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ switch {
+ case i.data != nil && !i.data.Next():
+ if i.dataErr() {
+ return false
+ }
+ i.clearData()
+ fallthrough
+ case i.data == nil:
+ if !i.index.Next() {
+ i.indexErr()
+ return false
+ }
+ i.setData()
+ return i.Next()
+ }
+ return true
+}
+
+func (i *indexedIterator) Prev() bool {
+ if i.err != nil {
+ return false
+ } else if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ switch {
+ case i.data != nil && !i.data.Prev():
+ if i.dataErr() {
+ return false
+ }
+ i.clearData()
+ fallthrough
+ case i.data == nil:
+ if !i.index.Prev() {
+ i.indexErr()
+ return false
+ }
+ i.setData()
+ if !i.data.Last() {
+ if i.dataErr() {
+ return false
+ }
+ i.clearData()
+ return i.Prev()
+ }
+ }
+ return true
+}
+
+func (i *indexedIterator) Key() []byte {
+ if i.data == nil {
+ return nil
+ }
+ return i.data.Key()
+}
+
+func (i *indexedIterator) Value() []byte {
+ if i.data == nil {
+ return nil
+ }
+ return i.data.Value()
+}
+
+func (i *indexedIterator) Release() {
+ i.clearData()
+ i.index.Release()
+ i.BasicReleaser.Release()
+}
+
+func (i *indexedIterator) Error() error {
+ if i.err != nil {
+ return i.err
+ }
+ if err := i.index.Error(); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (i *indexedIterator) SetErrorCallback(f func(err error)) {
+ i.errf = f
+}
+
+// NewIndexedIterator returns an 'indexed iterator'. An index is an iterator
+// that returns another iterator, a 'data iterator'. A 'data iterator' is the
+// iterator that contains the actual key/value pairs.
+//
+// If strict is true then any 'corruption errors' (i.e. errors.IsCorrupted(err) == true)
+// won't be ignored and will halt the 'indexed iterator'; otherwise the iterator will
+// continue to the next 'data iterator'. Corruption on the 'index iterator' will not be
+// ignored and will halt the iterator.
+func NewIndexedIterator(index IteratorIndexer, strict bool) Iterator {
+ return &indexedIterator{index: index, strict: strict}
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
new file mode 100644
index 000000000..96fb0f685
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/iter.go
@@ -0,0 +1,132 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package iterator provides interface and implementation to traverse over
+// contents of a database.
+package iterator
+
+import (
+ "errors"
+
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+var (
+ ErrIterReleased = errors.New("leveldb/iterator: iterator released")
+)
+
+// IteratorSeeker is the interface that wraps the 'seek methods'.
+type IteratorSeeker interface {
+ // First moves the iterator to the first key/value pair. If the iterator
+ // only contains one key/value pair then First and Last would move
+ // to the same key/value pair.
+ // It returns whether such a pair exists.
+ First() bool
+
+ // Last moves the iterator to the last key/value pair. If the iterator
+ // only contains one key/value pair then First and Last would move
+ // to the same key/value pair.
+ // It returns whether such a pair exists.
+ Last() bool
+
+ // Seek moves the iterator to the first key/value pair whose key is greater
+ // than or equal to the given key.
+ // It returns whether such a pair exists.
+ //
+ // It is safe to modify the contents of the argument after Seek returns.
+ Seek(key []byte) bool
+
+ // Next moves the iterator to the next key/value pair.
+ // It returns false if the iterator is exhausted.
+ Next() bool
+
+ // Prev moves the iterator to the previous key/value pair.
+ // It returns false if the iterator is exhausted.
+ Prev() bool
+}
+
+// CommonIterator is the interface that wraps common iterator methods.
+type CommonIterator interface {
+ IteratorSeeker
+
+ // util.Releaser is the interface that wraps the basic Release method.
+ // When called, Release will release any resources associated with the
+ // iterator.
+ util.Releaser
+
+ // util.ReleaseSetter is the interface that wraps the basic SetReleaser
+ // method.
+ util.ReleaseSetter
+
+ // TODO: Remove this when ready.
+ Valid() bool
+
+ // Error returns any accumulated error. Exhausting all the key/value pairs
+ // is not considered to be an error.
+ Error() error
+}
+
+// Iterator iterates over a DB's key/value pairs in key order.
+//
+// When an error is encountered, any 'seek method' will return false and will
+// yield no key/value pairs. The error can be queried by calling the Error
+// method. Calling Release is still necessary.
+//
+// An iterator must be released after use, but it is not necessary to read
+// an iterator until exhaustion.
+// Also, an iterator is not necessarily safe for concurrent use, but it is
+// safe to use multiple iterators concurrently, with each in a dedicated
+// goroutine.
+type Iterator interface {
+ CommonIterator
+
+ // Key returns the key of the current key/value pair, or nil if done.
+ // The caller should not modify the contents of the returned slice, and
+ // its contents may change on the next call to any 'seek method'.
+ Key() []byte
+
+ // Value returns the value of the current key/value pair, or nil if done.
+ // The caller should not modify the contents of the returned slice, and
+ // its contents may change on the next call to any 'seek method'.
+ Value() []byte
+}
+
+// ErrorCallbackSetter is the interface that wraps the basic SetErrorCallback
+// method.
+//
+// ErrorCallbackSetter is implemented by the indexed and merged iterators.
+type ErrorCallbackSetter interface {
+ // SetErrorCallback allows setting an error callback for the corresponding
+ // iterator. Use nil to clear the callback.
+ SetErrorCallback(f func(err error))
+}
+
+type emptyIterator struct {
+ util.BasicReleaser
+ err error
+}
+
+func (i *emptyIterator) rErr() {
+ if i.err == nil && i.Released() {
+ i.err = ErrIterReleased
+ }
+}
+
+func (*emptyIterator) Valid() bool { return false }
+func (i *emptyIterator) First() bool { i.rErr(); return false }
+func (i *emptyIterator) Last() bool { i.rErr(); return false }
+func (i *emptyIterator) Seek(key []byte) bool { i.rErr(); return false }
+func (i *emptyIterator) Next() bool { i.rErr(); return false }
+func (i *emptyIterator) Prev() bool { i.rErr(); return false }
+func (*emptyIterator) Key() []byte { return nil }
+func (*emptyIterator) Value() []byte { return nil }
+func (i *emptyIterator) Error() error { return i.err }
+
+// NewEmptyIterator creates an empty iterator. The err parameter can be
+// nil, but if not nil the given err will be returned by Error method.
+func NewEmptyIterator(err error) Iterator {
+ return &emptyIterator{err: err}
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go
new file mode 100644
index 000000000..1a7e29df8
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/iterator/merged_iter.go
@@ -0,0 +1,304 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package iterator
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+type dir int
+
+const (
+ dirReleased dir = iota - 1
+ dirSOI
+ dirEOI
+ dirBackward
+ dirForward
+)
+
+type mergedIterator struct {
+ cmp comparer.Comparer
+ iters []Iterator
+ strict bool
+
+ keys [][]byte
+ index int
+ dir dir
+ err error
+ errf func(err error)
+ releaser util.Releaser
+}
+
+func assertKey(key []byte) []byte {
+ if key == nil {
+ panic("leveldb/iterator: nil key")
+ }
+ return key
+}
+
+func (i *mergedIterator) iterErr(iter Iterator) bool {
+ if err := iter.Error(); err != nil {
+ if i.errf != nil {
+ i.errf(err)
+ }
+ if i.strict || !errors.IsCorrupted(err) {
+ i.err = err
+ return true
+ }
+ }
+ return false
+}
+
+func (i *mergedIterator) Valid() bool {
+ return i.err == nil && i.dir > dirEOI
+}
+
+func (i *mergedIterator) First() bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ for x, iter := range i.iters {
+ switch {
+ case iter.First():
+ i.keys[x] = assertKey(iter.Key())
+ case i.iterErr(iter):
+ return false
+ default:
+ i.keys[x] = nil
+ }
+ }
+ i.dir = dirSOI
+ return i.next()
+}
+
+func (i *mergedIterator) Last() bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ for x, iter := range i.iters {
+ switch {
+ case iter.Last():
+ i.keys[x] = assertKey(iter.Key())
+ case i.iterErr(iter):
+ return false
+ default:
+ i.keys[x] = nil
+ }
+ }
+ i.dir = dirEOI
+ return i.prev()
+}
+
+func (i *mergedIterator) Seek(key []byte) bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ for x, iter := range i.iters {
+ switch {
+ case iter.Seek(key):
+ i.keys[x] = assertKey(iter.Key())
+ case i.iterErr(iter):
+ return false
+ default:
+ i.keys[x] = nil
+ }
+ }
+ i.dir = dirSOI
+ return i.next()
+}
+
+func (i *mergedIterator) next() bool {
+ var key []byte
+ if i.dir == dirForward {
+ key = i.keys[i.index]
+ }
+ for x, tkey := range i.keys {
+ if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) < 0) {
+ key = tkey
+ i.index = x
+ }
+ }
+ if key == nil {
+ i.dir = dirEOI
+ return false
+ }
+ i.dir = dirForward
+ return true
+}
+
+func (i *mergedIterator) Next() bool {
+ if i.dir == dirEOI || i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ switch i.dir {
+ case dirSOI:
+ return i.First()
+ case dirBackward:
+ key := append([]byte{}, i.keys[i.index]...)
+ if !i.Seek(key) {
+ return false
+ }
+ return i.Next()
+ }
+
+ x := i.index
+ iter := i.iters[x]
+ switch {
+ case iter.Next():
+ i.keys[x] = assertKey(iter.Key())
+ case i.iterErr(iter):
+ return false
+ default:
+ i.keys[x] = nil
+ }
+ return i.next()
+}
+
+func (i *mergedIterator) prev() bool {
+ var key []byte
+ if i.dir == dirBackward {
+ key = i.keys[i.index]
+ }
+ for x, tkey := range i.keys {
+ if tkey != nil && (key == nil || i.cmp.Compare(tkey, key) > 0) {
+ key = tkey
+ i.index = x
+ }
+ }
+ if key == nil {
+ i.dir = dirSOI
+ return false
+ }
+ i.dir = dirBackward
+ return true
+}
+
+func (i *mergedIterator) Prev() bool {
+ if i.dir == dirSOI || i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ switch i.dir {
+ case dirEOI:
+ return i.Last()
+ case dirForward:
+ key := append([]byte{}, i.keys[i.index]...)
+ for x, iter := range i.iters {
+ if x == i.index {
+ continue
+ }
+ seek := iter.Seek(key)
+ switch {
+ case seek && iter.Prev(), !seek && iter.Last():
+ i.keys[x] = assertKey(iter.Key())
+ case i.iterErr(iter):
+ return false
+ default:
+ i.keys[x] = nil
+ }
+ }
+ }
+
+ x := i.index
+ iter := i.iters[x]
+ switch {
+ case iter.Prev():
+ i.keys[x] = assertKey(iter.Key())
+ case i.iterErr(iter):
+ return false
+ default:
+ i.keys[x] = nil
+ }
+ return i.prev()
+}
+
+func (i *mergedIterator) Key() []byte {
+ if i.err != nil || i.dir <= dirEOI {
+ return nil
+ }
+ return i.keys[i.index]
+}
+
+func (i *mergedIterator) Value() []byte {
+ if i.err != nil || i.dir <= dirEOI {
+ return nil
+ }
+ return i.iters[i.index].Value()
+}
+
+func (i *mergedIterator) Release() {
+ if i.dir != dirReleased {
+ i.dir = dirReleased
+ for _, iter := range i.iters {
+ iter.Release()
+ }
+ i.iters = nil
+ i.keys = nil
+ if i.releaser != nil {
+ i.releaser.Release()
+ i.releaser = nil
+ }
+ }
+}
+
+func (i *mergedIterator) SetReleaser(releaser util.Releaser) {
+ if i.dir == dirReleased {
+ panic(util.ErrReleased)
+ }
+ if i.releaser != nil && releaser != nil {
+ panic(util.ErrHasReleaser)
+ }
+ i.releaser = releaser
+}
+
+func (i *mergedIterator) Error() error {
+ return i.err
+}
+
+func (i *mergedIterator) SetErrorCallback(f func(err error)) {
+ i.errf = f
+}
+
+// NewMergedIterator returns an iterator that merges its input. Walking the
+// resultant iterator will return all key/value pairs of all input iterators
+// in strictly increasing key order, as defined by cmp.
+// The input's key ranges may overlap, but there are assumed to be no duplicate
+// keys: if iters[i] contains a key k then iters[j] will not contain that key k.
+// None of the iters may be nil.
+//
+// If strict is true then any 'corruption errors' (i.e. errors.IsCorrupted(err) == true)
+// won't be ignored and will halt the 'merged iterator'; otherwise the iterator will
+// continue to the next 'input iterator'.
+func NewMergedIterator(iters []Iterator, cmp comparer.Comparer, strict bool) Iterator {
+ return &mergedIterator{
+ iters: iters,
+ cmp: cmp,
+ strict: strict,
+ keys: make([][]byte, len(iters)),
+ }
+}
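A small sketch of merging two sorted, non-overlapping inputs with NewMergedIterator, ordered by the byte-wise comparer.DefaultComparer. keyArray is a hypothetical Array adapter built only for this example; values simply mirror the keys.

package main

import (
	"bytes"
	"fmt"
	"sort"

	"github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/syndtr/goleveldb/leveldb/iterator"
)

// keyArray exposes a sorted key list as iterator.Array.
type keyArray [][]byte

func (a keyArray) Len() int { return len(a) }
func (a keyArray) Search(key []byte) int {
	return sort.Search(len(a), func(i int) bool { return bytes.Compare(a[i], key) >= 0 })
}
func (a keyArray) Index(i int) (key, value []byte) { return a[i], a[i] }

func main() {
	a := iterator.NewArrayIterator(keyArray{[]byte("a"), []byte("c"), []byte("e")})
	b := iterator.NewArrayIterator(keyArray{[]byte("b"), []byte("d")})

	m := iterator.NewMergedIterator([]iterator.Iterator{a, b}, comparer.DefaultComparer, true)
	defer m.Release()
	for m.Next() {
		fmt.Printf("%s ", m.Key()) // prints: a b c d e
	}
	fmt.Println()
}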
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go b/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go
new file mode 100644
index 000000000..d094c3d0f
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/journal/journal.go
@@ -0,0 +1,524 @@
+// Copyright 2011 The LevelDB-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Taken from: https://code.google.com/p/leveldb-go/source/browse/leveldb/record/record.go?r=1d5ccbe03246da926391ee12d1c6caae054ff4b0
+// License, author, and contributor information can be found at the URLs below, respectively:
+// https://code.google.com/p/leveldb-go/source/browse/LICENSE
+// https://code.google.com/p/leveldb-go/source/browse/AUTHORS
+// https://code.google.com/p/leveldb-go/source/browse/CONTRIBUTORS
+
+// Package journal reads and writes sequences of journals. Each journal is a stream
+// of bytes that completes before the next journal starts.
+//
+// When reading, call Next to obtain an io.Reader for the next journal. Next will
+// return io.EOF when there are no more journals. It is valid to call Next
+// without reading the current journal to exhaustion.
+//
+// When writing, call Next to obtain an io.Writer for the next journal. Calling
+// Next finishes the current journal. Call Close to finish the final journal.
+//
+// Optionally, call Flush to finish the current journal and flush the underlying
+// writer without starting a new journal. To start a new journal after flushing,
+// call Next.
+//
+// Neither Readers nor Writers are safe for concurrent use.
+//
+// Example code:
+// func read(r io.Reader) ([]string, error) {
+// var ss []string
+// journals := journal.NewReader(r, nil, true, true)
+// for {
+// j, err := journals.Next()
+// if err == io.EOF {
+// break
+// }
+// if err != nil {
+// return nil, err
+// }
+// s, err := ioutil.ReadAll(j)
+// if err != nil {
+// return nil, err
+// }
+// ss = append(ss, string(s))
+// }
+// return ss, nil
+// }
+//
+// func write(w io.Writer, ss []string) error {
+// journals := journal.NewWriter(w)
+// for _, s := range ss {
+// j, err := journals.Next()
+// if err != nil {
+// return err
+// }
+// if _, err := j.Write([]byte(s)); err != nil {
+// return err
+// }
+// }
+// return journals.Close()
+// }
+//
+// The wire format is that the stream is divided into 32KiB blocks, and each
+// block contains a number of tightly packed chunks. Chunks cannot cross block
+// boundaries. The last block may be shorter than 32 KiB. Any unused bytes in a
+// block must be zero.
+//
+// A journal maps to one or more chunks. Each chunk has a 7 byte header (a 4
+// byte checksum, a 2 byte little-endian uint16 length, and a 1 byte chunk type)
+// followed by a payload. The checksum is over the chunk type and the payload.
+//
+// There are four chunk types: whether the chunk is the full journal, or the
+// first, middle or last chunk of a multi-chunk journal. A multi-chunk journal
+// has one first chunk, zero or more middle chunks, and one last chunk.
+//
+// The wire format allows for limited recovery in the face of data corruption:
+// on a format error (such as a checksum mismatch), the reader moves to the
+// next block and looks for the next full or first chunk.
+package journal
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// These constants are part of the wire format and should not be changed.
+const (
+ fullChunkType = 1
+ firstChunkType = 2
+ middleChunkType = 3
+ lastChunkType = 4
+)
+
+const (
+ blockSize = 32 * 1024
+ headerSize = 7
+)
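As a reference for the chunk layout used by nextChunk and fillHeader below, a standalone sketch that decodes the 7-byte header: a 4-byte little-endian checksum, a 2-byte little-endian payload length, and a 1-byte chunk type. The values in main are made up; in a real header the checksum covers the chunk type byte plus the payload.

package main

import (
	"encoding/binary"
	"fmt"
)

// decodeHeader splits a chunk header into its three fields.
func decodeHeader(h []byte) (checksum uint32, length uint16, chunkType byte) {
	checksum = binary.LittleEndian.Uint32(h[0:4])
	length = binary.LittleEndian.Uint16(h[4:6])
	chunkType = h[6]
	return
}

func main() {
	// Build a header by hand: checksum 0xdeadbeef, payload length 5, full chunk.
	h := make([]byte, 7)
	binary.LittleEndian.PutUint32(h[0:4], 0xdeadbeef)
	binary.LittleEndian.PutUint16(h[4:6], 5)
	h[6] = 1 // fullChunkType
	fmt.Println(decodeHeader(h))
}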
+
+type flusher interface {
+ Flush() error
+}
+
+// ErrCorrupted is the error type that is generated by a corrupted block or chunk.
+type ErrCorrupted struct {
+ Size int
+ Reason string
+}
+
+func (e *ErrCorrupted) Error() string {
+ return fmt.Sprintf("leveldb/journal: block/chunk corrupted: %s (%d bytes)", e.Reason, e.Size)
+}
+
+// Dropper is the interface that wraps the simple Drop method. The Drop
+// method will be called when the journal reader drops a block or chunk.
+type Dropper interface {
+ Drop(err error)
+}
+
+// Reader reads journals from an underlying io.Reader.
+type Reader struct {
+ // r is the underlying reader.
+ r io.Reader
+ // the dropper.
+ dropper Dropper
+ // strict flag.
+ strict bool
+ // checksum flag.
+ checksum bool
+ // seq is the sequence number of the current journal.
+ seq int
+ // buf[i:j] is the unread portion of the current chunk's payload.
+ // The low bound, i, excludes the chunk header.
+ i, j int
+ // n is the number of bytes of buf that are valid. Once reading has started,
+ // only the final block can have n < blockSize.
+ n int
+ // last is whether the current chunk is the last chunk of the journal.
+ last bool
+ // err is any accumulated error.
+ err error
+ // buf is the buffer.
+ buf [blockSize]byte
+}
+
+// NewReader returns a new reader. The dropper may be nil, and if
+// strict is true then a corrupted or invalid chunk will halt the journal
+// reader entirely.
+func NewReader(r io.Reader, dropper Dropper, strict, checksum bool) *Reader {
+ return &Reader{
+ r: r,
+ dropper: dropper,
+ strict: strict,
+ checksum: checksum,
+ last: true,
+ }
+}
+
+var errSkip = errors.New("leveldb/journal: skipped")
+
+func (r *Reader) corrupt(n int, reason string, skip bool) error {
+ if r.dropper != nil {
+ r.dropper.Drop(&ErrCorrupted{n, reason})
+ }
+ if r.strict && !skip {
+ r.err = errors.NewErrCorrupted(storage.FileDesc{}, &ErrCorrupted{n, reason})
+ return r.err
+ }
+ return errSkip
+}
+
+// nextChunk sets r.buf[r.i:r.j] to hold the next chunk's payload, reading the
+// next block into the buffer if necessary.
+func (r *Reader) nextChunk(first bool) error {
+ for {
+ if r.j+headerSize <= r.n {
+ checksum := binary.LittleEndian.Uint32(r.buf[r.j+0 : r.j+4])
+ length := binary.LittleEndian.Uint16(r.buf[r.j+4 : r.j+6])
+ chunkType := r.buf[r.j+6]
+ unprocBlock := r.n - r.j
+ if checksum == 0 && length == 0 && chunkType == 0 {
+ // Drop entire block.
+ r.i = r.n
+ r.j = r.n
+ return r.corrupt(unprocBlock, "zero header", false)
+ }
+ if chunkType < fullChunkType || chunkType > lastChunkType {
+ // Drop entire block.
+ r.i = r.n
+ r.j = r.n
+ return r.corrupt(unprocBlock, fmt.Sprintf("invalid chunk type %#x", chunkType), false)
+ }
+ r.i = r.j + headerSize
+ r.j = r.j + headerSize + int(length)
+ if r.j > r.n {
+ // Drop entire block.
+ r.i = r.n
+ r.j = r.n
+ return r.corrupt(unprocBlock, "chunk length overflows block", false)
+ } else if r.checksum && checksum != util.NewCRC(r.buf[r.i-1:r.j]).Value() {
+ // Drop entire block.
+ r.i = r.n
+ r.j = r.n
+ return r.corrupt(unprocBlock, "checksum mismatch", false)
+ }
+ if first && chunkType != fullChunkType && chunkType != firstChunkType {
+ chunkLength := (r.j - r.i) + headerSize
+ r.i = r.j
+ // Report the error, but skip it.
+ return r.corrupt(chunkLength, "orphan chunk", true)
+ }
+ r.last = chunkType == fullChunkType || chunkType == lastChunkType
+ return nil
+ }
+
+ // The last block.
+ if r.n < blockSize && r.n > 0 {
+ if !first {
+ return r.corrupt(0, "missing chunk part", false)
+ }
+ r.err = io.EOF
+ return r.err
+ }
+
+ // Read block.
+ n, err := io.ReadFull(r.r, r.buf[:])
+ if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+ return err
+ }
+ if n == 0 {
+ if !first {
+ return r.corrupt(0, "missing chunk part", false)
+ }
+ r.err = io.EOF
+ return r.err
+ }
+ r.i, r.j, r.n = 0, 0, n
+ }
+}
+
+// Next returns a reader for the next journal. It returns io.EOF if there are no
+// more journals. The reader returned becomes stale after the next Next call,
+// and should no longer be used. If strict is false, the reader will return
+// io.ErrUnexpectedEOF when it encounters a corrupted journal.
+func (r *Reader) Next() (io.Reader, error) {
+ r.seq++
+ if r.err != nil {
+ return nil, r.err
+ }
+ r.i = r.j
+ for {
+ if err := r.nextChunk(true); err == nil {
+ break
+ } else if err != errSkip {
+ return nil, err
+ }
+ }
+ return &singleReader{r, r.seq, nil}, nil
+}
+
+// Reset resets the journal reader, allowing reuse of the journal reader. Reset
+// returns the last accumulated error.
+func (r *Reader) Reset(reader io.Reader, dropper Dropper, strict, checksum bool) error {
+ r.seq++
+ err := r.err
+ r.r = reader
+ r.dropper = dropper
+ r.strict = strict
+ r.checksum = checksum
+ r.i = 0
+ r.j = 0
+ r.n = 0
+ r.last = true
+ r.err = nil
+ return err
+}
+
+type singleReader struct {
+ r *Reader
+ seq int
+ err error
+}
+
+func (x *singleReader) Read(p []byte) (int, error) {
+ r := x.r
+ if r.seq != x.seq {
+ return 0, errors.New("leveldb/journal: stale reader")
+ }
+ if x.err != nil {
+ return 0, x.err
+ }
+ if r.err != nil {
+ return 0, r.err
+ }
+ for r.i == r.j {
+ if r.last {
+ return 0, io.EOF
+ }
+ x.err = r.nextChunk(false)
+ if x.err != nil {
+ if x.err == errSkip {
+ x.err = io.ErrUnexpectedEOF
+ }
+ return 0, x.err
+ }
+ }
+ n := copy(p, r.buf[r.i:r.j])
+ r.i += n
+ return n, nil
+}
+
+func (x *singleReader) ReadByte() (byte, error) {
+ r := x.r
+ if r.seq != x.seq {
+ return 0, errors.New("leveldb/journal: stale reader")
+ }
+ if x.err != nil {
+ return 0, x.err
+ }
+ if r.err != nil {
+ return 0, r.err
+ }
+ for r.i == r.j {
+ if r.last {
+ return 0, io.EOF
+ }
+ x.err = r.nextChunk(false)
+ if x.err != nil {
+ if x.err == errSkip {
+ x.err = io.ErrUnexpectedEOF
+ }
+ return 0, x.err
+ }
+ }
+ c := r.buf[r.i]
+ r.i++
+ return c, nil
+}
+
+// Writer writes journals to an underlying io.Writer.
+type Writer struct {
+ // w is the underlying writer.
+ w io.Writer
+ // seq is the sequence number of the current journal.
+ seq int
+ // f is w as a flusher.
+ f flusher
+ // buf[i:j] is the bytes that will become the current chunk.
+ // The low bound, i, includes the chunk header.
+ i, j int
+ // buf[:written] has already been written to w.
+ // written is zero unless Flush has been called.
+ written int
+ // first is whether the current chunk is the first chunk of the journal.
+ first bool
+ // pending is whether a chunk is buffered but not yet written.
+ pending bool
+ // err is any accumulated error.
+ err error
+ // buf is the buffer.
+ buf [blockSize]byte
+}
+
+// NewWriter returns a new Writer.
+func NewWriter(w io.Writer) *Writer {
+ f, _ := w.(flusher)
+ return &Writer{
+ w: w,
+ f: f,
+ }
+}
+
+// fillHeader fills in the header for the pending chunk.
+func (w *Writer) fillHeader(last bool) {
+ if w.i+headerSize > w.j || w.j > blockSize {
+ panic("leveldb/journal: bad writer state")
+ }
+ if last {
+ if w.first {
+ w.buf[w.i+6] = fullChunkType
+ } else {
+ w.buf[w.i+6] = lastChunkType
+ }
+ } else {
+ if w.first {
+ w.buf[w.i+6] = firstChunkType
+ } else {
+ w.buf[w.i+6] = middleChunkType
+ }
+ }
+ binary.LittleEndian.PutUint32(w.buf[w.i+0:w.i+4], util.NewCRC(w.buf[w.i+6:w.j]).Value())
+ binary.LittleEndian.PutUint16(w.buf[w.i+4:w.i+6], uint16(w.j-w.i-headerSize))
+}
+
+// writeBlock writes the buffered block to the underlying writer, and reserves
+// space for the next chunk's header.
+func (w *Writer) writeBlock() {
+ _, w.err = w.w.Write(w.buf[w.written:])
+ w.i = 0
+ w.j = headerSize
+ w.written = 0
+}
+
+// writePending finishes the current journal and writes the buffer to the
+// underlying writer.
+func (w *Writer) writePending() {
+ if w.err != nil {
+ return
+ }
+ if w.pending {
+ w.fillHeader(true)
+ w.pending = false
+ }
+ _, w.err = w.w.Write(w.buf[w.written:w.j])
+ w.written = w.j
+}
+
+// Close finishes the current journal and closes the writer.
+func (w *Writer) Close() error {
+ w.seq++
+ w.writePending()
+ if w.err != nil {
+ return w.err
+ }
+ w.err = errors.New("leveldb/journal: closed Writer")
+ return nil
+}
+
+// Flush finishes the current journal, writes to the underlying writer, and
+// flushes it if that writer implements interface{ Flush() error }.
+func (w *Writer) Flush() error {
+ w.seq++
+ w.writePending()
+ if w.err != nil {
+ return w.err
+ }
+ if w.f != nil {
+ w.err = w.f.Flush()
+ return w.err
+ }
+ return nil
+}
+
+// Reset resets the journal writer, allowing reuse of the journal writer. Reset
+// will also close the journal writer if it is not already closed.
+func (w *Writer) Reset(writer io.Writer) (err error) {
+ w.seq++
+ if w.err == nil {
+ w.writePending()
+ err = w.err
+ }
+ w.w = writer
+ w.f, _ = writer.(flusher)
+ w.i = 0
+ w.j = 0
+ w.written = 0
+ w.first = false
+ w.pending = false
+ w.err = nil
+ return
+}
+
+// Next returns a writer for the next journal. The writer returned becomes stale
+// after the next Close, Flush or Next call, and should no longer be used.
+func (w *Writer) Next() (io.Writer, error) {
+ w.seq++
+ if w.err != nil {
+ return nil, w.err
+ }
+ if w.pending {
+ w.fillHeader(true)
+ }
+ w.i = w.j
+ w.j = w.j + headerSize
+ // Check if there is room in the block for the header.
+ if w.j > blockSize {
+ // Fill in the rest of the block with zeroes.
+ for k := w.i; k < blockSize; k++ {
+ w.buf[k] = 0
+ }
+ w.writeBlock()
+ if w.err != nil {
+ return nil, w.err
+ }
+ }
+ w.first = true
+ w.pending = true
+ return singleWriter{w, w.seq}, nil
+}
+
+type singleWriter struct {
+ w *Writer
+ seq int
+}
+
+func (x singleWriter) Write(p []byte) (int, error) {
+ w := x.w
+ if w.seq != x.seq {
+ return 0, errors.New("leveldb/journal: stale writer")
+ }
+ if w.err != nil {
+ return 0, w.err
+ }
+ n0 := len(p)
+ for len(p) > 0 {
+ // Write a block, if it is full.
+ if w.j == blockSize {
+ w.fillHeader(false)
+ w.writeBlock()
+ if w.err != nil {
+ return 0, w.err
+ }
+ w.first = false
+ }
+ // Copy bytes into the buffer.
+ n := copy(w.buf[w.j:], p)
+ w.j += n
+ p = p[n:]
+ }
+ return n0, nil
+}
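For orientation, here is a minimal sketch of how the Reader and Writer above are typically driven together; the record contents are illustrative only.

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/syndtr/goleveldb/leveldb/journal"
)

func main() {
	var buf bytes.Buffer

	// Write two journals; each may span several chunks across fixed-size blocks.
	w := journal.NewWriter(&buf)
	for _, rec := range []string{"first record", "second record"} {
		jw, err := w.Next()
		if err != nil {
			panic(err)
		}
		if _, err := jw.Write([]byte(rec)); err != nil {
			panic(err)
		}
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// Read them back. A nil Dropper and strict=false means corrupted chunks
	// are reported and skipped instead of failing the whole read.
	r := journal.NewReader(&buf, nil, false, true)
	for {
		jr, err := r.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		data, err := io.ReadAll(jr)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s\n", data)
	}
}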
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/key.go b/vendor/github.com/syndtr/goleveldb/leveldb/key.go
new file mode 100644
index 000000000..ad8f51ec8
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/key.go
@@ -0,0 +1,143 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+// ErrInternalKeyCorrupted records internal key corruption.
+type ErrInternalKeyCorrupted struct {
+ Ikey []byte
+ Reason string
+}
+
+func (e *ErrInternalKeyCorrupted) Error() string {
+ return fmt.Sprintf("leveldb: internal key %q corrupted: %s", e.Ikey, e.Reason)
+}
+
+func newErrInternalKeyCorrupted(ikey []byte, reason string) error {
+ return errors.NewErrCorrupted(storage.FileDesc{}, &ErrInternalKeyCorrupted{append([]byte{}, ikey...), reason})
+}
+
+type keyType uint
+
+func (kt keyType) String() string {
+ switch kt {
+ case keyTypeDel:
+ return "d"
+ case keyTypeVal:
+ return "v"
+ }
+ return fmt.Sprintf("", uint(kt))
+}
+
+// Value types encoded as the last component of internal keys.
+// Don't modify; these values are saved to disk.
+const (
+ keyTypeDel = keyType(0)
+ keyTypeVal = keyType(1)
+)
+
+// keyTypeSeek defines the keyType that should be passed when constructing an
+// internal key for seeking to a particular sequence number (since we
+// sort sequence numbers in decreasing order and the value type is
+// embedded as the low 8 bits in the sequence number in internal keys,
+// we need to use the highest-numbered ValueType, not the lowest).
+const keyTypeSeek = keyTypeVal
+
+const (
+ // Maximum value possible for a sequence number; the low 8 bits are
+ // used by the value type, so both can be packed together in a single
+ // 64-bit integer.
+ keyMaxSeq = (uint64(1) << 56) - 1
+ // Maximum value possible for packed sequence number and type.
+ keyMaxNum = (keyMaxSeq << 8) | uint64(keyTypeSeek)
+)
+
+// Maximum number encoded in bytes.
+var keyMaxNumBytes = make([]byte, 8)
+
+func init() {
+ binary.LittleEndian.PutUint64(keyMaxNumBytes, keyMaxNum)
+}
+
+type internalKey []byte
+
+func makeInternalKey(dst, ukey []byte, seq uint64, kt keyType) internalKey {
+ if seq > keyMaxSeq {
+ panic("leveldb: invalid sequence number")
+ } else if kt > keyTypeVal {
+ panic("leveldb: invalid type")
+ }
+
+ dst = ensureBuffer(dst, len(ukey)+8)
+ copy(dst, ukey)
+ binary.LittleEndian.PutUint64(dst[len(ukey):], (seq<<8)|uint64(kt))
+ return internalKey(dst)
+}
+
+func parseInternalKey(ik []byte) (ukey []byte, seq uint64, kt keyType, err error) {
+ if len(ik) < 8 {
+ return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid length")
+ }
+ num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
+ seq, kt = uint64(num>>8), keyType(num&0xff)
+ if kt > keyTypeVal {
+ return nil, 0, 0, newErrInternalKeyCorrupted(ik, "invalid type")
+ }
+ ukey = ik[:len(ik)-8]
+ return
+}
+
+func validInternalKey(ik []byte) bool {
+ _, _, _, err := parseInternalKey(ik)
+ return err == nil
+}
+
+func (ik internalKey) assert() {
+ if ik == nil {
+ panic("leveldb: nil internalKey")
+ }
+ if len(ik) < 8 {
+ panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid length", []byte(ik), len(ik)))
+ }
+}
+
+func (ik internalKey) ukey() []byte {
+ ik.assert()
+ return ik[:len(ik)-8]
+}
+
+func (ik internalKey) num() uint64 {
+ ik.assert()
+ return binary.LittleEndian.Uint64(ik[len(ik)-8:])
+}
+
+func (ik internalKey) parseNum() (seq uint64, kt keyType) {
+ num := ik.num()
+ seq, kt = uint64(num>>8), keyType(num&0xff)
+ if kt > keyTypeVal {
+ panic(fmt.Sprintf("leveldb: internal key %q, len=%d: invalid type %#x", []byte(ik), len(ik), kt))
+ }
+ return
+}
+
+func (ik internalKey) String() string {
+ if ik == nil {
+ return ""
+ }
+
+ if ukey, seq, kt, err := parseInternalKey(ik); err == nil {
+ return fmt.Sprintf("%s,%s%d", shorten(string(ukey)), kt, seq)
+ }
+ return fmt.Sprintf("", []byte(ik))
+}
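As a worked illustration of the trailer layout used by makeInternalKey and parseInternalKey above, the following standalone sketch reproduces the same little-endian packing without using the unexported helpers.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	ukey := []byte("answer")
	seq := uint64(42)
	kt := uint64(1) // keyTypeVal

	// Internal key = user key || little-endian uint64 of (seq<<8 | type).
	ik := make([]byte, len(ukey)+8)
	copy(ik, ukey)
	binary.LittleEndian.PutUint64(ik[len(ukey):], seq<<8|kt)

	// Parsing reverses the packing.
	num := binary.LittleEndian.Uint64(ik[len(ik)-8:])
	fmt.Printf("ukey=%q seq=%d type=%d\n", ik[:len(ik)-8], num>>8, num&0xff)
	// Output: ukey="answer" seq=42 type=1
}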
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
new file mode 100644
index 000000000..b661c08a9
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/memdb/memdb.go
@@ -0,0 +1,475 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package memdb provides in-memory key/value database implementation.
+package memdb
+
+import (
+ "math/rand"
+ "sync"
+
+ "github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// Common errors.
+var (
+ ErrNotFound = errors.ErrNotFound
+ ErrIterReleased = errors.New("leveldb/memdb: iterator released")
+)
+
+const tMaxHeight = 12
+
+type dbIter struct {
+ util.BasicReleaser
+ p *DB
+ slice *util.Range
+ node int
+ forward bool
+ key, value []byte
+ err error
+}
+
+func (i *dbIter) fill(checkStart, checkLimit bool) bool {
+ if i.node != 0 {
+ n := i.p.nodeData[i.node]
+ m := n + i.p.nodeData[i.node+nKey]
+ i.key = i.p.kvData[n:m]
+ if i.slice != nil {
+ switch {
+ case checkLimit && i.slice.Limit != nil && i.p.cmp.Compare(i.key, i.slice.Limit) >= 0:
+ fallthrough
+ case checkStart && i.slice.Start != nil && i.p.cmp.Compare(i.key, i.slice.Start) < 0:
+ i.node = 0
+ goto bail
+ }
+ }
+ i.value = i.p.kvData[m : m+i.p.nodeData[i.node+nVal]]
+ return true
+ }
+bail:
+ i.key = nil
+ i.value = nil
+ return false
+}
+
+func (i *dbIter) Valid() bool {
+ return i.node != 0
+}
+
+func (i *dbIter) First() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ i.forward = true
+ i.p.mu.RLock()
+ defer i.p.mu.RUnlock()
+ if i.slice != nil && i.slice.Start != nil {
+ i.node, _ = i.p.findGE(i.slice.Start, false)
+ } else {
+ i.node = i.p.nodeData[nNext]
+ }
+ return i.fill(false, true)
+}
+
+func (i *dbIter) Last() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ i.forward = false
+ i.p.mu.RLock()
+ defer i.p.mu.RUnlock()
+ if i.slice != nil && i.slice.Limit != nil {
+ i.node = i.p.findLT(i.slice.Limit)
+ } else {
+ i.node = i.p.findLast()
+ }
+ return i.fill(true, false)
+}
+
+func (i *dbIter) Seek(key []byte) bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ i.forward = true
+ i.p.mu.RLock()
+ defer i.p.mu.RUnlock()
+ if i.slice != nil && i.slice.Start != nil && i.p.cmp.Compare(key, i.slice.Start) < 0 {
+ key = i.slice.Start
+ }
+ i.node, _ = i.p.findGE(key, false)
+ return i.fill(false, true)
+}
+
+func (i *dbIter) Next() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.node == 0 {
+ if !i.forward {
+ return i.First()
+ }
+ return false
+ }
+ i.forward = true
+ i.p.mu.RLock()
+ defer i.p.mu.RUnlock()
+ i.node = i.p.nodeData[i.node+nNext]
+ return i.fill(false, true)
+}
+
+func (i *dbIter) Prev() bool {
+ if i.Released() {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.node == 0 {
+ if i.forward {
+ return i.Last()
+ }
+ return false
+ }
+ i.forward = false
+ i.p.mu.RLock()
+ defer i.p.mu.RUnlock()
+ i.node = i.p.findLT(i.key)
+ return i.fill(true, false)
+}
+
+func (i *dbIter) Key() []byte {
+ return i.key
+}
+
+func (i *dbIter) Value() []byte {
+ return i.value
+}
+
+func (i *dbIter) Error() error { return i.err }
+
+func (i *dbIter) Release() {
+ if !i.Released() {
+ i.p = nil
+ i.node = 0
+ i.key = nil
+ i.value = nil
+ i.BasicReleaser.Release()
+ }
+}
+
+const (
+ nKV = iota
+ nKey
+ nVal
+ nHeight
+ nNext
+)
+
+// DB is an in-memory key/value database.
+type DB struct {
+ cmp comparer.BasicComparer
+ rnd *rand.Rand
+
+ mu sync.RWMutex
+ kvData []byte
+ // Node data:
+ // [0] : KV offset
+ // [1] : Key length
+ // [2] : Value length
+ // [3] : Height
+ // [3..height] : Next nodes
+ nodeData []int
+ prevNode [tMaxHeight]int
+ maxHeight int
+ n int
+ kvSize int
+}
+
+func (p *DB) randHeight() (h int) {
+ const branching = 4
+ h = 1
+ for h < tMaxHeight && p.rnd.Int()%branching == 0 {
+ h++
+ }
+ return
+}
+
+// Must hold RW-lock if prev == true, as it uses the shared prevNode slice.
+func (p *DB) findGE(key []byte, prev bool) (int, bool) {
+ node := 0
+ h := p.maxHeight - 1
+ for {
+ next := p.nodeData[node+nNext+h]
+ cmp := 1
+ if next != 0 {
+ o := p.nodeData[next]
+ cmp = p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key)
+ }
+ if cmp < 0 {
+ // Keep searching in this list
+ node = next
+ } else {
+ if prev {
+ p.prevNode[h] = node
+ } else if cmp == 0 {
+ return next, true
+ }
+ if h == 0 {
+ return next, cmp == 0
+ }
+ h--
+ }
+ }
+}
+
+func (p *DB) findLT(key []byte) int {
+ node := 0
+ h := p.maxHeight - 1
+ for {
+ next := p.nodeData[node+nNext+h]
+ o := p.nodeData[next]
+ if next == 0 || p.cmp.Compare(p.kvData[o:o+p.nodeData[next+nKey]], key) >= 0 {
+ if h == 0 {
+ break
+ }
+ h--
+ } else {
+ node = next
+ }
+ }
+ return node
+}
+
+func (p *DB) findLast() int {
+ node := 0
+ h := p.maxHeight - 1
+ for {
+ next := p.nodeData[node+nNext+h]
+ if next == 0 {
+ if h == 0 {
+ break
+ }
+ h--
+ } else {
+ node = next
+ }
+ }
+ return node
+}
+
+// Put sets the value for the given key. It overwrites any previous value
+// for that key; a DB is not a multi-map.
+//
+// It is safe to modify the contents of the arguments after Put returns.
+func (p *DB) Put(key []byte, value []byte) error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ if node, exact := p.findGE(key, true); exact {
+ kvOffset := len(p.kvData)
+ p.kvData = append(p.kvData, key...)
+ p.kvData = append(p.kvData, value...)
+ p.nodeData[node] = kvOffset
+ m := p.nodeData[node+nVal]
+ p.nodeData[node+nVal] = len(value)
+ p.kvSize += len(value) - m
+ return nil
+ }
+
+ h := p.randHeight()
+ if h > p.maxHeight {
+ for i := p.maxHeight; i < h; i++ {
+ p.prevNode[i] = 0
+ }
+ p.maxHeight = h
+ }
+
+ kvOffset := len(p.kvData)
+ p.kvData = append(p.kvData, key...)
+ p.kvData = append(p.kvData, value...)
+ // Node
+ node := len(p.nodeData)
+ p.nodeData = append(p.nodeData, kvOffset, len(key), len(value), h)
+ for i, n := range p.prevNode[:h] {
+ m := n + nNext + i
+ p.nodeData = append(p.nodeData, p.nodeData[m])
+ p.nodeData[m] = node
+ }
+
+ p.kvSize += len(key) + len(value)
+ p.n++
+ return nil
+}
+
+// Delete deletes the value for the given key. It returns ErrNotFound if
+// the DB does not contain the key.
+//
+// It is safe to modify the contents of the arguments after Delete returns.
+func (p *DB) Delete(key []byte) error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+
+ node, exact := p.findGE(key, true)
+ if !exact {
+ return ErrNotFound
+ }
+
+ h := p.nodeData[node+nHeight]
+ for i, n := range p.prevNode[:h] {
+ m := n + nNext + i
+ p.nodeData[m] = p.nodeData[p.nodeData[m]+nNext+i]
+ }
+
+ p.kvSize -= p.nodeData[node+nKey] + p.nodeData[node+nVal]
+ p.n--
+ return nil
+}
+
+// Contains returns true if the given key is in the DB.
+//
+// It is safe to modify the contents of the arguments after Contains returns.
+func (p *DB) Contains(key []byte) bool {
+ p.mu.RLock()
+ _, exact := p.findGE(key, false)
+ p.mu.RUnlock()
+ return exact
+}
+
+// Get gets the value for the given key. It returns ErrNotFound if the
+// DB does not contain the key.
+//
+// The caller should not modify the contents of the returned slice, but
+// it is safe to modify the contents of the argument after Get returns.
+func (p *DB) Get(key []byte) (value []byte, err error) {
+ p.mu.RLock()
+ if node, exact := p.findGE(key, false); exact {
+ o := p.nodeData[node] + p.nodeData[node+nKey]
+ value = p.kvData[o : o+p.nodeData[node+nVal]]
+ } else {
+ err = ErrNotFound
+ }
+ p.mu.RUnlock()
+ return
+}
+
+// Find finds the key/value pair whose key is greater than or equal to the
+// given key. It returns ErrNotFound if the table doesn't contain
+// such a pair.
+//
+// The caller should not modify the contents of the returned slice, but
+// it is safe to modify the contents of the argument after Find returns.
+func (p *DB) Find(key []byte) (rkey, value []byte, err error) {
+ p.mu.RLock()
+ if node, _ := p.findGE(key, false); node != 0 {
+ n := p.nodeData[node]
+ m := n + p.nodeData[node+nKey]
+ rkey = p.kvData[n:m]
+ value = p.kvData[m : m+p.nodeData[node+nVal]]
+ } else {
+ err = ErrNotFound
+ }
+ p.mu.RUnlock()
+ return
+}
+
+// NewIterator returns an iterator of the DB.
+// The returned iterator is not safe for concurrent use, but it is safe to use
+// multiple iterators concurrently, with each in a dedicated goroutine.
+// It is also safe to use an iterator concurrently with modifying its
+// underlying DB. However, the resultant key/value pairs are not guaranteed
+// to be a consistent snapshot of the DB at a particular point in time.
+//
+// Slice allows slicing the iterator to only contain keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// DB. And a nil Range.Limit is treated as a key after all keys in
+// the DB.
+//
+// The iterator must be released after use, by calling Release method.
+//
+// Also read Iterator documentation of the leveldb/iterator package.
+func (p *DB) NewIterator(slice *util.Range) iterator.Iterator {
+ return &dbIter{p: p, slice: slice}
+}
+
+// Capacity returns keys/values buffer capacity.
+func (p *DB) Capacity() int {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+ return cap(p.kvData)
+}
+
+// Size returns the sum of key and value lengths. Note that deleted
+// key/value pairs will not be accounted for, but they will still consume
+// the buffer, since the buffer is append-only.
+func (p *DB) Size() int {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+ return p.kvSize
+}
+
+// Free returns the free space in the keys/values buffer before it needs to grow.
+func (p *DB) Free() int {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+ return cap(p.kvData) - len(p.kvData)
+}
+
+// Len returns the number of entries in the DB.
+func (p *DB) Len() int {
+ p.mu.RLock()
+ defer p.mu.RUnlock()
+ return p.n
+}
+
+// Reset resets the DB to its initial empty state, allowing reuse of the buffer.
+func (p *DB) Reset() {
+ p.mu.Lock()
+ p.rnd = rand.New(rand.NewSource(0xdeadbeef))
+ p.maxHeight = 1
+ p.n = 0
+ p.kvSize = 0
+ p.kvData = p.kvData[:0]
+ p.nodeData = p.nodeData[:nNext+tMaxHeight]
+ p.nodeData[nKV] = 0
+ p.nodeData[nKey] = 0
+ p.nodeData[nVal] = 0
+ p.nodeData[nHeight] = tMaxHeight
+ for n := 0; n < tMaxHeight; n++ {
+ p.nodeData[nNext+n] = 0
+ p.prevNode[n] = 0
+ }
+ p.mu.Unlock()
+}
+
+// New creates a new initialized in-memory key/value DB. The capacity
+// is the initial key/value buffer capacity. The capacity is advisory,
+// not enforced.
+//
+// This DB is append-only: deleting an entry removes its node but does not
+// reclaim the KV buffer.
+//
+// The returned DB instance is safe for concurrent use.
+func New(cmp comparer.BasicComparer, capacity int) *DB {
+ p := &DB{
+ cmp: cmp,
+ rnd: rand.New(rand.NewSource(0xdeadbeef)),
+ maxHeight: 1,
+ kvData: make([]byte, 0, capacity),
+ nodeData: make([]int, 4+tMaxHeight),
+ }
+ p.nodeData[nHeight] = tMaxHeight
+ return p
+}
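A minimal usage sketch for the memdb package above; the keys, values and initial capacity are illustrative.

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/comparer"
	"github.com/syndtr/goleveldb/leveldb/memdb"
)

func main() {
	// New takes a comparer and an advisory initial buffer capacity.
	db := memdb.New(comparer.DefaultComparer, 1<<20)

	if err := db.Put([]byte("foo"), []byte("bar")); err != nil {
		panic(err)
	}
	if v, err := db.Get([]byte("foo")); err == nil {
		fmt.Printf("foo=%s\n", v)
	}

	// Iterate over all entries; the iterator must be released after use.
	it := db.NewIterator(nil)
	defer it.Release()
	for it.Next() {
		fmt.Printf("%s=%s\n", it.Key(), it.Value())
	}
}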
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go
new file mode 100644
index 000000000..44e7d9adc
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/opt/options.go
@@ -0,0 +1,684 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package opt provides sets of options used by LevelDB.
+package opt
+
+import (
+ "math"
+
+ "github.com/syndtr/goleveldb/leveldb/cache"
+ "github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/syndtr/goleveldb/leveldb/filter"
+)
+
+const (
+ KiB = 1024
+ MiB = KiB * 1024
+ GiB = MiB * 1024
+)
+
+var (
+ DefaultBlockCacher = LRUCacher
+ DefaultBlockCacheCapacity = 8 * MiB
+ DefaultBlockRestartInterval = 16
+ DefaultBlockSize = 4 * KiB
+ DefaultCompactionExpandLimitFactor = 25
+ DefaultCompactionGPOverlapsFactor = 10
+ DefaultCompactionL0Trigger = 4
+ DefaultCompactionSourceLimitFactor = 1
+ DefaultCompactionTableSize = 2 * MiB
+ DefaultCompactionTableSizeMultiplier = 1.0
+ DefaultCompactionTotalSize = 10 * MiB
+ DefaultCompactionTotalSizeMultiplier = 10.0
+ DefaultCompressionType = SnappyCompression
+ DefaultIteratorSamplingRate = 1 * MiB
+ DefaultOpenFilesCacher = LRUCacher
+ DefaultOpenFilesCacheCapacity = 500
+ DefaultWriteBuffer = 4 * MiB
+ DefaultWriteL0PauseTrigger = 12
+ DefaultWriteL0SlowdownTrigger = 8
+)
+
+// Cacher is a caching algorithm.
+type Cacher interface {
+ New(capacity int) cache.Cacher
+}
+
+type CacherFunc struct {
+ NewFunc func(capacity int) cache.Cacher
+}
+
+func (f *CacherFunc) New(capacity int) cache.Cacher {
+ if f.NewFunc != nil {
+ return f.NewFunc(capacity)
+ }
+ return nil
+}
+
+func noCacher(int) cache.Cacher { return nil }
+
+var (
+ // LRUCacher is the LRU-cache algorithm.
+ LRUCacher = &CacherFunc{cache.NewLRU}
+
+ // NoCacher is the value to disable caching algorithm.
+ NoCacher = &CacherFunc{}
+)
+
+// Compression is the 'sorted table' block compression algorithm to use.
+type Compression uint
+
+func (c Compression) String() string {
+ switch c {
+ case DefaultCompression:
+ return "default"
+ case NoCompression:
+ return "none"
+ case SnappyCompression:
+ return "snappy"
+ }
+ return "invalid"
+}
+
+const (
+ DefaultCompression Compression = iota
+ NoCompression
+ SnappyCompression
+ nCompression
+)
+
+// Strict is the DB 'strict level'.
+type Strict uint
+
+const (
+ // If present then a corrupted or invalid chunk or block in manifest
+ // journal will cause an error instead of being dropped.
+ // This will prevent a database with a corrupted manifest from being opened.
+ StrictManifest Strict = 1 << iota
+
+ // If present then journal chunk checksum will be verified.
+ StrictJournalChecksum
+
+ // If present then a corrupted or invalid chunk or block in journal
+ // will cause an error instead of being dropped.
+ // This will prevent a database with a corrupted journal from being opened.
+ StrictJournal
+
+ // If present then 'sorted table' block checksum will be verified.
+ // This has effect on both 'read operation' and compaction.
+ StrictBlockChecksum
+
+ // If present then a corrupted 'sorted table' will fail compaction.
+ // The database will enter read-only mode.
+ StrictCompaction
+
+ // If present then a corrupted 'sorted table' will halt 'read operation'.
+ StrictReader
+
+ // If present then leveldb.Recover will drop corrupted 'sorted table'.
+ StrictRecovery
+
+ // This is only applicable to ReadOptions; if present then this ReadOptions
+ // 'strict level' will override the global one.
+ StrictOverride
+
+ // StrictAll enables all strict flags.
+ StrictAll = StrictManifest | StrictJournalChecksum | StrictJournal | StrictBlockChecksum | StrictCompaction | StrictReader | StrictRecovery
+
+ // DefaultStrict is the default strict flags. Specifying any strict flags
+ // will override the default strict flags as a whole (i.e. they are not OR'ed).
+ DefaultStrict = StrictJournalChecksum | StrictBlockChecksum | StrictCompaction | StrictReader
+
+ // NoStrict disables all strict flags. It overrides the default strict flags.
+ NoStrict = ^StrictAll
+)
+
+// Options holds the optional parameters for the DB at large.
+type Options struct {
+ // AltFilters defines one or more 'alternative filters'.
+ // 'alternative filters' will be used during reads if a filter block
+ // does not match with the 'effective filter'.
+ //
+ // The default value is nil
+ AltFilters []filter.Filter
+
+ // BlockCacher provides cache algorithm for LevelDB 'sorted table' block caching.
+ // Specify NoCacher to disable caching algorithm.
+ //
+ // The default value is LRUCacher.
+ BlockCacher Cacher
+
+ // BlockCacheCapacity defines the capacity of the 'sorted table' block caching.
+ // Use -1 for zero; this has the same effect as specifying NoCacher for BlockCacher.
+ //
+ // The default value is 8MiB.
+ BlockCacheCapacity int
+
+ // BlockRestartInterval is the number of keys between restart points for
+ // delta encoding of keys.
+ //
+ // The default value is 16.
+ BlockRestartInterval int
+
+ // BlockSize is the minimum uncompressed size in bytes of each 'sorted table'
+ // block.
+ //
+ // The default value is 4KiB.
+ BlockSize int
+
+ // CompactionExpandLimitFactor limits the compaction size after it has been expanded.
+ // This will be multiplied by table size limit at compaction target level.
+ //
+ // The default value is 25.
+ CompactionExpandLimitFactor int
+
+ // CompactionGPOverlapsFactor limits overlaps in grandparent (Level + 2) that a
+ // single 'sorted table' generates.
+ // This will be multiplied by table size limit at grandparent level.
+ //
+ // The default value is 10.
+ CompactionGPOverlapsFactor int
+
+ // CompactionL0Trigger defines the number of 'sorted tables' at level-0 that will
+ // trigger compaction.
+ //
+ // The default value is 4.
+ CompactionL0Trigger int
+
+ // CompactionSourceLimitFactor limits compaction source size. This doesn't apply to
+ // level-0.
+ // This will be multiplied by table size limit at compaction target level.
+ //
+ // The default value is 1.
+ CompactionSourceLimitFactor int
+
+ // CompactionTableSize limits size of 'sorted table' that compaction generates.
+ // The limits for each level will be calculated as:
+ // CompactionTableSize * (CompactionTableSizeMultiplier ^ Level)
+ // The multiplier for each level can also be fine-tuned using CompactionTableSizeMultiplierPerLevel.
+ //
+ // The default value is 2MiB.
+ CompactionTableSize int
+
+ // CompactionTableSizeMultiplier defines multiplier for CompactionTableSize.
+ //
+ // The default value is 1.
+ CompactionTableSizeMultiplier float64
+
+ // CompactionTableSizeMultiplierPerLevel defines per-level multiplier for
+ // CompactionTableSize.
+ // Use zero to skip a level.
+ //
+ // The default value is nil.
+ CompactionTableSizeMultiplierPerLevel []float64
+
+ // CompactionTotalSize limits total size of 'sorted table' for each level.
+ // The limits for each level will be calculated as:
+ // CompactionTotalSize * (CompactionTotalSizeMultiplier ^ Level)
+ // The multiplier for each level can also be fine-tuned using
+ // CompactionTotalSizeMultiplierPerLevel.
+ //
+ // The default value is 10MiB.
+ CompactionTotalSize int
+
+ // CompactionTotalSizeMultiplier defines multiplier for CompactionTotalSize.
+ //
+ // The default value is 10.
+ CompactionTotalSizeMultiplier float64
+
+ // CompactionTotalSizeMultiplierPerLevel defines per-level multiplier for
+ // CompactionTotalSize.
+ // Use zero to skip a level.
+ //
+ // The default value is nil.
+ CompactionTotalSizeMultiplierPerLevel []float64
+
+ // Comparer defines a total ordering over the space of []byte keys: a 'less
+ // than' relationship. The same comparison algorithm must be used for reads
+ // and writes over the lifetime of the DB.
+ //
+ // The default value uses the same ordering as bytes.Compare.
+ Comparer comparer.Comparer
+
+ // Compression defines the 'sorted table' block compression to use.
+ //
+ // The default value (DefaultCompression) uses snappy compression.
+ Compression Compression
+
+ // DisableBufferPool allows disabling use of util.BufferPool functionality.
+ //
+ // The default value is false.
+ DisableBufferPool bool
+
+ // DisableBlockCache allows disabling use of cache.Cache functionality on
+ // 'sorted table' blocks.
+ //
+ // The default value is false.
+ DisableBlockCache bool
+
+ // DisableCompactionBackoff allows disabling compaction retry backoff.
+ //
+ // The default value is false.
+ DisableCompactionBackoff bool
+
+ // DisableLargeBatchTransaction allows disabling switch-to-transaction mode
+ // on large batch writes. If not disabled, batch writes larger than WriteBuffer
+ // will use a transaction.
+ //
+ // The default is false.
+ DisableLargeBatchTransaction bool
+
+ // ErrorIfExist defines whether an error should be returned if the DB already
+ // exists.
+ //
+ // The default value is false.
+ ErrorIfExist bool
+
+ // ErrorIfMissing defines whether an error should be returned if the DB is
+ // missing. If false then the database will be created if missing, otherwise
+ // an error will be returned.
+ //
+ // The default value is false.
+ ErrorIfMissing bool
+
+ // Filter defines an 'effective filter' to use. An 'effective filter',
+ // if defined, will be used to generate per-table filter blocks.
+ // The filter name will be stored on disk.
+ // During reads LevelDB will try to find a matching filter from the
+ // 'effective filter' and the 'alternative filters'.
+ //
+ // Filter can be changed after a DB has been created. It is recommended
+ // to put the old filter into the 'alternative filters' to mitigate the lack
+ // of a filter during the transition period.
+ //
+ // A filter is used to reduce disk reads when looking for a specific key.
+ //
+ // The default value is nil.
+ Filter filter.Filter
+
+ // IteratorSamplingRate defines approximate gap (in bytes) between read
+ // sampling of an iterator. The samples will be used to determine when
+ // compaction should be triggered.
+ //
+ // The default is 1MiB.
+ IteratorSamplingRate int
+
+ // NoSync allows completely disabling fsync.
+ //
+ // The default is false.
+ NoSync bool
+
+ // NoWriteMerge allows disabling write merge.
+ //
+ // The default is false.
+ NoWriteMerge bool
+
+ // OpenFilesCacher provides cache algorithm for open files caching.
+ // Specify NoCacher to disable caching algorithm.
+ //
+ // The default value is LRUCacher.
+ OpenFilesCacher Cacher
+
+ // OpenFilesCacheCapacity defines the capacity of the open files caching.
+ // Use -1 for zero; this has the same effect as specifying NoCacher for OpenFilesCacher.
+ //
+ // The default value is 500.
+ OpenFilesCacheCapacity int
+
+ // If true then opens DB in read-only mode.
+ //
+ // The default value is false.
+ ReadOnly bool
+
+ // Strict defines the DB strict level.
+ Strict Strict
+
+ // WriteBuffer defines the maximum size of a 'memdb' before it is flushed to
+ // 'sorted table'. 'memdb' is an in-memory DB backed by an on-disk
+ // unsorted journal.
+ //
+ // LevelDB may hold up to two 'memdb' at the same time.
+ //
+ // The default value is 4MiB.
+ WriteBuffer int
+
+ // WriteL0PauseTrigger defines the number of 'sorted tables' at level-0 that will
+ // pause writes.
+ //
+ // The default value is 12.
+ WriteL0PauseTrigger int
+
+ // WriteL0SlowdownTrigger defines the number of 'sorted tables' at level-0 that
+ // will trigger a write slowdown.
+ //
+ // The default value is 8.
+ WriteL0SlowdownTrigger int
+}
+
+func (o *Options) GetAltFilters() []filter.Filter {
+ if o == nil {
+ return nil
+ }
+ return o.AltFilters
+}
+
+func (o *Options) GetBlockCacher() Cacher {
+ if o == nil || o.BlockCacher == nil {
+ return DefaultBlockCacher
+ } else if o.BlockCacher == NoCacher {
+ return nil
+ }
+ return o.BlockCacher
+}
+
+func (o *Options) GetBlockCacheCapacity() int {
+ if o == nil || o.BlockCacheCapacity == 0 {
+ return DefaultBlockCacheCapacity
+ } else if o.BlockCacheCapacity < 0 {
+ return 0
+ }
+ return o.BlockCacheCapacity
+}
+
+func (o *Options) GetBlockRestartInterval() int {
+ if o == nil || o.BlockRestartInterval <= 0 {
+ return DefaultBlockRestartInterval
+ }
+ return o.BlockRestartInterval
+}
+
+func (o *Options) GetBlockSize() int {
+ if o == nil || o.BlockSize <= 0 {
+ return DefaultBlockSize
+ }
+ return o.BlockSize
+}
+
+func (o *Options) GetCompactionExpandLimit(level int) int {
+ factor := DefaultCompactionExpandLimitFactor
+ if o != nil && o.CompactionExpandLimitFactor > 0 {
+ factor = o.CompactionExpandLimitFactor
+ }
+ return o.GetCompactionTableSize(level+1) * factor
+}
+
+func (o *Options) GetCompactionGPOverlaps(level int) int {
+ factor := DefaultCompactionGPOverlapsFactor
+ if o != nil && o.CompactionGPOverlapsFactor > 0 {
+ factor = o.CompactionGPOverlapsFactor
+ }
+ return o.GetCompactionTableSize(level+2) * factor
+}
+
+func (o *Options) GetCompactionL0Trigger() int {
+ if o == nil || o.CompactionL0Trigger == 0 {
+ return DefaultCompactionL0Trigger
+ }
+ return o.CompactionL0Trigger
+}
+
+func (o *Options) GetCompactionSourceLimit(level int) int {
+ factor := DefaultCompactionSourceLimitFactor
+ if o != nil && o.CompactionSourceLimitFactor > 0 {
+ factor = o.CompactionSourceLimitFactor
+ }
+ return o.GetCompactionTableSize(level+1) * factor
+}
+
+func (o *Options) GetCompactionTableSize(level int) int {
+ var (
+ base = DefaultCompactionTableSize
+ mult float64
+ )
+ if o != nil {
+ if o.CompactionTableSize > 0 {
+ base = o.CompactionTableSize
+ }
+ if level < len(o.CompactionTableSizeMultiplierPerLevel) && o.CompactionTableSizeMultiplierPerLevel[level] > 0 {
+ mult = o.CompactionTableSizeMultiplierPerLevel[level]
+ } else if o.CompactionTableSizeMultiplier > 0 {
+ mult = math.Pow(o.CompactionTableSizeMultiplier, float64(level))
+ }
+ }
+ if mult == 0 {
+ mult = math.Pow(DefaultCompactionTableSizeMultiplier, float64(level))
+ }
+ return int(float64(base) * mult)
+}
+
+func (o *Options) GetCompactionTotalSize(level int) int64 {
+ var (
+ base = DefaultCompactionTotalSize
+ mult float64
+ )
+ if o != nil {
+ if o.CompactionTotalSize > 0 {
+ base = o.CompactionTotalSize
+ }
+ if level < len(o.CompactionTotalSizeMultiplierPerLevel) && o.CompactionTotalSizeMultiplierPerLevel[level] > 0 {
+ mult = o.CompactionTotalSizeMultiplierPerLevel[level]
+ } else if o.CompactionTotalSizeMultiplier > 0 {
+ mult = math.Pow(o.CompactionTotalSizeMultiplier, float64(level))
+ }
+ }
+ if mult == 0 {
+ mult = math.Pow(DefaultCompactionTotalSizeMultiplier, float64(level))
+ }
+ return int64(float64(base) * mult)
+}
+
+func (o *Options) GetComparer() comparer.Comparer {
+ if o == nil || o.Comparer == nil {
+ return comparer.DefaultComparer
+ }
+ return o.Comparer
+}
+
+func (o *Options) GetCompression() Compression {
+ if o == nil || o.Compression <= DefaultCompression || o.Compression >= nCompression {
+ return DefaultCompressionType
+ }
+ return o.Compression
+}
+
+func (o *Options) GetDisableBufferPool() bool {
+ if o == nil {
+ return false
+ }
+ return o.DisableBufferPool
+}
+
+func (o *Options) GetDisableBlockCache() bool {
+ if o == nil {
+ return false
+ }
+ return o.DisableBlockCache
+}
+
+func (o *Options) GetDisableCompactionBackoff() bool {
+ if o == nil {
+ return false
+ }
+ return o.DisableCompactionBackoff
+}
+
+func (o *Options) GetDisableLargeBatchTransaction() bool {
+ if o == nil {
+ return false
+ }
+ return o.DisableLargeBatchTransaction
+}
+
+func (o *Options) GetErrorIfExist() bool {
+ if o == nil {
+ return false
+ }
+ return o.ErrorIfExist
+}
+
+func (o *Options) GetErrorIfMissing() bool {
+ if o == nil {
+ return false
+ }
+ return o.ErrorIfMissing
+}
+
+func (o *Options) GetFilter() filter.Filter {
+ if o == nil {
+ return nil
+ }
+ return o.Filter
+}
+
+func (o *Options) GetIteratorSamplingRate() int {
+ if o == nil || o.IteratorSamplingRate <= 0 {
+ return DefaultIteratorSamplingRate
+ }
+ return o.IteratorSamplingRate
+}
+
+func (o *Options) GetNoSync() bool {
+ if o == nil {
+ return false
+ }
+ return o.NoSync
+}
+
+func (o *Options) GetNoWriteMerge() bool {
+ if o == nil {
+ return false
+ }
+ return o.NoWriteMerge
+}
+
+func (o *Options) GetOpenFilesCacher() Cacher {
+ if o == nil || o.OpenFilesCacher == nil {
+ return DefaultOpenFilesCacher
+ }
+ if o.OpenFilesCacher == NoCacher {
+ return nil
+ }
+ return o.OpenFilesCacher
+}
+
+func (o *Options) GetOpenFilesCacheCapacity() int {
+ if o == nil || o.OpenFilesCacheCapacity == 0 {
+ return DefaultOpenFilesCacheCapacity
+ } else if o.OpenFilesCacheCapacity < 0 {
+ return 0
+ }
+ return o.OpenFilesCacheCapacity
+}
+
+func (o *Options) GetReadOnly() bool {
+ if o == nil {
+ return false
+ }
+ return o.ReadOnly
+}
+
+func (o *Options) GetStrict(strict Strict) bool {
+ if o == nil || o.Strict == 0 {
+ return DefaultStrict&strict != 0
+ }
+ return o.Strict&strict != 0
+}
+
+func (o *Options) GetWriteBuffer() int {
+ if o == nil || o.WriteBuffer <= 0 {
+ return DefaultWriteBuffer
+ }
+ return o.WriteBuffer
+}
+
+func (o *Options) GetWriteL0PauseTrigger() int {
+ if o == nil || o.WriteL0PauseTrigger == 0 {
+ return DefaultWriteL0PauseTrigger
+ }
+ return o.WriteL0PauseTrigger
+}
+
+func (o *Options) GetWriteL0SlowdownTrigger() int {
+ if o == nil || o.WriteL0SlowdownTrigger == 0 {
+ return DefaultWriteL0SlowdownTrigger
+ }
+ return o.WriteL0SlowdownTrigger
+}
+
+// ReadOptions holds the optional parameters for 'read operation'. The
+// 'read operation' includes Get, Find and NewIterator.
+type ReadOptions struct {
+ // DontFillCache defines whether block reads for this 'read operation'
+ // should be cached. If false then the block will be cached. This does
+ // not affect already cached blocks.
+ //
+ // The default value is false.
+ DontFillCache bool
+
+ // Strict will be OR'ed with global DB 'strict level' unless StrictOverride
+ // is present. Currently only StrictReader has an effect here.
+ Strict Strict
+}
+
+func (ro *ReadOptions) GetDontFillCache() bool {
+ if ro == nil {
+ return false
+ }
+ return ro.DontFillCache
+}
+
+func (ro *ReadOptions) GetStrict(strict Strict) bool {
+ if ro == nil {
+ return false
+ }
+ return ro.Strict&strict != 0
+}
+
+// WriteOptions holds the optional parameters for 'write operation'. The
+// 'write operation' includes Write, Put and Delete.
+type WriteOptions struct {
+ // NoWriteMerge allows disabling write merge.
+ //
+ // The default is false.
+ NoWriteMerge bool
+
+ // Sync is whether to sync underlying writes from the OS buffer cache
+ // through to actual disk, if applicable. Setting Sync can result in
+ // slower writes.
+ //
+ // If false, and the machine crashes, then some recent writes may be lost.
+ // Note that if it is just the process that crashes (and the machine does
+ // not) then no writes will be lost.
+ //
+ // In other words, Sync being false has the same semantics as a write
+ // system call. Sync being true means write followed by fsync.
+ //
+ // The default value is false.
+ Sync bool
+}
+
+func (wo *WriteOptions) GetNoWriteMerge() bool {
+ if wo == nil {
+ return false
+ }
+ return wo.NoWriteMerge
+}
+
+func (wo *WriteOptions) GetSync() bool {
+ if wo == nil {
+ return false
+ }
+ return wo.Sync
+}
+
+func GetStrict(o *Options, ro *ReadOptions, strict Strict) bool {
+ if ro.GetStrict(StrictOverride) {
+ return ro.GetStrict(strict)
+ } else {
+ return o.GetStrict(strict) || ro.GetStrict(strict)
+ }
+}
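To illustrate how the nil-tolerant getters above fall back to the package defaults, a small sketch; the expected values follow directly from the defaults and getter logic shown in this file.

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	// Every getter is safe to call on a nil *Options and returns the default.
	var o *opt.Options
	fmt.Println(o.GetWriteBuffer())                     // 4194304 (4 MiB)
	fmt.Println(o.GetBlockCacheCapacity())              // 8388608 (8 MiB)
	fmt.Println(o.GetStrict(opt.StrictJournalChecksum)) // true, part of DefaultStrict

	// Explicit options override the defaults; -1 disables a capacity entirely.
	o = &opt.Options{
		BlockCacheCapacity: -1,
		Strict:             opt.NoStrict,
	}
	fmt.Println(o.GetBlockCacheCapacity())              // 0
	fmt.Println(o.GetStrict(opt.StrictJournalChecksum)) // false
}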
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/options.go b/vendor/github.com/syndtr/goleveldb/leveldb/options.go
new file mode 100644
index 000000000..b072b1ac4
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/options.go
@@ -0,0 +1,107 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/filter"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+)
+
+func dupOptions(o *opt.Options) *opt.Options {
+ newo := &opt.Options{}
+ if o != nil {
+ *newo = *o
+ }
+ if newo.Strict == 0 {
+ newo.Strict = opt.DefaultStrict
+ }
+ return newo
+}
+
+func (s *session) setOptions(o *opt.Options) {
+ no := dupOptions(o)
+ // Alternative filters.
+ if filters := o.GetAltFilters(); len(filters) > 0 {
+ no.AltFilters = make([]filter.Filter, len(filters))
+ for i, filter := range filters {
+ no.AltFilters[i] = &iFilter{filter}
+ }
+ }
+ // Comparer.
+ s.icmp = &iComparer{o.GetComparer()}
+ no.Comparer = s.icmp
+ // Filter.
+ if filter := o.GetFilter(); filter != nil {
+ no.Filter = &iFilter{filter}
+ }
+
+ s.o = &cachedOptions{Options: no}
+ s.o.cache()
+}
+
+const optCachedLevel = 7
+
+type cachedOptions struct {
+ *opt.Options
+
+ compactionExpandLimit []int
+ compactionGPOverlaps []int
+ compactionSourceLimit []int
+ compactionTableSize []int
+ compactionTotalSize []int64
+}
+
+func (co *cachedOptions) cache() {
+ co.compactionExpandLimit = make([]int, optCachedLevel)
+ co.compactionGPOverlaps = make([]int, optCachedLevel)
+ co.compactionSourceLimit = make([]int, optCachedLevel)
+ co.compactionTableSize = make([]int, optCachedLevel)
+ co.compactionTotalSize = make([]int64, optCachedLevel)
+
+ for level := 0; level < optCachedLevel; level++ {
+ co.compactionExpandLimit[level] = co.Options.GetCompactionExpandLimit(level)
+ co.compactionGPOverlaps[level] = co.Options.GetCompactionGPOverlaps(level)
+ co.compactionSourceLimit[level] = co.Options.GetCompactionSourceLimit(level)
+ co.compactionTableSize[level] = co.Options.GetCompactionTableSize(level)
+ co.compactionTotalSize[level] = co.Options.GetCompactionTotalSize(level)
+ }
+}
+
+func (co *cachedOptions) GetCompactionExpandLimit(level int) int {
+ if level < optCachedLevel {
+ return co.compactionExpandLimit[level]
+ }
+ return co.Options.GetCompactionExpandLimit(level)
+}
+
+func (co *cachedOptions) GetCompactionGPOverlaps(level int) int {
+ if level < optCachedLevel {
+ return co.compactionGPOverlaps[level]
+ }
+ return co.Options.GetCompactionGPOverlaps(level)
+}
+
+func (co *cachedOptions) GetCompactionSourceLimit(level int) int {
+ if level < optCachedLevel {
+ return co.compactionSourceLimit[level]
+ }
+ return co.Options.GetCompactionSourceLimit(level)
+}
+
+func (co *cachedOptions) GetCompactionTableSize(level int) int {
+ if level < optCachedLevel {
+ return co.compactionTableSize[level]
+ }
+ return co.Options.GetCompactionTableSize(level)
+}
+
+func (co *cachedOptions) GetCompactionTotalSize(level int) int64 {
+ if level < optCachedLevel {
+ return co.compactionTotalSize[level]
+ }
+ return co.Options.GetCompactionTotalSize(level)
+}
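For intuition about the per-level values being cached here, the sketch below computes them straight from the public getters with default options; the seven levels mirror optCachedLevel.

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/opt"
)

func main() {
	var o *opt.Options // nil: defaults everywhere

	// With defaults, the per-table size limit stays at 2 MiB (multiplier 1.0),
	// while the per-level total grows by 10x from a 10 MiB base.
	for level := 0; level < 7; level++ {
		fmt.Printf("L%d: table=%d total=%d\n",
			level,
			o.GetCompactionTableSize(level),
			o.GetCompactionTotalSize(level))
	}
}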
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session.go b/vendor/github.com/syndtr/goleveldb/leveldb/session.go
new file mode 100644
index 000000000..3f391f934
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/session.go
@@ -0,0 +1,210 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "sync"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/journal"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+// ErrManifestCorrupted records manifest corruption. This error will be
+// wrapped with errors.ErrCorrupted.
+type ErrManifestCorrupted struct {
+ Field string
+ Reason string
+}
+
+func (e *ErrManifestCorrupted) Error() string {
+ return fmt.Sprintf("leveldb: manifest corrupted (field '%s'): %s", e.Field, e.Reason)
+}
+
+func newErrManifestCorrupted(fd storage.FileDesc, field, reason string) error {
+ return errors.NewErrCorrupted(fd, &ErrManifestCorrupted{field, reason})
+}
+
+// session represents a persistent database session.
+type session struct {
+ // Need 64-bit alignment.
+ stNextFileNum int64 // current unused file number
+ stJournalNum int64 // current journal file number; need external synchronization
+ stPrevJournalNum int64 // prev journal file number; no longer used; for compatibility with older version of leveldb
+ stTempFileNum int64
+ stSeqNum uint64 // last mem compacted seq; need external synchronization
+
+ stor *iStorage
+ storLock storage.Locker
+ o *cachedOptions
+ icmp *iComparer
+ tops *tOps
+ fileRef map[int64]int
+
+ manifest *journal.Writer
+ manifestWriter storage.Writer
+ manifestFd storage.FileDesc
+
+ stCompPtrs []internalKey // compaction pointers; need external synchronization
+ stVersion *version // current version
+ vmu sync.Mutex
+}
+
+// Creates a new initialized session instance.
+func newSession(stor storage.Storage, o *opt.Options) (s *session, err error) {
+ if stor == nil {
+ return nil, os.ErrInvalid
+ }
+ storLock, err := stor.Lock()
+ if err != nil {
+ return
+ }
+ s = &session{
+ stor: newIStorage(stor),
+ storLock: storLock,
+ fileRef: make(map[int64]int),
+ }
+ s.setOptions(o)
+ s.tops = newTableOps(s)
+ s.setVersion(newVersion(s))
+ s.log("log@legend F·NumFile S·FileSize N·Entry C·BadEntry B·BadBlock Ke·KeyError D·DroppedEntry L·Level Q·SeqNum T·TimeElapsed")
+ return
+}
+
+// Close session.
+func (s *session) close() {
+ s.tops.close()
+ if s.manifest != nil {
+ s.manifest.Close()
+ }
+ if s.manifestWriter != nil {
+ s.manifestWriter.Close()
+ }
+ s.manifest = nil
+ s.manifestWriter = nil
+ s.setVersion(&version{s: s, closing: true})
+}
+
+// Release session lock.
+func (s *session) release() {
+ s.storLock.Unlock()
+}
+
+// Create a new database session; need external synchronization.
+func (s *session) create() error {
+ // create manifest
+ return s.newManifest(nil, nil)
+}
+
+// Recover a database session; need external synchronization.
+func (s *session) recover() (err error) {
+ defer func() {
+ if os.IsNotExist(err) {
+ // Don't return os.ErrNotExist if the underlying storage contains
+ // other files that belong to LevelDB. So the DB won't get trashed.
+ if fds, _ := s.stor.List(storage.TypeAll); len(fds) > 0 {
+ err = &errors.ErrCorrupted{Fd: storage.FileDesc{Type: storage.TypeManifest}, Err: &errors.ErrMissingFiles{}}
+ }
+ }
+ }()
+
+ fd, err := s.stor.GetMeta()
+ if err != nil {
+ return
+ }
+
+ reader, err := s.stor.Open(fd)
+ if err != nil {
+ return
+ }
+ defer reader.Close()
+
+ var (
+ // Options.
+ strict = s.o.GetStrict(opt.StrictManifest)
+
+ jr = journal.NewReader(reader, dropper{s, fd}, strict, true)
+ rec = &sessionRecord{}
+ staging = s.stVersion.newStaging()
+ )
+ for {
+ var r io.Reader
+ r, err = jr.Next()
+ if err != nil {
+ if err == io.EOF {
+ err = nil
+ break
+ }
+ return errors.SetFd(err, fd)
+ }
+
+ err = rec.decode(r)
+ if err == nil {
+ // save compact pointers
+ for _, r := range rec.compPtrs {
+ s.setCompPtr(r.level, internalKey(r.ikey))
+ }
+ // commit record to version staging
+ staging.commit(rec)
+ } else {
+ err = errors.SetFd(err, fd)
+ if strict || !errors.IsCorrupted(err) {
+ return
+ }
+ s.logf("manifest error: %v (skipped)", errors.SetFd(err, fd))
+ }
+ rec.resetCompPtrs()
+ rec.resetAddedTables()
+ rec.resetDeletedTables()
+ }
+
+ switch {
+ case !rec.has(recComparer):
+ return newErrManifestCorrupted(fd, "comparer", "missing")
+ case rec.comparer != s.icmp.uName():
+ return newErrManifestCorrupted(fd, "comparer", fmt.Sprintf("mismatch: want '%s', got '%s'", s.icmp.uName(), rec.comparer))
+ case !rec.has(recNextFileNum):
+ return newErrManifestCorrupted(fd, "next-file-num", "missing")
+ case !rec.has(recJournalNum):
+ return newErrManifestCorrupted(fd, "journal-file-num", "missing")
+ case !rec.has(recSeqNum):
+ return newErrManifestCorrupted(fd, "seq-num", "missing")
+ }
+
+ s.manifestFd = fd
+ s.setVersion(staging.finish())
+ s.setNextFileNum(rec.nextFileNum)
+ s.recordCommited(rec)
+ return nil
+}
+
+// Commit session; need external synchronization.
+func (s *session) commit(r *sessionRecord) (err error) {
+ v := s.version()
+ defer v.release()
+
+ // spawn new version based on current version
+ nv := v.spawn(r)
+
+ if s.manifest == nil {
+ // manifest journal writer not yet created, create one
+ err = s.newManifest(r, nv)
+ } else {
+ err = s.flushManifest(r)
+ }
+
+ // finally, apply the new version if no error arose
+ if err == nil {
+ s.setVersion(nv)
+ }
+
+ return
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go
new file mode 100644
index 000000000..089cd00b2
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/session_compaction.go
@@ -0,0 +1,302 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "sync/atomic"
+
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/memdb"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+)
+
+func (s *session) pickMemdbLevel(umin, umax []byte, maxLevel int) int {
+ v := s.version()
+ defer v.release()
+ return v.pickMemdbLevel(umin, umax, maxLevel)
+}
+
+func (s *session) flushMemdb(rec *sessionRecord, mdb *memdb.DB, maxLevel int) (int, error) {
+ // Create sorted table.
+ iter := mdb.NewIterator(nil)
+ defer iter.Release()
+ t, n, err := s.tops.createFrom(iter)
+ if err != nil {
+ return 0, err
+ }
+
+ // Picking a level other than zero can cause compaction issues with large
+ // bulk inserts and deletes on a strictly incrementing key-space. The
+ // problem is that small deletion markers get trapped at a lower level,
+ // while key/value entries keep growing at a higher level. Since the
+ // key-space is strictly incrementing it will not overlap with the
+ // higher level; thus the maximum possible level is always picked, while
+ // overlapping deletion markers are pushed into the lower level.
+ // See: https://github.com/syndtr/goleveldb/issues/127.
+ flushLevel := s.pickMemdbLevel(t.imin.ukey(), t.imax.ukey(), maxLevel)
+ rec.addTableFile(flushLevel, t)
+
+ s.logf("memdb@flush created L%d@%d N·%d S·%s %q:%q", flushLevel, t.fd.Num, n, shortenb(int(t.size)), t.imin, t.imax)
+ return flushLevel, nil
+}
+
+// Pick a compaction based on current state; need external synchronization.
+func (s *session) pickCompaction() *compaction {
+ v := s.version()
+
+ var sourceLevel int
+ var t0 tFiles
+ if v.cScore >= 1 {
+ sourceLevel = v.cLevel
+ cptr := s.getCompPtr(sourceLevel)
+ tables := v.levels[sourceLevel]
+ for _, t := range tables {
+ if cptr == nil || s.icmp.Compare(t.imax, cptr) > 0 {
+ t0 = append(t0, t)
+ break
+ }
+ }
+ if len(t0) == 0 {
+ t0 = append(t0, tables[0])
+ }
+ } else {
+ if p := atomic.LoadPointer(&v.cSeek); p != nil {
+ ts := (*tSet)(p)
+ sourceLevel = ts.level
+ t0 = append(t0, ts.table)
+ } else {
+ v.release()
+ return nil
+ }
+ }
+
+ return newCompaction(s, v, sourceLevel, t0)
+}
+
+// Create compaction from given level and range; need external synchronization.
+func (s *session) getCompactionRange(sourceLevel int, umin, umax []byte, noLimit bool) *compaction {
+ v := s.version()
+
+ if sourceLevel >= len(v.levels) {
+ v.release()
+ return nil
+ }
+
+ t0 := v.levels[sourceLevel].getOverlaps(nil, s.icmp, umin, umax, sourceLevel == 0)
+ if len(t0) == 0 {
+ v.release()
+ return nil
+ }
+
+ // Avoid compacting too much in one shot in case the range is large.
+ // But we cannot do this for level-0 since level-0 files can overlap
+ // and we must not pick one file and drop another older file if the
+ // two files overlap.
+ if !noLimit && sourceLevel > 0 {
+ limit := int64(v.s.o.GetCompactionSourceLimit(sourceLevel))
+ total := int64(0)
+ for i, t := range t0 {
+ total += t.size
+ if total >= limit {
+ s.logf("table@compaction limiting F·%d -> F·%d", len(t0), i+1)
+ t0 = t0[:i+1]
+ break
+ }
+ }
+ }
+
+ return newCompaction(s, v, sourceLevel, t0)
+}
+
+func newCompaction(s *session, v *version, sourceLevel int, t0 tFiles) *compaction {
+ c := &compaction{
+ s: s,
+ v: v,
+ sourceLevel: sourceLevel,
+ levels: [2]tFiles{t0, nil},
+ maxGPOverlaps: int64(s.o.GetCompactionGPOverlaps(sourceLevel)),
+ tPtrs: make([]int, len(v.levels)),
+ }
+ c.expand()
+ c.save()
+ return c
+}
+
+// compaction represents a compaction state.
+type compaction struct {
+ s *session
+ v *version
+
+ sourceLevel int
+ levels [2]tFiles
+ maxGPOverlaps int64
+
+ gp tFiles
+ gpi int
+ seenKey bool
+ gpOverlappedBytes int64
+ imin, imax internalKey
+ tPtrs []int
+ released bool
+
+ snapGPI int
+ snapSeenKey bool
+ snapGPOverlappedBytes int64
+ snapTPtrs []int
+}
+
+func (c *compaction) save() {
+ c.snapGPI = c.gpi
+ c.snapSeenKey = c.seenKey
+ c.snapGPOverlappedBytes = c.gpOverlappedBytes
+ c.snapTPtrs = append(c.snapTPtrs[:0], c.tPtrs...)
+}
+
+func (c *compaction) restore() {
+ c.gpi = c.snapGPI
+ c.seenKey = c.snapSeenKey
+ c.gpOverlappedBytes = c.snapGPOverlappedBytes
+ c.tPtrs = append(c.tPtrs[:0], c.snapTPtrs...)
+}
+
+func (c *compaction) release() {
+ if !c.released {
+ c.released = true
+ c.v.release()
+ }
+}
+
+// Expand compacted tables; need external synchronization.
+func (c *compaction) expand() {
+ limit := int64(c.s.o.GetCompactionExpandLimit(c.sourceLevel))
+ vt0 := c.v.levels[c.sourceLevel]
+ vt1 := tFiles{}
+ if level := c.sourceLevel + 1; level < len(c.v.levels) {
+ vt1 = c.v.levels[level]
+ }
+
+ t0, t1 := c.levels[0], c.levels[1]
+ imin, imax := t0.getRange(c.s.icmp)
+ // We expand t0 here just in case a ukey hops across tables.
+ t0 = vt0.getOverlaps(t0, c.s.icmp, imin.ukey(), imax.ukey(), c.sourceLevel == 0)
+ if len(t0) != len(c.levels[0]) {
+ imin, imax = t0.getRange(c.s.icmp)
+ }
+ t1 = vt1.getOverlaps(t1, c.s.icmp, imin.ukey(), imax.ukey(), false)
+ // Get entire range covered by compaction.
+ amin, amax := append(t0, t1...).getRange(c.s.icmp)
+
+ // See if we can grow the number of inputs in "sourceLevel" without
+ // changing the number of "sourceLevel+1" files we pick up.
+ if len(t1) > 0 {
+ exp0 := vt0.getOverlaps(nil, c.s.icmp, amin.ukey(), amax.ukey(), c.sourceLevel == 0)
+ if len(exp0) > len(t0) && t1.size()+exp0.size() < limit {
+ xmin, xmax := exp0.getRange(c.s.icmp)
+ exp1 := vt1.getOverlaps(nil, c.s.icmp, xmin.ukey(), xmax.ukey(), false)
+ if len(exp1) == len(t1) {
+ c.s.logf("table@compaction expanding L%d+L%d (F·%d S·%s)+(F·%d S·%s) -> (F·%d S·%s)+(F·%d S·%s)",
+ c.sourceLevel, c.sourceLevel+1, len(t0), shortenb(int(t0.size())), len(t1), shortenb(int(t1.size())),
+ len(exp0), shortenb(int(exp0.size())), len(exp1), shortenb(int(exp1.size())))
+ imin, imax = xmin, xmax
+ t0, t1 = exp0, exp1
+ amin, amax = append(t0, t1...).getRange(c.s.icmp)
+ }
+ }
+ }
+
+ // Compute the set of grandparent files that overlap this compaction
+ // (parent == sourceLevel+1; grandparent == sourceLevel+2)
+ if level := c.sourceLevel + 2; level < len(c.v.levels) {
+ c.gp = c.v.levels[level].getOverlaps(c.gp, c.s.icmp, amin.ukey(), amax.ukey(), false)
+ }
+
+ c.levels[0], c.levels[1] = t0, t1
+ c.imin, c.imax = imin, imax
+}
+
+// Check whether compaction is trivial.
+func (c *compaction) trivial() bool {
+ return len(c.levels[0]) == 1 && len(c.levels[1]) == 0 && c.gp.size() <= c.maxGPOverlaps
+}
+
+func (c *compaction) baseLevelForKey(ukey []byte) bool {
+ for level := c.sourceLevel + 2; level < len(c.v.levels); level++ {
+ tables := c.v.levels[level]
+ for c.tPtrs[level] < len(tables) {
+ t := tables[c.tPtrs[level]]
+ if c.s.icmp.uCompare(ukey, t.imax.ukey()) <= 0 {
+ // We've advanced far enough.
+ if c.s.icmp.uCompare(ukey, t.imin.ukey()) >= 0 {
+ // Key falls in this file's range, so definitely not base level.
+ return false
+ }
+ break
+ }
+ c.tPtrs[level]++
+ }
+ }
+ return true
+}
+
+func (c *compaction) shouldStopBefore(ikey internalKey) bool {
+ for ; c.gpi < len(c.gp); c.gpi++ {
+ gp := c.gp[c.gpi]
+ if c.s.icmp.Compare(ikey, gp.imax) <= 0 {
+ break
+ }
+ if c.seenKey {
+ c.gpOverlappedBytes += gp.size
+ }
+ }
+ c.seenKey = true
+
+ if c.gpOverlappedBytes > c.maxGPOverlaps {
+ // Too much overlap for current output; start new output.
+ c.gpOverlappedBytes = 0
+ return true
+ }
+ return false
+}
+
+// Creates an iterator.
+func (c *compaction) newIterator() iterator.Iterator {
+ // Creates iterator slice.
+ icap := len(c.levels)
+ if c.sourceLevel == 0 {
+ // Special case for level-0.
+ icap = len(c.levels[0]) + 1
+ }
+ its := make([]iterator.Iterator, 0, icap)
+
+ // Options.
+ ro := &opt.ReadOptions{
+ DontFillCache: true,
+ Strict: opt.StrictOverride,
+ }
+ strict := c.s.o.GetStrict(opt.StrictCompaction)
+ if strict {
+ ro.Strict |= opt.StrictReader
+ }
+
+ for i, tables := range c.levels {
+ if len(tables) == 0 {
+ continue
+ }
+
+ // Level-0 tables are not sorted and may overlap each other.
+ if c.sourceLevel+i == 0 {
+ for _, t := range tables {
+ its = append(its, c.s.tops.newIterator(t, nil, ro))
+ }
+ } else {
+ it := iterator.NewIndexedIterator(tables.newIndexIterator(c.s.tops, c.s.icmp, nil, ro), strict)
+ its = append(its, it)
+ }
+ }
+
+ return iterator.NewMergedIterator(its, c.s.icmp, strict)
+}
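The shouldStopBefore logic above bounds how much data in the grandparent level (sourceLevel+2) a single compaction output may overlap, so that a later compaction of that output never has to rewrite too many grandparent tables. The standalone sketch below illustrates the same cut rule; gpFile and splitter are hypothetical types with plain string keys, not goleveldb's internal API.

package main

import "fmt"

// gpFile stands in for a grandparent-level table: its largest key and its size.
type gpFile struct {
	maxKey string
	size   int64
}

// splitter mirrors the idea behind compaction.shouldStopBefore: walk the
// grandparent tables in key order and, once the bytes overlapped by the
// current output exceed maxOverlap, cut the output and start a new one.
type splitter struct {
	gp         []gpFile
	gpi        int
	seenKey    bool
	overlapped int64
	maxOverlap int64
}

func (s *splitter) shouldStopBefore(key string) bool {
	for ; s.gpi < len(s.gp); s.gpi++ {
		if key <= s.gp[s.gpi].maxKey {
			break
		}
		if s.seenKey {
			s.overlapped += s.gp[s.gpi].size
		}
	}
	s.seenKey = true
	if s.overlapped > s.maxOverlap {
		s.overlapped = 0
		return true // cut here: start a new output table before this key
	}
	return false
}

func main() {
	s := &splitter{
		gp:         []gpFile{{"c", 10 << 20}, {"f", 10 << 20}, {"k", 10 << 20}},
		maxOverlap: 15 << 20, // allow ~15MB of grandparent overlap per output
	}
	for _, k := range []string{"a", "d", "g", "m"} {
		fmt.Println(k, s.shouldStopBefore(k))
	}
	// a false, d false, g true (overlap passed 15MB), m false
}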
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go
new file mode 100644
index 000000000..854e1aa6f
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/session_record.go
@@ -0,0 +1,323 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "bufio"
+ "encoding/binary"
+ "io"
+ "strings"
+
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+type byteReader interface {
+ io.Reader
+ io.ByteReader
+}
+
+// These numbers are written to disk and should not be changed.
+const (
+ recComparer = 1
+ recJournalNum = 2
+ recNextFileNum = 3
+ recSeqNum = 4
+ recCompPtr = 5
+ recDelTable = 6
+ recAddTable = 7
+ // 8 was used for large value refs
+ recPrevJournalNum = 9
+)
+
+type cpRecord struct {
+ level int
+ ikey internalKey
+}
+
+type atRecord struct {
+ level int
+ num int64
+ size int64
+ imin internalKey
+ imax internalKey
+}
+
+type dtRecord struct {
+ level int
+ num int64
+}
+
+type sessionRecord struct {
+ hasRec int
+ comparer string
+ journalNum int64
+ prevJournalNum int64
+ nextFileNum int64
+ seqNum uint64
+ compPtrs []cpRecord
+ addedTables []atRecord
+ deletedTables []dtRecord
+
+ scratch [binary.MaxVarintLen64]byte
+ err error
+}
+
+func (p *sessionRecord) has(rec int) bool {
+ return p.hasRec&(1<<uint(rec)) != 0
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go b/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/session_util.go
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "fmt"
+ "sync/atomic"
+
+ "github.com/syndtr/goleveldb/leveldb/journal"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+)
+
+// Logging.
+
+type dropper struct {
+ s *session
+ fd storage.FileDesc
+}
+
+func (d dropper) Drop(err error) {
+ if e, ok := err.(*journal.ErrCorrupted); ok {
+ d.s.logf("journal@drop %s-%d S·%s %q", d.fd.Type, d.fd.Num, shortenb(e.Size), e.Reason)
+ } else {
+ d.s.logf("journal@drop %s-%d %q", d.fd.Type, d.fd.Num, err)
+ }
+}
+
+func (s *session) log(v ...interface{}) { s.stor.Log(fmt.Sprint(v...)) }
+func (s *session) logf(format string, v ...interface{}) { s.stor.Log(fmt.Sprintf(format, v...)) }
+
+// File utils.
+
+func (s *session) newTemp() storage.FileDesc {
+ num := atomic.AddInt64(&s.stTempFileNum, 1) - 1
+ return storage.FileDesc{storage.TypeTemp, num}
+}
+
+func (s *session) addFileRef(fd storage.FileDesc, ref int) int {
+ ref += s.fileRef[fd.Num]
+ if ref > 0 {
+ s.fileRef[fd.Num] = ref
+ } else if ref == 0 {
+ delete(s.fileRef, fd.Num)
+ } else {
+ panic(fmt.Sprintf("negative ref: %v", fd))
+ }
+ return ref
+}
+
+// Session state.
+
+// Get the current version. This increments the version's ref count; the
+// caller must call version.release (exactly once) after use.
+func (s *session) version() *version {
+ s.vmu.Lock()
+ defer s.vmu.Unlock()
+ s.stVersion.incref()
+ return s.stVersion
+}
+
+func (s *session) tLen(level int) int {
+ s.vmu.Lock()
+ defer s.vmu.Unlock()
+ return s.stVersion.tLen(level)
+}
+
+// Set current version to v.
+func (s *session) setVersion(v *version) {
+ s.vmu.Lock()
+ defer s.vmu.Unlock()
+ // Held by the session. It is important to call this before releasing the
+ // current version, otherwise files that are still in use might get released.
+ v.incref()
+ if s.stVersion != nil {
+ // Release current version.
+ s.stVersion.releaseNB()
+ }
+ s.stVersion = v
+}
+
+// Get current unused file number.
+func (s *session) nextFileNum() int64 {
+ return atomic.LoadInt64(&s.stNextFileNum)
+}
+
+// Set current unused file number to num.
+func (s *session) setNextFileNum(num int64) {
+ atomic.StoreInt64(&s.stNextFileNum, num)
+}
+
+// Mark file number as used.
+func (s *session) markFileNum(num int64) {
+ nextFileNum := num + 1
+ for {
+ old, x := s.stNextFileNum, nextFileNum
+ if old > x {
+ x = old
+ }
+ if atomic.CompareAndSwapInt64(&s.stNextFileNum, old, x) {
+ break
+ }
+ }
+}
+
+// Allocate a file number.
+func (s *session) allocFileNum() int64 {
+ return atomic.AddInt64(&s.stNextFileNum, 1) - 1
+}
+
+// Reuse given file number.
+func (s *session) reuseFileNum(num int64) {
+ for {
+ old, x := s.stNextFileNum, num
+ if old != x+1 {
+ x = old
+ }
+ if atomic.CompareAndSwapInt64(&s.stNextFileNum, old, x) {
+ break
+ }
+ }
+}
+
+// Set compaction ptr at given level; need external synchronization.
+func (s *session) setCompPtr(level int, ik internalKey) {
+ if level >= len(s.stCompPtrs) {
+ newCompPtrs := make([]internalKey, level+1)
+ copy(newCompPtrs, s.stCompPtrs)
+ s.stCompPtrs = newCompPtrs
+ }
+ s.stCompPtrs[level] = append(internalKey{}, ik...)
+}
+
+// Get compaction ptr at given level; need external synchronization.
+func (s *session) getCompPtr(level int) internalKey {
+ if level >= len(s.stCompPtrs) {
+ return nil
+ }
+ return s.stCompPtrs[level]
+}
+
+// Manifest related utils.
+
+// Fill given session record obj with current states; need external
+// synchronization.
+func (s *session) fillRecord(r *sessionRecord, snapshot bool) {
+ r.setNextFileNum(s.nextFileNum())
+
+ if snapshot {
+ if !r.has(recJournalNum) {
+ r.setJournalNum(s.stJournalNum)
+ }
+
+ if !r.has(recSeqNum) {
+ r.setSeqNum(s.stSeqNum)
+ }
+
+ for level, ik := range s.stCompPtrs {
+ if ik != nil {
+ r.addCompPtr(level, ik)
+ }
+ }
+
+ r.setComparer(s.icmp.uName())
+ }
+}
+
+// Mark the record as committed; this will update session state;
+// need external synchronization.
+func (s *session) recordCommited(rec *sessionRecord) {
+ if rec.has(recJournalNum) {
+ s.stJournalNum = rec.journalNum
+ }
+
+ if rec.has(recPrevJournalNum) {
+ s.stPrevJournalNum = rec.prevJournalNum
+ }
+
+ if rec.has(recSeqNum) {
+ s.stSeqNum = rec.seqNum
+ }
+
+ for _, r := range rec.compPtrs {
+ s.setCompPtr(r.level, internalKey(r.ikey))
+ }
+}
+
+// Create a new manifest file; need external synchronization.
+func (s *session) newManifest(rec *sessionRecord, v *version) (err error) {
+ fd := storage.FileDesc{storage.TypeManifest, s.allocFileNum()}
+ writer, err := s.stor.Create(fd)
+ if err != nil {
+ return
+ }
+ jw := journal.NewWriter(writer)
+
+ if v == nil {
+ v = s.version()
+ defer v.release()
+ }
+ if rec == nil {
+ rec = &sessionRecord{}
+ }
+ s.fillRecord(rec, true)
+ v.fillRecord(rec)
+
+ defer func() {
+ if err == nil {
+ s.recordCommited(rec)
+ if s.manifest != nil {
+ s.manifest.Close()
+ }
+ if s.manifestWriter != nil {
+ s.manifestWriter.Close()
+ }
+ if !s.manifestFd.Zero() {
+ s.stor.Remove(s.manifestFd)
+ }
+ s.manifestFd = fd
+ s.manifestWriter = writer
+ s.manifest = jw
+ } else {
+ writer.Close()
+ s.stor.Remove(fd)
+ s.reuseFileNum(fd.Num)
+ }
+ }()
+
+ w, err := jw.Next()
+ if err != nil {
+ return
+ }
+ err = rec.encode(w)
+ if err != nil {
+ return
+ }
+ err = jw.Flush()
+ if err != nil {
+ return
+ }
+ err = s.stor.SetMeta(fd)
+ return
+}
+
+// Flush record to disk.
+func (s *session) flushManifest(rec *sessionRecord) (err error) {
+ s.fillRecord(rec, false)
+ w, err := s.manifest.Next()
+ if err != nil {
+ return
+ }
+ err = rec.encode(w)
+ if err != nil {
+ return
+ }
+ err = s.manifest.Flush()
+ if err != nil {
+ return
+ }
+ if !s.o.GetNoSync() {
+ err = s.manifestWriter.Sync()
+ if err != nil {
+ return
+ }
+ }
+ s.recordCommited(rec)
+ return
+}
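markFileNum and reuseFileNum above use compare-and-swap retry loops so that the next-file-number counter moves correctly even with concurrent callers. A minimal sketch of the "advance to at least" pattern, using a hypothetical markAtLeast helper rather than the session type:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// markAtLeast raises *ctr to at least v, retrying if another goroutine moves
// the counter concurrently; this mirrors what session.markFileNum does with
// stNextFileNum.
func markAtLeast(ctr *int64, v int64) {
	for {
		old := atomic.LoadInt64(ctr)
		if old >= v {
			return
		}
		if atomic.CompareAndSwapInt64(ctr, old, v) {
			return
		}
	}
}

func main() {
	var next int64
	var wg sync.WaitGroup
	for i := int64(1); i <= 100; i++ {
		wg.Add(1)
		go func(n int64) {
			defer wg.Done()
			markAtLeast(&next, n+1) // file number n has been seen in a manifest
		}(i)
	}
	wg.Wait()
	fmt.Println(next) // 101: always past the highest marked file number
}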
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage.go
new file mode 100644
index 000000000..d45fb5dfe
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage.go
@@ -0,0 +1,63 @@
+package leveldb
+
+import (
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "sync/atomic"
+)
+
+type iStorage struct {
+ storage.Storage
+ read uint64
+ write uint64
+}
+
+func (c *iStorage) Open(fd storage.FileDesc) (storage.Reader, error) {
+ r, err := c.Storage.Open(fd)
+ return &iStorageReader{r, c}, err
+}
+
+func (c *iStorage) Create(fd storage.FileDesc) (storage.Writer, error) {
+ w, err := c.Storage.Create(fd)
+ return &iStorageWriter{w, c}, err
+}
+
+func (c *iStorage) reads() uint64 {
+ return atomic.LoadUint64(&c.read)
+}
+
+func (c *iStorage) writes() uint64 {
+ return atomic.LoadUint64(&c.write)
+}
+
+// newIStorage returns the given storage wrapped by iStorage.
+func newIStorage(s storage.Storage) *iStorage {
+ return &iStorage{s, 0, 0}
+}
+
+type iStorageReader struct {
+ storage.Reader
+ c *iStorage
+}
+
+func (r *iStorageReader) Read(p []byte) (n int, err error) {
+ n, err = r.Reader.Read(p)
+ atomic.AddUint64(&r.c.read, uint64(n))
+ return n, err
+}
+
+func (r *iStorageReader) ReadAt(p []byte, off int64) (n int, err error) {
+ n, err = r.Reader.ReadAt(p, off)
+ atomic.AddUint64(&r.c.read, uint64(n))
+ return n, err
+}
+
+type iStorageWriter struct {
+ storage.Writer
+ c *iStorage
+}
+
+func (w *iStorageWriter) Write(p []byte) (n int, err error) {
+ n, err = w.Writer.Write(p)
+ atomic.AddUint64(&w.c.write, uint64(n))
+ return n, err
+}
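iStorage above layers read/write byte counters over any storage.Storage by embedding the interface and overriding only Open and Create. The same embedding trick works for other cross-cutting concerns; below is a minimal sketch with a hypothetical loggingStorage wrapper (not part of goleveldb).

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/storage"
)

// loggingStorage forwards everything to the wrapped Storage and only
// intercepts Create; all other methods are promoted from the embedded field.
type loggingStorage struct {
	storage.Storage
}

func (l loggingStorage) Create(fd storage.FileDesc) (storage.Writer, error) {
	fmt.Println("create:", fd)
	return l.Storage.Create(fd)
}

func main() {
	stor := loggingStorage{storage.NewMemStorage()}
	defer stor.Close()

	w, err := stor.Create(storage.FileDesc{Type: storage.TypeJournal, Num: 7}) // prints "create: 000007.log"
	if err != nil {
		panic(err)
	}
	w.Close()
}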
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go
new file mode 100644
index 000000000..9ba71fd6d
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage.go
@@ -0,0 +1,671 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package storage
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+var (
+ errFileOpen = errors.New("leveldb/storage: file still open")
+ errReadOnly = errors.New("leveldb/storage: storage is read-only")
+)
+
+type fileLock interface {
+ release() error
+}
+
+type fileStorageLock struct {
+ fs *fileStorage
+}
+
+func (lock *fileStorageLock) Unlock() {
+ if lock.fs != nil {
+ lock.fs.mu.Lock()
+ defer lock.fs.mu.Unlock()
+ if lock.fs.slock == lock {
+ lock.fs.slock = nil
+ }
+ }
+}
+
+type int64Slice []int64
+
+func (p int64Slice) Len() int { return len(p) }
+func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+
+func writeFileSynced(filename string, data []byte, perm os.FileMode) error {
+ f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+ if err != nil {
+ return err
+ }
+ n, err := f.Write(data)
+ if err == nil && n < len(data) {
+ err = io.ErrShortWrite
+ }
+ if err1 := f.Sync(); err == nil {
+ err = err1
+ }
+ if err1 := f.Close(); err == nil {
+ err = err1
+ }
+ return err
+}
+
+const logSizeThreshold = 1024 * 1024 // 1 MiB
+
+// fileStorage is a file-system backed storage.
+type fileStorage struct {
+ path string
+ readOnly bool
+
+ mu sync.Mutex
+ flock fileLock
+ slock *fileStorageLock
+ logw *os.File
+ logSize int64
+ buf []byte
+ // Opened file counter; open < 0 means closed.
+ open int
+ day int
+}
+
+// OpenFile returns a new filesystem-backed storage implementation with the given
+// path. This also acquires a file lock, so any subsequent attempt to open the
+// same path will fail.
+//
+// The storage must be closed after use, by calling the Close method.
+func OpenFile(path string, readOnly bool) (Storage, error) {
+ if fi, err := os.Stat(path); err == nil {
+ if !fi.IsDir() {
+ return nil, fmt.Errorf("leveldb/storage: open %s: not a directory", path)
+ }
+ } else if os.IsNotExist(err) && !readOnly {
+ if err := os.MkdirAll(path, 0755); err != nil {
+ return nil, err
+ }
+ } else {
+ return nil, err
+ }
+
+ flock, err := newFileLock(filepath.Join(path, "LOCK"), readOnly)
+ if err != nil {
+ return nil, err
+ }
+
+ defer func() {
+ if err != nil {
+ flock.release()
+ }
+ }()
+
+ var (
+ logw *os.File
+ logSize int64
+ )
+ if !readOnly {
+ logw, err = os.OpenFile(filepath.Join(path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644)
+ if err != nil {
+ return nil, err
+ }
+ logSize, err = logw.Seek(0, os.SEEK_END)
+ if err != nil {
+ logw.Close()
+ return nil, err
+ }
+ }
+
+ fs := &fileStorage{
+ path: path,
+ readOnly: readOnly,
+ flock: flock,
+ logw: logw,
+ logSize: logSize,
+ }
+ runtime.SetFinalizer(fs, (*fileStorage).Close)
+ return fs, nil
+}
+
+func (fs *fileStorage) Lock() (Locker, error) {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return nil, ErrClosed
+ }
+ if fs.readOnly {
+ return &fileStorageLock{}, nil
+ }
+ if fs.slock != nil {
+ return nil, ErrLocked
+ }
+ fs.slock = &fileStorageLock{fs: fs}
+ return fs.slock, nil
+}
+
+func itoa(buf []byte, i int, wid int) []byte {
+ u := uint(i)
+ if u == 0 && wid <= 1 {
+ return append(buf, '0')
+ }
+
+ // Assemble decimal in reverse order.
+ var b [32]byte
+ bp := len(b)
+ for ; u > 0 || wid > 0; u /= 10 {
+ bp--
+ wid--
+ b[bp] = byte(u%10) + '0'
+ }
+ return append(buf, b[bp:]...)
+}
+
+func (fs *fileStorage) printDay(t time.Time) {
+ if fs.day == t.Day() {
+ return
+ }
+ fs.day = t.Day()
+ fs.logw.Write([]byte("=============== " + t.Format("Jan 2, 2006 (MST)") + " ===============\n"))
+}
+
+func (fs *fileStorage) doLog(t time.Time, str string) {
+ if fs.logSize > logSizeThreshold {
+ // Rotate log file.
+ fs.logw.Close()
+ fs.logw = nil
+ fs.logSize = 0
+ rename(filepath.Join(fs.path, "LOG"), filepath.Join(fs.path, "LOG.old"))
+ }
+ if fs.logw == nil {
+ var err error
+ fs.logw, err = os.OpenFile(filepath.Join(fs.path, "LOG"), os.O_WRONLY|os.O_CREATE, 0644)
+ if err != nil {
+ return
+ }
+ // Force printDay on new log file.
+ fs.day = 0
+ }
+ fs.printDay(t)
+ hour, min, sec := t.Clock()
+ msec := t.Nanosecond() / 1e3
+ // time
+ fs.buf = itoa(fs.buf[:0], hour, 2)
+ fs.buf = append(fs.buf, ':')
+ fs.buf = itoa(fs.buf, min, 2)
+ fs.buf = append(fs.buf, ':')
+ fs.buf = itoa(fs.buf, sec, 2)
+ fs.buf = append(fs.buf, '.')
+ fs.buf = itoa(fs.buf, msec, 6)
+ fs.buf = append(fs.buf, ' ')
+ // write
+ fs.buf = append(fs.buf, []byte(str)...)
+ fs.buf = append(fs.buf, '\n')
+ n, _ := fs.logw.Write(fs.buf)
+ fs.logSize += int64(n)
+}
+
+func (fs *fileStorage) Log(str string) {
+ if !fs.readOnly {
+ t := time.Now()
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return
+ }
+ fs.doLog(t, str)
+ }
+}
+
+func (fs *fileStorage) log(str string) {
+ if !fs.readOnly {
+ fs.doLog(time.Now(), str)
+ }
+}
+
+func (fs *fileStorage) setMeta(fd FileDesc) error {
+ content := fsGenName(fd) + "\n"
+ // Check and backup old CURRENT file.
+ currentPath := filepath.Join(fs.path, "CURRENT")
+ if _, err := os.Stat(currentPath); err == nil {
+ b, err := ioutil.ReadFile(currentPath)
+ if err != nil {
+ fs.log(fmt.Sprintf("backup CURRENT: %v", err))
+ return err
+ }
+ if string(b) == content {
+ // Content not changed, do nothing.
+ return nil
+ }
+ if err := writeFileSynced(currentPath+".bak", b, 0644); err != nil {
+ fs.log(fmt.Sprintf("backup CURRENT: %v", err))
+ return err
+ }
+ } else if !os.IsNotExist(err) {
+ return err
+ }
+ path := fmt.Sprintf("%s.%d", filepath.Join(fs.path, "CURRENT"), fd.Num)
+ if err := writeFileSynced(path, []byte(content), 0644); err != nil {
+ fs.log(fmt.Sprintf("create CURRENT.%d: %v", fd.Num, err))
+ return err
+ }
+ // Replace CURRENT file.
+ if err := rename(path, currentPath); err != nil {
+ fs.log(fmt.Sprintf("rename CURRENT.%d: %v", fd.Num, err))
+ return err
+ }
+ // Sync root directory.
+ if err := syncDir(fs.path); err != nil {
+ fs.log(fmt.Sprintf("syncDir: %v", err))
+ return err
+ }
+ return nil
+}
+
+func (fs *fileStorage) SetMeta(fd FileDesc) error {
+ if !FileDescOk(fd) {
+ return ErrInvalidFile
+ }
+ if fs.readOnly {
+ return errReadOnly
+ }
+
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return ErrClosed
+ }
+ return fs.setMeta(fd)
+}
+
+func (fs *fileStorage) GetMeta() (FileDesc, error) {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return FileDesc{}, ErrClosed
+ }
+ dir, err := os.Open(fs.path)
+ if err != nil {
+ return FileDesc{}, err
+ }
+ names, err := dir.Readdirnames(0)
+ // Close the dir first before checking for Readdirnames error.
+ if ce := dir.Close(); ce != nil {
+ fs.log(fmt.Sprintf("close dir: %v", ce))
+ }
+ if err != nil {
+ return FileDesc{}, err
+ }
+ // Try this in order:
+ // - CURRENT.[0-9]+ ('pending rename' file, descending order)
+ // - CURRENT
+ // - CURRENT.bak
+ //
+ // Skip corrupted files or files that point to a missing target file.
+ type currentFile struct {
+ name string
+ fd FileDesc
+ }
+ tryCurrent := func(name string) (*currentFile, error) {
+ b, err := ioutil.ReadFile(filepath.Join(fs.path, name))
+ if err != nil {
+ if os.IsNotExist(err) {
+ err = os.ErrNotExist
+ }
+ return nil, err
+ }
+ var fd FileDesc
+ if len(b) < 1 || b[len(b)-1] != '\n' || !fsParseNamePtr(string(b[:len(b)-1]), &fd) {
+ fs.log(fmt.Sprintf("%s: corrupted content: %q", name, b))
+ err := &ErrCorrupted{
+ Err: errors.New("leveldb/storage: corrupted or incomplete CURRENT file"),
+ }
+ return nil, err
+ }
+ if _, err := os.Stat(filepath.Join(fs.path, fsGenName(fd))); err != nil {
+ if os.IsNotExist(err) {
+ fs.log(fmt.Sprintf("%s: missing target file: %s", name, fd))
+ err = os.ErrNotExist
+ }
+ return nil, err
+ }
+ return &currentFile{name: name, fd: fd}, nil
+ }
+ tryCurrents := func(names []string) (*currentFile, error) {
+ var (
+ cur *currentFile
+ // Last corruption error.
+ lastCerr error
+ )
+ for _, name := range names {
+ var err error
+ cur, err = tryCurrent(name)
+ if err == nil {
+ break
+ } else if err == os.ErrNotExist {
+ // Fallback to the next file.
+ } else if isCorrupted(err) {
+ lastCerr = err
+ // Fallback to the next file.
+ } else {
+ // In case the error is due to permission, etc.
+ return nil, err
+ }
+ }
+ if cur == nil {
+ err := os.ErrNotExist
+ if lastCerr != nil {
+ err = lastCerr
+ }
+ return nil, err
+ }
+ return cur, nil
+ }
+
+ // Try 'pending rename' files.
+ var nums []int64
+ for _, name := range names {
+ if strings.HasPrefix(name, "CURRENT.") && name != "CURRENT.bak" {
+ i, err := strconv.ParseInt(name[8:], 10, 64)
+ if err == nil {
+ nums = append(nums, i)
+ }
+ }
+ }
+ var (
+ pendCur *currentFile
+ pendErr = os.ErrNotExist
+ pendNames []string
+ )
+ if len(nums) > 0 {
+ sort.Sort(sort.Reverse(int64Slice(nums)))
+ pendNames = make([]string, len(nums))
+ for i, num := range nums {
+ pendNames[i] = fmt.Sprintf("CURRENT.%d", num)
+ }
+ pendCur, pendErr = tryCurrents(pendNames)
+ if pendErr != nil && pendErr != os.ErrNotExist && !isCorrupted(pendErr) {
+ return FileDesc{}, pendErr
+ }
+ }
+
+ // Try CURRENT and CURRENT.bak.
+ curCur, curErr := tryCurrents([]string{"CURRENT", "CURRENT.bak"})
+ if curErr != nil && curErr != os.ErrNotExist && !isCorrupted(curErr) {
+ return FileDesc{}, curErr
+ }
+
+ // pendCur takes precedence, but guards against obsolete pendCur.
+ if pendCur != nil && (curCur == nil || pendCur.fd.Num > curCur.fd.Num) {
+ curCur = pendCur
+ }
+
+ if curCur != nil {
+ // Restore CURRENT file to proper state.
+ if !fs.readOnly && (curCur.name != "CURRENT" || len(pendNames) != 0) {
+ // Ignore setMeta errors, however don't delete obsolete files if we
+ // catch an error.
+ if err := fs.setMeta(curCur.fd); err == nil {
+ // Remove 'pending rename' files.
+ for _, name := range pendNames {
+ if err := os.Remove(filepath.Join(fs.path, name)); err != nil {
+ fs.log(fmt.Sprintf("remove %s: %v", name, err))
+ }
+ }
+ }
+ }
+ return curCur.fd, nil
+ }
+
+ // Nothing found.
+ if isCorrupted(pendErr) {
+ return FileDesc{}, pendErr
+ }
+ return FileDesc{}, curErr
+}
+
+func (fs *fileStorage) List(ft FileType) (fds []FileDesc, err error) {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return nil, ErrClosed
+ }
+ dir, err := os.Open(fs.path)
+ if err != nil {
+ return
+ }
+ names, err := dir.Readdirnames(0)
+ // Close the dir first before checking for Readdirnames error.
+ if cerr := dir.Close(); cerr != nil {
+ fs.log(fmt.Sprintf("close dir: %v", cerr))
+ }
+ if err == nil {
+ for _, name := range names {
+ if fd, ok := fsParseName(name); ok && fd.Type&ft != 0 {
+ fds = append(fds, fd)
+ }
+ }
+ }
+ return
+}
+
+func (fs *fileStorage) Open(fd FileDesc) (Reader, error) {
+ if !FileDescOk(fd) {
+ return nil, ErrInvalidFile
+ }
+
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return nil, ErrClosed
+ }
+ of, err := os.OpenFile(filepath.Join(fs.path, fsGenName(fd)), os.O_RDONLY, 0)
+ if err != nil {
+ if fsHasOldName(fd) && os.IsNotExist(err) {
+ of, err = os.OpenFile(filepath.Join(fs.path, fsGenOldName(fd)), os.O_RDONLY, 0)
+ if err == nil {
+ goto ok
+ }
+ }
+ return nil, err
+ }
+ok:
+ fs.open++
+ return &fileWrap{File: of, fs: fs, fd: fd}, nil
+}
+
+func (fs *fileStorage) Create(fd FileDesc) (Writer, error) {
+ if !FileDescOk(fd) {
+ return nil, ErrInvalidFile
+ }
+ if fs.readOnly {
+ return nil, errReadOnly
+ }
+
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return nil, ErrClosed
+ }
+ of, err := os.OpenFile(filepath.Join(fs.path, fsGenName(fd)), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+ if err != nil {
+ return nil, err
+ }
+ fs.open++
+ return &fileWrap{File: of, fs: fs, fd: fd}, nil
+}
+
+func (fs *fileStorage) Remove(fd FileDesc) error {
+ if !FileDescOk(fd) {
+ return ErrInvalidFile
+ }
+ if fs.readOnly {
+ return errReadOnly
+ }
+
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return ErrClosed
+ }
+ err := os.Remove(filepath.Join(fs.path, fsGenName(fd)))
+ if err != nil {
+ if fsHasOldName(fd) && os.IsNotExist(err) {
+ if e1 := os.Remove(filepath.Join(fs.path, fsGenOldName(fd))); !os.IsNotExist(e1) {
+ fs.log(fmt.Sprintf("remove %s: %v (old name)", fd, err))
+ err = e1
+ }
+ } else {
+ fs.log(fmt.Sprintf("remove %s: %v", fd, err))
+ }
+ }
+ return err
+}
+
+func (fs *fileStorage) Rename(oldfd, newfd FileDesc) error {
+ if !FileDescOk(oldfd) || !FileDescOk(newfd) {
+ return ErrInvalidFile
+ }
+ if oldfd == newfd {
+ return nil
+ }
+ if fs.readOnly {
+ return errReadOnly
+ }
+
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return ErrClosed
+ }
+ return rename(filepath.Join(fs.path, fsGenName(oldfd)), filepath.Join(fs.path, fsGenName(newfd)))
+}
+
+func (fs *fileStorage) Close() error {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if fs.open < 0 {
+ return ErrClosed
+ }
+ // Clear the finalizer.
+ runtime.SetFinalizer(fs, nil)
+
+ if fs.open > 0 {
+ fs.log(fmt.Sprintf("close: warning, %d files still open", fs.open))
+ }
+ fs.open = -1
+ if fs.logw != nil {
+ fs.logw.Close()
+ }
+ return fs.flock.release()
+}
+
+type fileWrap struct {
+ *os.File
+ fs *fileStorage
+ fd FileDesc
+ closed bool
+}
+
+func (fw *fileWrap) Sync() error {
+ if err := fw.File.Sync(); err != nil {
+ return err
+ }
+ if fw.fd.Type == TypeManifest {
+ // Also sync parent directory if file type is manifest.
+ // See: https://code.google.com/p/leveldb/issues/detail?id=190.
+ if err := syncDir(fw.fs.path); err != nil {
+ fw.fs.log(fmt.Sprintf("syncDir: %v", err))
+ return err
+ }
+ }
+ return nil
+}
+
+func (fw *fileWrap) Close() error {
+ fw.fs.mu.Lock()
+ defer fw.fs.mu.Unlock()
+ if fw.closed {
+ return ErrClosed
+ }
+ fw.closed = true
+ fw.fs.open--
+ err := fw.File.Close()
+ if err != nil {
+ fw.fs.log(fmt.Sprintf("close %s: %v", fw.fd, err))
+ }
+ return err
+}
+
+func fsGenName(fd FileDesc) string {
+ switch fd.Type {
+ case TypeManifest:
+ return fmt.Sprintf("MANIFEST-%06d", fd.Num)
+ case TypeJournal:
+ return fmt.Sprintf("%06d.log", fd.Num)
+ case TypeTable:
+ return fmt.Sprintf("%06d.ldb", fd.Num)
+ case TypeTemp:
+ return fmt.Sprintf("%06d.tmp", fd.Num)
+ default:
+ panic("invalid file type")
+ }
+}
+
+func fsHasOldName(fd FileDesc) bool {
+ return fd.Type == TypeTable
+}
+
+func fsGenOldName(fd FileDesc) string {
+ switch fd.Type {
+ case TypeTable:
+ return fmt.Sprintf("%06d.sst", fd.Num)
+ }
+ return fsGenName(fd)
+}
+
+func fsParseName(name string) (fd FileDesc, ok bool) {
+ var tail string
+ _, err := fmt.Sscanf(name, "%d.%s", &fd.Num, &tail)
+ if err == nil {
+ switch tail {
+ case "log":
+ fd.Type = TypeJournal
+ case "ldb", "sst":
+ fd.Type = TypeTable
+ case "tmp":
+ fd.Type = TypeTemp
+ default:
+ return
+ }
+ return fd, true
+ }
+ n, _ := fmt.Sscanf(name, "MANIFEST-%d%s", &fd.Num, &tail)
+ if n == 1 {
+ fd.Type = TypeManifest
+ return fd, true
+ }
+ return
+}
+
+func fsParseNamePtr(name string, fd *FileDesc) bool {
+ _fd, ok := fsParseName(name)
+ if fd != nil {
+ *fd = _fd
+ }
+ return ok
+}
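The file storage above maps each FileDesc onto LevelDB's on-disk names (MANIFEST-%06d, %06d.log, %06d.ldb, %06d.tmp) and tracks the active manifest through the CURRENT file. A short usage sketch of the exported API; the directory path is illustrative and error handling is abbreviated.

package main

import (
	"fmt"

	"github.com/syndtr/goleveldb/leveldb/storage"
)

func main() {
	// OpenFile acquires the LOCK file, so a second OpenFile on the same
	// directory fails until Close releases it.
	stor, err := storage.OpenFile("/tmp/goleveldb-example", false)
	if err != nil {
		panic(err)
	}
	defer stor.Close()

	// Create the manifest file (MANIFEST-000001 on disk) and point the
	// CURRENT file at it via SetMeta.
	fd := storage.FileDesc{Type: storage.TypeManifest, Num: 1}
	w, err := stor.Create(fd)
	if err != nil {
		panic(err)
	}
	w.Write([]byte("manifest payload\n"))
	w.Sync()
	w.Close()

	if err := stor.SetMeta(fd); err != nil {
		panic(err)
	}
	cur, _ := stor.GetMeta()
	fmt.Println(cur) // MANIFEST-000001
}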
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go
new file mode 100644
index 000000000..5545aeef2
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_nacl.go
@@ -0,0 +1,34 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build nacl
+
+package storage
+
+import (
+ "os"
+ "syscall"
+)
+
+func newFileLock(path string, readOnly bool) (fl fileLock, err error) {
+ return nil, syscall.ENOTSUP
+}
+
+func setFileLock(f *os.File, readOnly, lock bool) error {
+ return syscall.ENOTSUP
+}
+
+func rename(oldpath, newpath string) error {
+ return syscall.ENOTSUP
+}
+
+func isErrInvalid(err error) bool {
+ return false
+}
+
+func syncDir(name string) error {
+ return syscall.ENOTSUP
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go
new file mode 100644
index 000000000..b82979801
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_plan9.go
@@ -0,0 +1,63 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package storage
+
+import (
+ "os"
+)
+
+type plan9FileLock struct {
+ f *os.File
+}
+
+func (fl *plan9FileLock) release() error {
+ return fl.f.Close()
+}
+
+func newFileLock(path string, readOnly bool) (fl fileLock, err error) {
+ var (
+ flag int
+ perm os.FileMode
+ )
+ if readOnly {
+ flag = os.O_RDONLY
+ } else {
+ flag = os.O_RDWR
+ perm = os.ModeExclusive
+ }
+ f, err := os.OpenFile(path, flag, perm)
+ if os.IsNotExist(err) {
+ f, err = os.OpenFile(path, flag|os.O_CREATE, perm|0644)
+ }
+ if err != nil {
+ return
+ }
+ fl = &plan9FileLock{f: f}
+ return
+}
+
+func rename(oldpath, newpath string) error {
+ if _, err := os.Stat(newpath); err == nil {
+ if err := os.Remove(newpath); err != nil {
+ return err
+ }
+ }
+
+ return os.Rename(oldpath, newpath)
+}
+
+func syncDir(name string) error {
+ f, err := os.Open(name)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ if err := f.Sync(); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go
new file mode 100644
index 000000000..79901ee4a
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_solaris.go
@@ -0,0 +1,81 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build solaris
+
+package storage
+
+import (
+ "os"
+ "syscall"
+)
+
+type unixFileLock struct {
+ f *os.File
+}
+
+func (fl *unixFileLock) release() error {
+ if err := setFileLock(fl.f, false, false); err != nil {
+ return err
+ }
+ return fl.f.Close()
+}
+
+func newFileLock(path string, readOnly bool) (fl fileLock, err error) {
+ var flag int
+ if readOnly {
+ flag = os.O_RDONLY
+ } else {
+ flag = os.O_RDWR
+ }
+ f, err := os.OpenFile(path, flag, 0)
+ if os.IsNotExist(err) {
+ f, err = os.OpenFile(path, flag|os.O_CREATE, 0644)
+ }
+ if err != nil {
+ return
+ }
+ err = setFileLock(f, readOnly, true)
+ if err != nil {
+ f.Close()
+ return
+ }
+ fl = &unixFileLock{f: f}
+ return
+}
+
+func setFileLock(f *os.File, readOnly, lock bool) error {
+ flock := syscall.Flock_t{
+ Type: syscall.F_UNLCK,
+ Start: 0,
+ Len: 0,
+ Whence: 1,
+ }
+ if lock {
+ if readOnly {
+ flock.Type = syscall.F_RDLCK
+ } else {
+ flock.Type = syscall.F_WRLCK
+ }
+ }
+ return syscall.FcntlFlock(f.Fd(), syscall.F_SETLK, &flock)
+}
+
+func rename(oldpath, newpath string) error {
+ return os.Rename(oldpath, newpath)
+}
+
+func syncDir(name string) error {
+ f, err := os.Open(name)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ if err := f.Sync(); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go
new file mode 100644
index 000000000..d75f66a9e
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_unix.go
@@ -0,0 +1,98 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package storage
+
+import (
+ "os"
+ "syscall"
+)
+
+type unixFileLock struct {
+ f *os.File
+}
+
+func (fl *unixFileLock) release() error {
+ if err := setFileLock(fl.f, false, false); err != nil {
+ return err
+ }
+ return fl.f.Close()
+}
+
+func newFileLock(path string, readOnly bool) (fl fileLock, err error) {
+ var flag int
+ if readOnly {
+ flag = os.O_RDONLY
+ } else {
+ flag = os.O_RDWR
+ }
+ f, err := os.OpenFile(path, flag, 0)
+ if os.IsNotExist(err) {
+ f, err = os.OpenFile(path, flag|os.O_CREATE, 0644)
+ }
+ if err != nil {
+ return
+ }
+ err = setFileLock(f, readOnly, true)
+ if err != nil {
+ f.Close()
+ return
+ }
+ fl = &unixFileLock{f: f}
+ return
+}
+
+func setFileLock(f *os.File, readOnly, lock bool) error {
+ how := syscall.LOCK_UN
+ if lock {
+ if readOnly {
+ how = syscall.LOCK_SH
+ } else {
+ how = syscall.LOCK_EX
+ }
+ }
+ return syscall.Flock(int(f.Fd()), how|syscall.LOCK_NB)
+}
+
+func rename(oldpath, newpath string) error {
+ return os.Rename(oldpath, newpath)
+}
+
+func isErrInvalid(err error) bool {
+ if err == os.ErrInvalid {
+ return true
+ }
+ // Go < 1.8
+ if syserr, ok := err.(*os.SyscallError); ok && syserr.Err == syscall.EINVAL {
+ return true
+ }
+ // Go >= 1.8 returns *os.PathError instead
+ if patherr, ok := err.(*os.PathError); ok && patherr.Err == syscall.EINVAL {
+ return true
+ }
+ return false
+}
+
+func syncDir(name string) error {
+ // As per the fsync manpage, Linux seems to expect fsync on a directory; however,
+ // some systems don't support this, so we will ignore syscall.EINVAL.
+ //
+ // From fsync(2):
+ // Calling fsync() does not necessarily ensure that the entry in the
+ // directory containing the file has also reached disk. For that an
+ // explicit fsync() on a file descriptor for the directory is also needed.
+ f, err := os.Open(name)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ if err := f.Sync(); err != nil && !isErrInvalid(err) {
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go
new file mode 100644
index 000000000..899335fd7
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/file_storage_windows.go
@@ -0,0 +1,78 @@
+// Copyright (c) 2013, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package storage
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+var (
+ modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+ procMoveFileExW = modkernel32.NewProc("MoveFileExW")
+)
+
+const (
+ _MOVEFILE_REPLACE_EXISTING = 1
+)
+
+type windowsFileLock struct {
+ fd syscall.Handle
+}
+
+func (fl *windowsFileLock) release() error {
+ return syscall.Close(fl.fd)
+}
+
+func newFileLock(path string, readOnly bool) (fl fileLock, err error) {
+ pathp, err := syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return
+ }
+ var access, shareMode uint32
+ if readOnly {
+ access = syscall.GENERIC_READ
+ shareMode = syscall.FILE_SHARE_READ
+ } else {
+ access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
+ }
+ fd, err := syscall.CreateFile(pathp, access, shareMode, nil, syscall.OPEN_EXISTING, syscall.FILE_ATTRIBUTE_NORMAL, 0)
+ if err == syscall.ERROR_FILE_NOT_FOUND {
+ fd, err = syscall.CreateFile(pathp, access, shareMode, nil, syscall.OPEN_ALWAYS, syscall.FILE_ATTRIBUTE_NORMAL, 0)
+ }
+ if err != nil {
+ return
+ }
+ fl = &windowsFileLock{fd: fd}
+ return
+}
+
+func moveFileEx(from *uint16, to *uint16, flags uint32) error {
+ r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(from)), uintptr(unsafe.Pointer(to)), uintptr(flags))
+ if r1 == 0 {
+ if e1 != 0 {
+ return error(e1)
+ }
+ return syscall.EINVAL
+ }
+ return nil
+}
+
+func rename(oldpath, newpath string) error {
+ from, err := syscall.UTF16PtrFromString(oldpath)
+ if err != nil {
+ return err
+ }
+ to, err := syscall.UTF16PtrFromString(newpath)
+ if err != nil {
+ return err
+ }
+ return moveFileEx(from, to, _MOVEFILE_REPLACE_EXISTING)
+}
+
+func syncDir(name string) error { return nil }
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go
new file mode 100644
index 000000000..838f1bee1
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/mem_storage.go
@@ -0,0 +1,222 @@
+// Copyright (c) 2013, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package storage
+
+import (
+ "bytes"
+ "os"
+ "sync"
+)
+
+const typeShift = 4
+
+// Verify at compile-time that typeShift is large enough to cover all FileType
+// values by confirming that 0 == 0.
+var _ [0]struct{} = [TypeAll >> typeShift]struct{}{}
+
+type memStorageLock struct {
+ ms *memStorage
+}
+
+func (lock *memStorageLock) Unlock() {
+ ms := lock.ms
+ ms.mu.Lock()
+ defer ms.mu.Unlock()
+ if ms.slock == lock {
+ ms.slock = nil
+ }
+ return
+}
+
+// memStorage is a memory-backed storage.
+type memStorage struct {
+ mu sync.Mutex
+ slock *memStorageLock
+ files map[uint64]*memFile
+ meta FileDesc
+}
+
+// NewMemStorage returns a new memory-backed storage implementation.
+func NewMemStorage() Storage {
+ return &memStorage{
+ files: make(map[uint64]*memFile),
+ }
+}
+
+func (ms *memStorage) Lock() (Locker, error) {
+ ms.mu.Lock()
+ defer ms.mu.Unlock()
+ if ms.slock != nil {
+ return nil, ErrLocked
+ }
+ ms.slock = &memStorageLock{ms: ms}
+ return ms.slock, nil
+}
+
+func (*memStorage) Log(str string) {}
+
+func (ms *memStorage) SetMeta(fd FileDesc) error {
+ if !FileDescOk(fd) {
+ return ErrInvalidFile
+ }
+
+ ms.mu.Lock()
+ ms.meta = fd
+ ms.mu.Unlock()
+ return nil
+}
+
+func (ms *memStorage) GetMeta() (FileDesc, error) {
+ ms.mu.Lock()
+ defer ms.mu.Unlock()
+ if ms.meta.Zero() {
+ return FileDesc{}, os.ErrNotExist
+ }
+ return ms.meta, nil
+}
+
+func (ms *memStorage) List(ft FileType) ([]FileDesc, error) {
+ ms.mu.Lock()
+ var fds []FileDesc
+ for x := range ms.files {
+ fd := unpackFile(x)
+ if fd.Type&ft != 0 {
+ fds = append(fds, fd)
+ }
+ }
+ ms.mu.Unlock()
+ return fds, nil
+}
+
+func (ms *memStorage) Open(fd FileDesc) (Reader, error) {
+ if !FileDescOk(fd) {
+ return nil, ErrInvalidFile
+ }
+
+ ms.mu.Lock()
+ defer ms.mu.Unlock()
+ if m, exist := ms.files[packFile(fd)]; exist {
+ if m.open {
+ return nil, errFileOpen
+ }
+ m.open = true
+ return &memReader{Reader: bytes.NewReader(m.Bytes()), ms: ms, m: m}, nil
+ }
+ return nil, os.ErrNotExist
+}
+
+func (ms *memStorage) Create(fd FileDesc) (Writer, error) {
+ if !FileDescOk(fd) {
+ return nil, ErrInvalidFile
+ }
+
+ x := packFile(fd)
+ ms.mu.Lock()
+ defer ms.mu.Unlock()
+ m, exist := ms.files[x]
+ if exist {
+ if m.open {
+ return nil, errFileOpen
+ }
+ m.Reset()
+ } else {
+ m = &memFile{}
+ ms.files[x] = m
+ }
+ m.open = true
+ return &memWriter{memFile: m, ms: ms}, nil
+}
+
+func (ms *memStorage) Remove(fd FileDesc) error {
+ if !FileDescOk(fd) {
+ return ErrInvalidFile
+ }
+
+ x := packFile(fd)
+ ms.mu.Lock()
+ defer ms.mu.Unlock()
+ if _, exist := ms.files[x]; exist {
+ delete(ms.files, x)
+ return nil
+ }
+ return os.ErrNotExist
+}
+
+func (ms *memStorage) Rename(oldfd, newfd FileDesc) error {
+ if !FileDescOk(oldfd) || !FileDescOk(newfd) {
+ return ErrInvalidFile
+ }
+ if oldfd == newfd {
+ return nil
+ }
+
+ oldx := packFile(oldfd)
+ newx := packFile(newfd)
+ ms.mu.Lock()
+ defer ms.mu.Unlock()
+ oldm, exist := ms.files[oldx]
+ if !exist {
+ return os.ErrNotExist
+ }
+ newm, exist := ms.files[newx]
+ if (exist && newm.open) || oldm.open {
+ return errFileOpen
+ }
+ delete(ms.files, oldx)
+ ms.files[newx] = oldm
+ return nil
+}
+
+func (*memStorage) Close() error { return nil }
+
+type memFile struct {
+ bytes.Buffer
+ open bool
+}
+
+type memReader struct {
+ *bytes.Reader
+ ms *memStorage
+ m *memFile
+ closed bool
+}
+
+func (mr *memReader) Close() error {
+ mr.ms.mu.Lock()
+ defer mr.ms.mu.Unlock()
+ if mr.closed {
+ return ErrClosed
+ }
+ mr.m.open = false
+ return nil
+}
+
+type memWriter struct {
+ *memFile
+ ms *memStorage
+ closed bool
+}
+
+func (*memWriter) Sync() error { return nil }
+
+func (mw *memWriter) Close() error {
+ mw.ms.mu.Lock()
+ defer mw.ms.mu.Unlock()
+ if mw.closed {
+ return ErrClosed
+ }
+ mw.memFile.open = false
+ return nil
+}
+
+func packFile(fd FileDesc) uint64 {
+ return uint64(fd.Num)<<typeShift | uint64(fd.Type)
+}
+
+func unpackFile(x uint64) FileDesc {
+ return FileDesc{FileType(x) & TypeAll, int64(x >> typeShift)}
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go b/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go
new file mode 100644
index 000000000..4e4a72425
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/storage/storage.go
@@ -0,0 +1,187 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package storage provides storage abstraction for LevelDB.
+package storage
+
+import (
+ "errors"
+ "fmt"
+ "io"
+)
+
+// FileType represents a file type.
+type FileType int
+
+// File types.
+const (
+ TypeManifest FileType = 1 << iota
+ TypeJournal
+ TypeTable
+ TypeTemp
+
+ TypeAll = TypeManifest | TypeJournal | TypeTable | TypeTemp
+)
+
+func (t FileType) String() string {
+ switch t {
+ case TypeManifest:
+ return "manifest"
+ case TypeJournal:
+ return "journal"
+ case TypeTable:
+ return "table"
+ case TypeTemp:
+ return "temp"
+ }
+ return fmt.Sprintf("", t)
+}
+
+// Common errors.
+var (
+ ErrInvalidFile = errors.New("leveldb/storage: invalid file for argument")
+ ErrLocked = errors.New("leveldb/storage: already locked")
+ ErrClosed = errors.New("leveldb/storage: closed")
+)
+
+// ErrCorrupted is the type that wraps errors that indicate corruption of
+// a file. Package storage has its own type instead of using
+// errors.ErrCorrupted to prevent circular import.
+type ErrCorrupted struct {
+ Fd FileDesc
+ Err error
+}
+
+func isCorrupted(err error) bool {
+ switch err.(type) {
+ case *ErrCorrupted:
+ return true
+ }
+ return false
+}
+
+func (e *ErrCorrupted) Error() string {
+ if !e.Fd.Zero() {
+ return fmt.Sprintf("%v [file=%v]", e.Err, e.Fd)
+ }
+ return e.Err.Error()
+}
+
+// Syncer is the interface that wraps basic Sync method.
+type Syncer interface {
+ // Sync commits the current contents of the file to stable storage.
+ Sync() error
+}
+
+// Reader is the interface that groups the basic Read, Seek, ReadAt and Close
+// methods.
+type Reader interface {
+ io.ReadSeeker
+ io.ReaderAt
+ io.Closer
+}
+
+// Writer is the interface that groups the basic Write, Sync and Close
+// methods.
+type Writer interface {
+ io.WriteCloser
+ Syncer
+}
+
+// Locker is the interface that wraps Unlock method.
+type Locker interface {
+ Unlock()
+}
+
+// FileDesc is a 'file descriptor'.
+type FileDesc struct {
+ Type FileType
+ Num int64
+}
+
+func (fd FileDesc) String() string {
+ switch fd.Type {
+ case TypeManifest:
+ return fmt.Sprintf("MANIFEST-%06d", fd.Num)
+ case TypeJournal:
+ return fmt.Sprintf("%06d.log", fd.Num)
+ case TypeTable:
+ return fmt.Sprintf("%06d.ldb", fd.Num)
+ case TypeTemp:
+ return fmt.Sprintf("%06d.tmp", fd.Num)
+ default:
+ return fmt.Sprintf("%#x-%d", fd.Type, fd.Num)
+ }
+}
+
+// Zero returns true if fd == (FileDesc{}).
+func (fd FileDesc) Zero() bool {
+ return fd == (FileDesc{})
+}
+
+// FileDescOk returns true if fd is a valid 'file descriptor'.
+func FileDescOk(fd FileDesc) bool {
+ switch fd.Type {
+ case TypeManifest:
+ case TypeJournal:
+ case TypeTable:
+ case TypeTemp:
+ default:
+ return false
+ }
+ return fd.Num >= 0
+}
+
+// Storage is the storage. A storage instance must be safe for concurrent use.
+type Storage interface {
+ // Lock locks the storage. Any subsequent attempt to call Lock will fail
+ // until the last lock is released.
+ // Caller should call Unlock method after use.
+ Lock() (Locker, error)
+
+ // Log logs a string. This is used for logging.
+ // An implementation may write to a file, stdout or simply do nothing.
+ Log(str string)
+
+ // SetMeta stores a 'file descriptor' that can later be acquired using the
+ // GetMeta method. The 'file descriptor' should point to a valid file.
+ // SetMeta should be implemented in such a way that changes happen
+ // atomically.
+ SetMeta(fd FileDesc) error
+
+ // GetMeta returns the 'file descriptor' stored in meta. The 'file descriptor'
+ // can be updated using the SetMeta method.
+ // Returns os.ErrNotExist if meta doesn't store any 'file descriptor', or if
+ // the 'file descriptor' points to a nonexistent file.
+ GetMeta() (FileDesc, error)
+
+ // List returns file descriptors that match the given file types.
+ // The file types may be OR'ed together.
+ List(ft FileType) ([]FileDesc, error)
+
+ // Open opens file with the given 'file descriptor' read-only.
+ // Returns os.ErrNotExist error if the file does not exist.
+ // Returns ErrClosed if the underlying storage is closed.
+ Open(fd FileDesc) (Reader, error)
+
+ // Create creates a file with the given 'file descriptor', truncates it if it
+ // already exists, and opens it write-only.
+ // Returns ErrClosed if the underlying storage is closed.
+ Create(fd FileDesc) (Writer, error)
+
+ // Remove removes file with the given 'file descriptor'.
+ // Returns ErrClosed if the underlying storage is closed.
+ Remove(fd FileDesc) error
+
+ // Rename renames file from oldfd to newfd.
+ // Returns ErrClosed if the underlying storage is closed.
+ Rename(oldfd, newfd FileDesc) error
+
+ // Close closes the storage.
+ // It is valid to call Close multiple times. Other methods should not be
+ // called after the storage has been closed.
+ Close() error
+}
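The comments above define the contract every Storage implementation must honor, e.g. GetMeta returning os.ErrNotExist when nothing has been stored, and FileDescOk gating methods on a valid descriptor. A small sketch exercising both against the in-memory implementation:

package main

import (
	"fmt"
	"os"

	"github.com/syndtr/goleveldb/leveldb/storage"
)

func main() {
	stor := storage.NewMemStorage()
	defer stor.Close()

	// Per the GetMeta contract, a store with no recorded meta reports os.ErrNotExist.
	if _, err := stor.GetMeta(); err == os.ErrNotExist {
		fmt.Println("no CURRENT manifest recorded yet")
	}

	// FileDescOk rejects descriptors with an unknown type or a negative number.
	fmt.Println(storage.FileDescOk(storage.FileDesc{Type: storage.TypeTable, Num: 1})) // true
	fmt.Println(storage.FileDescOk(storage.FileDesc{Num: -1}))                         // false
}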
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table.go b/vendor/github.com/syndtr/goleveldb/leveldb/table.go
new file mode 100644
index 000000000..adf773f13
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/table.go
@@ -0,0 +1,529 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package leveldb
+
+import (
+ "fmt"
+ "sort"
+ "sync/atomic"
+
+ "github.com/syndtr/goleveldb/leveldb/cache"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/table"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// tFile holds basic information about a table.
+type tFile struct {
+ fd storage.FileDesc
+ seekLeft int32
+ size int64
+ imin, imax internalKey
+}
+
+// Returns true if given key is after largest key of this table.
+func (t *tFile) after(icmp *iComparer, ukey []byte) bool {
+ return ukey != nil && icmp.uCompare(ukey, t.imax.ukey()) > 0
+}
+
+// Returns true if given key is before smallest key of this table.
+func (t *tFile) before(icmp *iComparer, ukey []byte) bool {
+ return ukey != nil && icmp.uCompare(ukey, t.imin.ukey()) < 0
+}
+
+// Returns true if given key range overlaps with this table key range.
+func (t *tFile) overlaps(icmp *iComparer, umin, umax []byte) bool {
+ return !t.after(icmp, umin) && !t.before(icmp, umax)
+}
+
+// Consumes one seek and returns the number of seeks left.
+func (t *tFile) consumeSeek() int32 {
+ return atomic.AddInt32(&t.seekLeft, -1)
+}
+
+// Creates new tFile.
+func newTableFile(fd storage.FileDesc, size int64, imin, imax internalKey) *tFile {
+ f := &tFile{
+ fd: fd,
+ size: size,
+ imin: imin,
+ imax: imax,
+ }
+
+ // We arrange to automatically compact this file after
+ // a certain number of seeks. Let's assume:
+ // (1) One seek costs 10ms
+ // (2) Writing or reading 1MB costs 10ms (100MB/s)
+ // (3) A compaction of 1MB does 25MB of IO:
+ // 1MB read from this level
+ // 10-12MB read from next level (boundaries may be misaligned)
+ // 10-12MB written to next level
+ // This implies that 25 seeks cost the same as the compaction
+ // of 1MB of data. I.e., one seek costs approximately the
+ // same as the compaction of 40KB of data. We are a little
+ // conservative and allow approximately one seek for every 16KB
+ // of data before triggering a compaction.
+ f.seekLeft = int32(size / 16384)
+ if f.seekLeft < 100 {
+ f.seekLeft = 100
+ }
+
+ return f
+}
+
+func tableFileFromRecord(r atRecord) *tFile {
+ return newTableFile(storage.FileDesc{storage.TypeTable, r.num}, r.size, r.imin, r.imax)
+}
+
+// tFiles holds multiple tFiles.
+type tFiles []*tFile
+
+func (tf tFiles) Len() int { return len(tf) }
+func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] }
+
+func (tf tFiles) nums() string {
+ x := "[ "
+ for i, f := range tf {
+ if i != 0 {
+ x += ", "
+ }
+ x += fmt.Sprint(f.fd.Num)
+ }
+ x += " ]"
+ return x
+}
+
+// Returns true if table i's smallest key is less than table j's.
+// This is used for sorting by key in ascending order.
+func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool {
+ a, b := tf[i], tf[j]
+ n := icmp.Compare(a.imin, b.imin)
+ if n == 0 {
+ return a.fd.Num < b.fd.Num
+ }
+ return n < 0
+}
+
+// Returns true if table i's file number is greater than table j's.
+// This is used for sorting by file number in descending order.
+func (tf tFiles) lessByNum(i, j int) bool {
+ return tf[i].fd.Num > tf[j].fd.Num
+}
+
+// Sorts tables by key in ascending order.
+func (tf tFiles) sortByKey(icmp *iComparer) {
+ sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp})
+}
+
+// Sorts tables by file number in descending order.
+func (tf tFiles) sortByNum() {
+ sort.Sort(&tFilesSortByNum{tFiles: tf})
+}
+
+// Returns the total size of all tables.
+func (tf tFiles) size() (sum int64) {
+ for _, t := range tf {
+ sum += t.size
+ }
+ return sum
+}
+
+// Returns the smallest index of a table whose smallest
+// key is greater than or equal to the given key.
+func (tf tFiles) searchMin(icmp *iComparer, ikey internalKey) int {
+ return sort.Search(len(tf), func(i int) bool {
+ return icmp.Compare(tf[i].imin, ikey) >= 0
+ })
+}
+
+// Returns the smallest index of a table whose largest
+// key is greater than or equal to the given key.
+func (tf tFiles) searchMax(icmp *iComparer, ikey internalKey) int {
+ return sort.Search(len(tf), func(i int) bool {
+ return icmp.Compare(tf[i].imax, ikey) >= 0
+ })
+}
+
+// Returns true if the given key range overlaps one or more
+// tables' key ranges. If unsorted is true then binary search will not be used.
+func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool {
+ if unsorted {
+ // Check against all files.
+ for _, t := range tf {
+ if t.overlaps(icmp, umin, umax) {
+ return true
+ }
+ }
+ return false
+ }
+
+ i := 0
+ if len(umin) > 0 {
+ // Find the earliest possible internal key for min.
+ i = tf.searchMax(icmp, makeInternalKey(nil, umin, keyMaxSeq, keyTypeSeek))
+ }
+ if i >= len(tf) {
+ // Beginning of range is after all files, so no overlap.
+ return false
+ }
+ return !tf[i].before(icmp, umax)
+}
+
+// Returns tables whose key range overlaps the given key range.
+// The range will be expanded if a ukey hops across tables.
+// If overlapped is true then the search will be restarted whenever umax
+// is expanded.
+// The contents of dst will be overwritten.
+func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles {
+ dst = dst[:0]
+ for i := 0; i < len(tf); {
+ t := tf[i]
+ if t.overlaps(icmp, umin, umax) {
+ if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 {
+ umin = t.imin.ukey()
+ dst = dst[:0]
+ i = 0
+ continue
+ } else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 {
+ umax = t.imax.ukey()
+ // Restart search if it is overlapped.
+ if overlapped {
+ dst = dst[:0]
+ i = 0
+ continue
+ }
+ }
+
+ dst = append(dst, t)
+ }
+ i++
+ }
+
+ return dst
+}
+
+// Returns tables key range.
+func (tf tFiles) getRange(icmp *iComparer) (imin, imax internalKey) {
+ for i, t := range tf {
+ if i == 0 {
+ imin, imax = t.imin, t.imax
+ continue
+ }
+ if icmp.Compare(t.imin, imin) < 0 {
+ imin = t.imin
+ }
+ if icmp.Compare(t.imax, imax) > 0 {
+ imax = t.imax
+ }
+ }
+
+ return
+}
+
+// Creates iterator index from tables.
+func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer {
+ if slice != nil {
+ var start, limit int
+ if slice.Start != nil {
+ start = tf.searchMax(icmp, internalKey(slice.Start))
+ }
+ if slice.Limit != nil {
+ limit = tf.searchMin(icmp, internalKey(slice.Limit))
+ } else {
+ limit = tf.Len()
+ }
+ tf = tf[start:limit]
+ }
+ return iterator.NewArrayIndexer(&tFilesArrayIndexer{
+ tFiles: tf,
+ tops: tops,
+ icmp: icmp,
+ slice: slice,
+ ro: ro,
+ })
+}
+
+// Tables iterator index.
+type tFilesArrayIndexer struct {
+ tFiles
+ tops *tOps
+ icmp *iComparer
+ slice *util.Range
+ ro *opt.ReadOptions
+}
+
+func (a *tFilesArrayIndexer) Search(key []byte) int {
+ return a.searchMax(a.icmp, internalKey(key))
+}
+
+func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator {
+ if i == 0 || i == a.Len()-1 {
+ return a.tops.newIterator(a.tFiles[i], a.slice, a.ro)
+ }
+ return a.tops.newIterator(a.tFiles[i], nil, a.ro)
+}
+
+// Helper type for sortByKey.
+type tFilesSortByKey struct {
+ tFiles
+ icmp *iComparer
+}
+
+func (x *tFilesSortByKey) Less(i, j int) bool {
+ return x.lessByKey(x.icmp, i, j)
+}
+
+// Helper type for sortByNum.
+type tFilesSortByNum struct {
+ tFiles
+}
+
+func (x *tFilesSortByNum) Less(i, j int) bool {
+ return x.lessByNum(i, j)
+}
+
+// Table operations.
+type tOps struct {
+ s *session
+ noSync bool
+ cache *cache.Cache
+ bcache *cache.Cache
+ bpool *util.BufferPool
+}
+
+// Creates an empty table and returns table writer.
+func (t *tOps) create() (*tWriter, error) {
+ fd := storage.FileDesc{storage.TypeTable, t.s.allocFileNum()}
+ fw, err := t.s.stor.Create(fd)
+ if err != nil {
+ return nil, err
+ }
+ return &tWriter{
+ t: t,
+ fd: fd,
+ w: fw,
+ tw: table.NewWriter(fw, t.s.o.Options),
+ }, nil
+}
+
+// Builds table from src iterator.
+func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {
+ w, err := t.create()
+ if err != nil {
+ return
+ }
+
+ defer func() {
+ if err != nil {
+ w.drop()
+ }
+ }()
+
+ for src.Next() {
+ err = w.append(src.Key(), src.Value())
+ if err != nil {
+ return
+ }
+ }
+ err = src.Error()
+ if err != nil {
+ return
+ }
+
+ n = w.tw.EntriesLen()
+ f, err = w.finish()
+ return
+}
+
+// Opens table. It returns a cache handle, which should
+// be released after use.
+func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) {
+ ch = t.cache.Get(0, uint64(f.fd.Num), func() (size int, value cache.Value) {
+ var r storage.Reader
+ r, err = t.s.stor.Open(f.fd)
+ if err != nil {
+ return 0, nil
+ }
+
+ var bcache *cache.NamespaceGetter
+ if t.bcache != nil {
+ bcache = &cache.NamespaceGetter{Cache: t.bcache, NS: uint64(f.fd.Num)}
+ }
+
+ var tr *table.Reader
+ tr, err = table.NewReader(r, f.size, f.fd, bcache, t.bpool, t.s.o.Options)
+ if err != nil {
+ r.Close()
+ return 0, nil
+ }
+ return 1, tr
+
+ })
+ if ch == nil && err == nil {
+ err = ErrClosed
+ }
+ return
+}
+
+// Finds key/value pair whose key is greater than or equal to the
+// given key.
+func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) {
+ ch, err := t.open(f)
+ if err != nil {
+ return nil, nil, err
+ }
+ defer ch.Release()
+ return ch.Value().(*table.Reader).Find(key, true, ro)
+}
+
+// Finds key that is greater than or equal to the given key.
+func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte, err error) {
+ ch, err := t.open(f)
+ if err != nil {
+ return nil, err
+ }
+ defer ch.Release()
+ return ch.Value().(*table.Reader).FindKey(key, true, ro)
+}
+
+// Returns approximate offset of the given key.
+func (t *tOps) offsetOf(f *tFile, key []byte) (offset int64, err error) {
+ ch, err := t.open(f)
+ if err != nil {
+ return
+ }
+ defer ch.Release()
+ return ch.Value().(*table.Reader).OffsetOf(key)
+}
+
+// Creates an iterator from the given table.
+func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+ ch, err := t.open(f)
+ if err != nil {
+ return iterator.NewEmptyIterator(err)
+ }
+ iter := ch.Value().(*table.Reader).NewIterator(slice, ro)
+ iter.SetReleaser(ch)
+ return iter
+}
+
+// Removes table from persistent storage. It waits until
+// no one uses the table.
+func (t *tOps) remove(f *tFile) {
+ t.cache.Delete(0, uint64(f.fd.Num), func() {
+ if err := t.s.stor.Remove(f.fd); err != nil {
+ t.s.logf("table@remove removing @%d %q", f.fd.Num, err)
+ } else {
+ t.s.logf("table@remove removed @%d", f.fd.Num)
+ }
+ if t.bcache != nil {
+ t.bcache.EvictNS(uint64(f.fd.Num))
+ }
+ })
+}
+
+// Closes the table ops instance. It will close all tables,
+// regardless of whether they are still in use.
+func (t *tOps) close() {
+ t.bpool.Close()
+ t.cache.Close()
+ if t.bcache != nil {
+ t.bcache.CloseWeak()
+ }
+}
+
+// Creates new initialized table ops instance.
+func newTableOps(s *session) *tOps {
+ var (
+ cacher cache.Cacher
+ bcache *cache.Cache
+ bpool *util.BufferPool
+ )
+ if s.o.GetOpenFilesCacheCapacity() > 0 {
+ cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity())
+ }
+ if !s.o.GetDisableBlockCache() {
+ var bcacher cache.Cacher
+ if s.o.GetBlockCacheCapacity() > 0 {
+ bcacher = s.o.GetBlockCacher().New(s.o.GetBlockCacheCapacity())
+ }
+ bcache = cache.NewCache(bcacher)
+ }
+ if !s.o.GetDisableBufferPool() {
+ bpool = util.NewBufferPool(s.o.GetBlockSize() + 5)
+ }
+ return &tOps{
+ s: s,
+ noSync: s.o.GetNoSync(),
+ cache: cache.NewCache(cacher),
+ bcache: bcache,
+ bpool: bpool,
+ }
+}
+
+// tWriter wraps the table writer. It keeps track of the file descriptor
+// and the added key range.
+type tWriter struct {
+ t *tOps
+
+ fd storage.FileDesc
+ w storage.Writer
+ tw *table.Writer
+
+ first, last []byte
+}
+
+// Append key/value pair to the table.
+func (w *tWriter) append(key, value []byte) error {
+ if w.first == nil {
+ w.first = append([]byte{}, key...)
+ }
+ w.last = append(w.last[:0], key...)
+ return w.tw.Append(key, value)
+}
+
+// Returns true if the table is empty.
+func (w *tWriter) empty() bool {
+ return w.first == nil
+}
+
+// Closes the storage.Writer.
+func (w *tWriter) close() {
+ if w.w != nil {
+ w.w.Close()
+ w.w = nil
+ }
+}
+
+// Finalizes the table and returns table file.
+func (w *tWriter) finish() (f *tFile, err error) {
+ defer w.close()
+ err = w.tw.Close()
+ if err != nil {
+ return
+ }
+ if !w.t.noSync {
+ err = w.w.Sync()
+ if err != nil {
+ return
+ }
+ }
+ f = newTableFile(w.fd, int64(w.tw.BytesLen()), internalKey(w.first), internalKey(w.last))
+ return
+}
+
+// Drops the table.
+func (w *tWriter) drop() {
+ w.close()
+ w.t.s.stor.Remove(w.fd)
+ w.t.s.reuseFileNum(w.fd.Num)
+ w.tw = nil
+ w.first = nil
+ w.last = nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go
new file mode 100644
index 000000000..16cfbaa00
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/reader.go
@@ -0,0 +1,1135 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package table
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io"
+ "sort"
+ "strings"
+ "sync"
+
+ "github.com/golang/snappy"
+
+ "github.com/syndtr/goleveldb/leveldb/cache"
+ "github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/syndtr/goleveldb/leveldb/errors"
+ "github.com/syndtr/goleveldb/leveldb/filter"
+ "github.com/syndtr/goleveldb/leveldb/iterator"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/storage"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+// Reader errors.
+var (
+ ErrNotFound = errors.ErrNotFound
+ ErrReaderReleased = errors.New("leveldb/table: reader released")
+ ErrIterReleased = errors.New("leveldb/table: iterator released")
+)
+
+// ErrCorrupted describes error due to corruption. This error will be wrapped
+// with errors.ErrCorrupted.
+type ErrCorrupted struct {
+ Pos int64
+ Size int64
+ Kind string
+ Reason string
+}
+
+func (e *ErrCorrupted) Error() string {
+ return fmt.Sprintf("leveldb/table: corruption on %s (pos=%d): %s", e.Kind, e.Pos, e.Reason)
+}
+
+func max(x, y int) int {
+ if x > y {
+ return x
+ }
+ return y
+}
+
+type block struct {
+ bpool *util.BufferPool
+ bh blockHandle
+ data []byte
+ restartsLen int
+ restartsOffset int
+}
+
+func (b *block) seek(cmp comparer.Comparer, rstart, rlimit int, key []byte) (index, offset int, err error) {
+ index = sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool {
+ offset := int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):]))
+ offset++ // shared always zero, since this is a restart point
+ v1, n1 := binary.Uvarint(b.data[offset:]) // key length
+ _, n2 := binary.Uvarint(b.data[offset+n1:]) // value length
+ m := offset + n1 + n2
+ return cmp.Compare(b.data[m:m+int(v1)], key) > 0
+ }) + rstart - 1
+ if index < rstart {
+ // The smallest key is greater-than key sought.
+ index = rstart
+ }
+ offset = int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:]))
+ return
+}
+
+func (b *block) restartIndex(rstart, rlimit, offset int) int {
+ return sort.Search(b.restartsLen-rstart-(b.restartsLen-rlimit), func(i int) bool {
+ return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*(rstart+i):])) > offset
+ }) + rstart - 1
+}
+
+func (b *block) restartOffset(index int) int {
+ return int(binary.LittleEndian.Uint32(b.data[b.restartsOffset+4*index:]))
+}
+
+func (b *block) entry(offset int) (key, value []byte, nShared, n int, err error) {
+ if offset >= b.restartsOffset {
+ if offset != b.restartsOffset {
+ err = &ErrCorrupted{Reason: "entries offset not aligned"}
+ }
+ return
+ }
+ v0, n0 := binary.Uvarint(b.data[offset:]) // Shared prefix length
+ v1, n1 := binary.Uvarint(b.data[offset+n0:]) // Key length
+ v2, n2 := binary.Uvarint(b.data[offset+n0+n1:]) // Value length
+ m := n0 + n1 + n2
+ n = m + int(v1) + int(v2)
+ if n0 <= 0 || n1 <= 0 || n2 <= 0 || offset+n > b.restartsOffset {
+ err = &ErrCorrupted{Reason: "entries corrupted"}
+ return
+ }
+ key = b.data[offset+m : offset+m+int(v1)]
+ value = b.data[offset+m+int(v1) : offset+n]
+ nShared = int(v0)
+ return
+}
+
+func (b *block) Release() {
+ b.bpool.Put(b.data)
+ b.bpool = nil
+ b.data = nil
+}
+
+type dir int
+
+const (
+ dirReleased dir = iota - 1
+ dirSOI
+ dirEOI
+ dirBackward
+ dirForward
+)
+
+type blockIter struct {
+ tr *Reader
+ block *block
+ blockReleaser util.Releaser
+ releaser util.Releaser
+ key, value []byte
+ offset int
+ // Previous offset, only filled by Next.
+ prevOffset int
+ prevNode []int
+ prevKeys []byte
+ restartIndex int
+ // Iterator direction.
+ dir dir
+ // Restart index slice range.
+ riStart int
+ riLimit int
+ // Offset slice range.
+ offsetStart int
+ offsetRealStart int
+ offsetLimit int
+ // Error.
+ err error
+}
+
+func (i *blockIter) sErr(err error) {
+ i.err = err
+ i.key = nil
+ i.value = nil
+ i.prevNode = nil
+ i.prevKeys = nil
+}
+
+func (i *blockIter) reset() {
+ if i.dir == dirBackward {
+ i.prevNode = i.prevNode[:0]
+ i.prevKeys = i.prevKeys[:0]
+ }
+ i.restartIndex = i.riStart
+ i.offset = i.offsetStart
+ i.dir = dirSOI
+ i.key = i.key[:0]
+ i.value = nil
+}
+
+func (i *blockIter) isFirst() bool {
+ switch i.dir {
+ case dirForward:
+ return i.prevOffset == i.offsetRealStart
+ case dirBackward:
+ return len(i.prevNode) == 1 && i.restartIndex == i.riStart
+ }
+ return false
+}
+
+func (i *blockIter) isLast() bool {
+ switch i.dir {
+ case dirForward, dirBackward:
+ return i.offset == i.offsetLimit
+ }
+ return false
+}
+
+func (i *blockIter) First() bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.dir == dirBackward {
+ i.prevNode = i.prevNode[:0]
+ i.prevKeys = i.prevKeys[:0]
+ }
+ i.dir = dirSOI
+ return i.Next()
+}
+
+func (i *blockIter) Last() bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.dir == dirBackward {
+ i.prevNode = i.prevNode[:0]
+ i.prevKeys = i.prevKeys[:0]
+ }
+ i.dir = dirEOI
+ return i.Prev()
+}
+
+func (i *blockIter) Seek(key []byte) bool {
+ if i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ ri, offset, err := i.block.seek(i.tr.cmp, i.riStart, i.riLimit, key)
+ if err != nil {
+ i.sErr(err)
+ return false
+ }
+ i.restartIndex = ri
+ i.offset = max(i.offsetStart, offset)
+ if i.dir == dirSOI || i.dir == dirEOI {
+ i.dir = dirForward
+ }
+ for i.Next() {
+ if i.tr.cmp.Compare(i.key, key) >= 0 {
+ return true
+ }
+ }
+ return false
+}
+
+func (i *blockIter) Next() bool {
+ if i.dir == dirEOI || i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ if i.dir == dirSOI {
+ i.restartIndex = i.riStart
+ i.offset = i.offsetStart
+ } else if i.dir == dirBackward {
+ i.prevNode = i.prevNode[:0]
+ i.prevKeys = i.prevKeys[:0]
+ }
+ for i.offset < i.offsetRealStart {
+ key, value, nShared, n, err := i.block.entry(i.offset)
+ if err != nil {
+ i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err))
+ return false
+ }
+ if n == 0 {
+ i.dir = dirEOI
+ return false
+ }
+ i.key = append(i.key[:nShared], key...)
+ i.value = value
+ i.offset += n
+ }
+ if i.offset >= i.offsetLimit {
+ i.dir = dirEOI
+ if i.offset != i.offsetLimit {
+ i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned"))
+ }
+ return false
+ }
+ key, value, nShared, n, err := i.block.entry(i.offset)
+ if err != nil {
+ i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err))
+ return false
+ }
+ if n == 0 {
+ i.dir = dirEOI
+ return false
+ }
+ i.key = append(i.key[:nShared], key...)
+ i.value = value
+ i.prevOffset = i.offset
+ i.offset += n
+ i.dir = dirForward
+ return true
+}
+
+func (i *blockIter) Prev() bool {
+ if i.dir == dirSOI || i.err != nil {
+ return false
+ } else if i.dir == dirReleased {
+ i.err = ErrIterReleased
+ return false
+ }
+
+ var ri int
+ if i.dir == dirForward {
+ // Change direction.
+ i.offset = i.prevOffset
+ if i.offset == i.offsetRealStart {
+ i.dir = dirSOI
+ return false
+ }
+ ri = i.block.restartIndex(i.restartIndex, i.riLimit, i.offset)
+ i.dir = dirBackward
+ } else if i.dir == dirEOI {
+ // At the end of iterator.
+ i.restartIndex = i.riLimit
+ i.offset = i.offsetLimit
+ if i.offset == i.offsetRealStart {
+ i.dir = dirSOI
+ return false
+ }
+ ri = i.riLimit - 1
+ i.dir = dirBackward
+ } else if len(i.prevNode) == 1 {
+ // This is the end of a restart range.
+ i.offset = i.prevNode[0]
+ i.prevNode = i.prevNode[:0]
+ if i.restartIndex == i.riStart {
+ i.dir = dirSOI
+ return false
+ }
+ i.restartIndex--
+ ri = i.restartIndex
+ } else {
+ // In the middle of restart range, get from cache.
+ n := len(i.prevNode) - 3
+ node := i.prevNode[n:]
+ i.prevNode = i.prevNode[:n]
+ // Get the key.
+ ko := node[0]
+ i.key = append(i.key[:0], i.prevKeys[ko:]...)
+ i.prevKeys = i.prevKeys[:ko]
+ // Get the value.
+ vo := node[1]
+ vl := vo + node[2]
+ i.value = i.block.data[vo:vl]
+ i.offset = vl
+ return true
+ }
+ // Build entries cache.
+ i.key = i.key[:0]
+ i.value = nil
+ offset := i.block.restartOffset(ri)
+ if offset == i.offset {
+ ri--
+ if ri < 0 {
+ i.dir = dirSOI
+ return false
+ }
+ offset = i.block.restartOffset(ri)
+ }
+ i.prevNode = append(i.prevNode, offset)
+ for {
+ key, value, nShared, n, err := i.block.entry(offset)
+ if err != nil {
+ i.sErr(i.tr.fixErrCorruptedBH(i.block.bh, err))
+ return false
+ }
+ if offset >= i.offsetRealStart {
+ if i.value != nil {
+ // Appends 3 variables:
+ // 1. Previous keys offset
+ // 2. Value offset in the data block
+ // 3. Value length
+ i.prevNode = append(i.prevNode, len(i.prevKeys), offset-len(i.value), len(i.value))
+ i.prevKeys = append(i.prevKeys, i.key...)
+ }
+ i.value = value
+ }
+ i.key = append(i.key[:nShared], key...)
+ offset += n
+ // Stop if target offset reached.
+ if offset >= i.offset {
+ if offset != i.offset {
+ i.sErr(i.tr.newErrCorruptedBH(i.block.bh, "entries offset not aligned"))
+ return false
+ }
+
+ break
+ }
+ }
+ i.restartIndex = ri
+ i.offset = offset
+ return true
+}
+
+func (i *blockIter) Key() []byte {
+ if i.err != nil || i.dir <= dirEOI {
+ return nil
+ }
+ return i.key
+}
+
+func (i *blockIter) Value() []byte {
+ if i.err != nil || i.dir <= dirEOI {
+ return nil
+ }
+ return i.value
+}
+
+func (i *blockIter) Release() {
+ if i.dir != dirReleased {
+ i.tr = nil
+ i.block = nil
+ i.prevNode = nil
+ i.prevKeys = nil
+ i.key = nil
+ i.value = nil
+ i.dir = dirReleased
+ if i.blockReleaser != nil {
+ i.blockReleaser.Release()
+ i.blockReleaser = nil
+ }
+ if i.releaser != nil {
+ i.releaser.Release()
+ i.releaser = nil
+ }
+ }
+}
+
+func (i *blockIter) SetReleaser(releaser util.Releaser) {
+ if i.dir == dirReleased {
+ panic(util.ErrReleased)
+ }
+ if i.releaser != nil && releaser != nil {
+ panic(util.ErrHasReleaser)
+ }
+ i.releaser = releaser
+}
+
+func (i *blockIter) Valid() bool {
+ return i.err == nil && (i.dir == dirBackward || i.dir == dirForward)
+}
+
+func (i *blockIter) Error() error {
+ return i.err
+}
+
+type filterBlock struct {
+ bpool *util.BufferPool
+ data []byte
+ oOffset int
+ baseLg uint
+ filtersNum int
+}
+
+func (b *filterBlock) contains(filter filter.Filter, offset uint64, key []byte) bool {
+ i := int(offset >> b.baseLg)
+ if i < b.filtersNum {
+ o := b.data[b.oOffset+i*4:]
+ n := int(binary.LittleEndian.Uint32(o))
+ m := int(binary.LittleEndian.Uint32(o[4:]))
+ if n < m && m <= b.oOffset {
+ return filter.Contains(b.data[n:m], key)
+ } else if n == m {
+ return false
+ }
+ }
+ return true
+}
+
+func (b *filterBlock) Release() {
+ b.bpool.Put(b.data)
+ b.bpool = nil
+ b.data = nil
+}
+
+type indexIter struct {
+ *blockIter
+ tr *Reader
+ slice *util.Range
+ // Options
+ fillCache bool
+}
+
+func (i *indexIter) Get() iterator.Iterator {
+ value := i.Value()
+ if value == nil {
+ return nil
+ }
+ dataBH, n := decodeBlockHandle(value)
+ if n == 0 {
+ return iterator.NewEmptyIterator(i.tr.newErrCorruptedBH(i.tr.indexBH, "bad data block handle"))
+ }
+
+ var slice *util.Range
+ if i.slice != nil && (i.blockIter.isFirst() || i.blockIter.isLast()) {
+ slice = i.slice
+ }
+ return i.tr.getDataIterErr(dataBH, slice, i.tr.verifyChecksum, i.fillCache)
+}
+
+// Reader is a table reader.
+type Reader struct {
+ mu sync.RWMutex
+ fd storage.FileDesc
+ reader io.ReaderAt
+ cache *cache.NamespaceGetter
+ err error
+ bpool *util.BufferPool
+ // Options
+ o *opt.Options
+ cmp comparer.Comparer
+ filter filter.Filter
+ verifyChecksum bool
+
+ dataEnd int64
+ metaBH, indexBH, filterBH blockHandle
+ indexBlock *block
+ filterBlock *filterBlock
+}
+
+func (r *Reader) blockKind(bh blockHandle) string {
+ switch bh.offset {
+ case r.metaBH.offset:
+ return "meta-block"
+ case r.indexBH.offset:
+ return "index-block"
+ case r.filterBH.offset:
+ if r.filterBH.length > 0 {
+ return "filter-block"
+ }
+ }
+ return "data-block"
+}
+
+func (r *Reader) newErrCorrupted(pos, size int64, kind, reason string) error {
+ return &errors.ErrCorrupted{Fd: r.fd, Err: &ErrCorrupted{Pos: pos, Size: size, Kind: kind, Reason: reason}}
+}
+
+func (r *Reader) newErrCorruptedBH(bh blockHandle, reason string) error {
+ return r.newErrCorrupted(int64(bh.offset), int64(bh.length), r.blockKind(bh), reason)
+}
+
+func (r *Reader) fixErrCorruptedBH(bh blockHandle, err error) error {
+ if cerr, ok := err.(*ErrCorrupted); ok {
+ cerr.Pos = int64(bh.offset)
+ cerr.Size = int64(bh.length)
+ cerr.Kind = r.blockKind(bh)
+ return &errors.ErrCorrupted{Fd: r.fd, Err: cerr}
+ }
+ return err
+}
+
+func (r *Reader) readRawBlock(bh blockHandle, verifyChecksum bool) ([]byte, error) {
+ data := r.bpool.Get(int(bh.length + blockTrailerLen))
+ if _, err := r.reader.ReadAt(data, int64(bh.offset)); err != nil && err != io.EOF {
+ return nil, err
+ }
+
+ if verifyChecksum {
+ n := bh.length + 1
+ checksum0 := binary.LittleEndian.Uint32(data[n:])
+ checksum1 := util.NewCRC(data[:n]).Value()
+ if checksum0 != checksum1 {
+ r.bpool.Put(data)
+ return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("checksum mismatch, want=%#x got=%#x", checksum0, checksum1))
+ }
+ }
+
+ switch data[bh.length] {
+ case blockTypeNoCompression:
+ data = data[:bh.length]
+ case blockTypeSnappyCompression:
+ decLen, err := snappy.DecodedLen(data[:bh.length])
+ if err != nil {
+ r.bpool.Put(data)
+ return nil, r.newErrCorruptedBH(bh, err.Error())
+ }
+ decData := r.bpool.Get(decLen)
+ decData, err = snappy.Decode(decData, data[:bh.length])
+ r.bpool.Put(data)
+ if err != nil {
+ r.bpool.Put(decData)
+ return nil, r.newErrCorruptedBH(bh, err.Error())
+ }
+ data = decData
+ default:
+ r.bpool.Put(data)
+ return nil, r.newErrCorruptedBH(bh, fmt.Sprintf("unknown compression type %#x", data[bh.length]))
+ }
+ return data, nil
+}
+
+func (r *Reader) readBlock(bh blockHandle, verifyChecksum bool) (*block, error) {
+ data, err := r.readRawBlock(bh, verifyChecksum)
+ if err != nil {
+ return nil, err
+ }
+ restartsLen := int(binary.LittleEndian.Uint32(data[len(data)-4:]))
+ b := &block{
+ bpool: r.bpool,
+ bh: bh,
+ data: data,
+ restartsLen: restartsLen,
+ restartsOffset: len(data) - (restartsLen+1)*4,
+ }
+ return b, nil
+}
+
+func (r *Reader) readBlockCached(bh blockHandle, verifyChecksum, fillCache bool) (*block, util.Releaser, error) {
+ if r.cache != nil {
+ var (
+ err error
+ ch *cache.Handle
+ )
+ if fillCache {
+ ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) {
+ var b *block
+ b, err = r.readBlock(bh, verifyChecksum)
+ if err != nil {
+ return 0, nil
+ }
+ return cap(b.data), b
+ })
+ } else {
+ ch = r.cache.Get(bh.offset, nil)
+ }
+ if ch != nil {
+ b, ok := ch.Value().(*block)
+ if !ok {
+ ch.Release()
+ return nil, nil, errors.New("leveldb/table: inconsistent block type")
+ }
+ return b, ch, err
+ } else if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ b, err := r.readBlock(bh, verifyChecksum)
+ return b, b, err
+}
+
+func (r *Reader) readFilterBlock(bh blockHandle) (*filterBlock, error) {
+ data, err := r.readRawBlock(bh, true)
+ if err != nil {
+ return nil, err
+ }
+ n := len(data)
+ if n < 5 {
+ return nil, r.newErrCorruptedBH(bh, "too short")
+ }
+ m := n - 5
+ oOffset := int(binary.LittleEndian.Uint32(data[m:]))
+ if oOffset > m {
+ return nil, r.newErrCorruptedBH(bh, "invalid data-offsets offset")
+ }
+ b := &filterBlock{
+ bpool: r.bpool,
+ data: data,
+ oOffset: oOffset,
+ baseLg: uint(data[n-1]),
+ filtersNum: (m - oOffset) / 4,
+ }
+ return b, nil
+}
+
+func (r *Reader) readFilterBlockCached(bh blockHandle, fillCache bool) (*filterBlock, util.Releaser, error) {
+ if r.cache != nil {
+ var (
+ err error
+ ch *cache.Handle
+ )
+ if fillCache {
+ ch = r.cache.Get(bh.offset, func() (size int, value cache.Value) {
+ var b *filterBlock
+ b, err = r.readFilterBlock(bh)
+ if err != nil {
+ return 0, nil
+ }
+ return cap(b.data), b
+ })
+ } else {
+ ch = r.cache.Get(bh.offset, nil)
+ }
+ if ch != nil {
+ b, ok := ch.Value().(*filterBlock)
+ if !ok {
+ ch.Release()
+ return nil, nil, errors.New("leveldb/table: inconsistent block type")
+ }
+ return b, ch, err
+ } else if err != nil {
+ return nil, nil, err
+ }
+ }
+
+ b, err := r.readFilterBlock(bh)
+ return b, b, err
+}
+
+func (r *Reader) getIndexBlock(fillCache bool) (b *block, rel util.Releaser, err error) {
+ if r.indexBlock == nil {
+ return r.readBlockCached(r.indexBH, true, fillCache)
+ }
+ return r.indexBlock, util.NoopReleaser{}, nil
+}
+
+func (r *Reader) getFilterBlock(fillCache bool) (*filterBlock, util.Releaser, error) {
+ if r.filterBlock == nil {
+ return r.readFilterBlockCached(r.filterBH, fillCache)
+ }
+ return r.filterBlock, util.NoopReleaser{}, nil
+}
+
+func (r *Reader) newBlockIter(b *block, bReleaser util.Releaser, slice *util.Range, inclLimit bool) *blockIter {
+ bi := &blockIter{
+ tr: r,
+ block: b,
+ blockReleaser: bReleaser,
+ // Valid key should never be nil.
+ key: make([]byte, 0),
+ dir: dirSOI,
+ riStart: 0,
+ riLimit: b.restartsLen,
+ offsetStart: 0,
+ offsetRealStart: 0,
+ offsetLimit: b.restartsOffset,
+ }
+ if slice != nil {
+ if slice.Start != nil {
+ if bi.Seek(slice.Start) {
+ bi.riStart = b.restartIndex(bi.restartIndex, b.restartsLen, bi.prevOffset)
+ bi.offsetStart = b.restartOffset(bi.riStart)
+ bi.offsetRealStart = bi.prevOffset
+ } else {
+ bi.riStart = b.restartsLen
+ bi.offsetStart = b.restartsOffset
+ bi.offsetRealStart = b.restartsOffset
+ }
+ }
+ if slice.Limit != nil {
+ if bi.Seek(slice.Limit) && (!inclLimit || bi.Next()) {
+ bi.offsetLimit = bi.prevOffset
+ bi.riLimit = bi.restartIndex + 1
+ }
+ }
+ bi.reset()
+ if bi.offsetStart > bi.offsetLimit {
+ bi.sErr(errors.New("leveldb/table: invalid slice range"))
+ }
+ }
+ return bi
+}
+
+func (r *Reader) getDataIter(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator {
+ b, rel, err := r.readBlockCached(dataBH, verifyChecksum, fillCache)
+ if err != nil {
+ return iterator.NewEmptyIterator(err)
+ }
+ return r.newBlockIter(b, rel, slice, false)
+}
+
+func (r *Reader) getDataIterErr(dataBH blockHandle, slice *util.Range, verifyChecksum, fillCache bool) iterator.Iterator {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if r.err != nil {
+ return iterator.NewEmptyIterator(r.err)
+ }
+
+ return r.getDataIter(dataBH, slice, verifyChecksum, fillCache)
+}
+
+// NewIterator creates an iterator from the table.
+//
+// Slice allows slicing the iterator to only contain keys in the given
+// range. A nil Range.Start is treated as a key before all keys in the
+// table, and a nil Range.Limit is treated as a key after all keys in
+// the table.
+//
+// The returned iterator is not safe for concurrent use and should be released
+// after use.
+//
+// Also read Iterator documentation of the leveldb/iterator package.
+func (r *Reader) NewIterator(slice *util.Range, ro *opt.ReadOptions) iterator.Iterator {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if r.err != nil {
+ return iterator.NewEmptyIterator(r.err)
+ }
+
+ fillCache := !ro.GetDontFillCache()
+ indexBlock, rel, err := r.getIndexBlock(fillCache)
+ if err != nil {
+ return iterator.NewEmptyIterator(err)
+ }
+ index := &indexIter{
+ blockIter: r.newBlockIter(indexBlock, rel, slice, true),
+ tr: r,
+ slice: slice,
+ fillCache: !ro.GetDontFillCache(),
+ }
+ return iterator.NewIndexedIterator(index, opt.GetStrict(r.o, ro, opt.StrictReader))
+}
+
+func (r *Reader) find(key []byte, filtered bool, ro *opt.ReadOptions, noValue bool) (rkey, value []byte, err error) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if r.err != nil {
+ err = r.err
+ return
+ }
+
+ indexBlock, rel, err := r.getIndexBlock(true)
+ if err != nil {
+ return
+ }
+ defer rel.Release()
+
+ index := r.newBlockIter(indexBlock, nil, nil, true)
+ defer index.Release()
+
+ if !index.Seek(key) {
+ if err = index.Error(); err == nil {
+ err = ErrNotFound
+ }
+ return
+ }
+
+ dataBH, n := decodeBlockHandle(index.Value())
+ if n == 0 {
+ r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle")
+ return nil, nil, r.err
+ }
+
+ // The filter should only be used for exact matches.
+ if filtered && r.filter != nil {
+ filterBlock, frel, ferr := r.getFilterBlock(true)
+ if ferr == nil {
+ if !filterBlock.contains(r.filter, dataBH.offset, key) {
+ frel.Release()
+ return nil, nil, ErrNotFound
+ }
+ frel.Release()
+ } else if !errors.IsCorrupted(ferr) {
+ return nil, nil, ferr
+ }
+ }
+
+ data := r.getDataIter(dataBH, nil, r.verifyChecksum, !ro.GetDontFillCache())
+ if !data.Seek(key) {
+ data.Release()
+ if err = data.Error(); err != nil {
+ return
+ }
+
+ // The nearest greater-than key is the first key of the next block.
+ if !index.Next() {
+ if err = index.Error(); err == nil {
+ err = ErrNotFound
+ }
+ return
+ }
+
+ dataBH, n = decodeBlockHandle(index.Value())
+ if n == 0 {
+ r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle")
+ return nil, nil, r.err
+ }
+
+ data = r.getDataIter(dataBH, nil, r.verifyChecksum, !ro.GetDontFillCache())
+ if !data.Next() {
+ data.Release()
+ if err = data.Error(); err == nil {
+ err = ErrNotFound
+ }
+ return
+ }
+ }
+
+ // Key doesn't use block buffer, no need to copy the buffer.
+ rkey = data.Key()
+ if !noValue {
+ if r.bpool == nil {
+ value = data.Value()
+ } else {
+ // Value does use the block buffer, and since the buffer will be
+ // recycled, it needs to be copied.
+ value = append([]byte{}, data.Value()...)
+ }
+ }
+ data.Release()
+ return
+}
+
+// Find finds key/value pair whose key is greater than or equal to the
+// given key. It returns ErrNotFound if the table doesn't contain
+// such pair.
+// If filtered is true then the nearest 'block' will be checked against
+// 'filter data' (if present) and will immediately return ErrNotFound if
+// 'filter data' indicates that such pair doesn't exist.
+//
+// The caller may modify the contents of the returned slice as it is its
+// own copy.
+// It is safe to modify the contents of the argument after Find returns.
+func (r *Reader) Find(key []byte, filtered bool, ro *opt.ReadOptions) (rkey, value []byte, err error) {
+ return r.find(key, filtered, ro, false)
+}
+
+// FindKey finds key that is greater than or equal to the given key.
+// It returns ErrNotFound if the table doesn't contain such key.
+// If filtered is true then the nearest 'block' will be checked against
+// 'filter data' (if present) and will immediately return ErrNotFound if
+// 'filter data' indicates that such key doesn't exist.
+//
+// The caller may modify the contents of the returned slice as it is its
+// own copy.
+// It is safe to modify the contents of the argument after FindKey returns.
+func (r *Reader) FindKey(key []byte, filtered bool, ro *opt.ReadOptions) (rkey []byte, err error) {
+ rkey, _, err = r.find(key, filtered, ro, true)
+ return
+}
+
+// Get gets the value for the given key. It returns errors.ErrNotFound
+// if the table does not contain the key.
+//
+// The caller may modify the contents of the returned slice as it is its
+// own copy.
+// It is safe to modify the contents of the argument after Get returns.
+func (r *Reader) Get(key []byte, ro *opt.ReadOptions) (value []byte, err error) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if r.err != nil {
+ err = r.err
+ return
+ }
+
+ rkey, value, err := r.find(key, false, ro, false)
+ if err == nil && r.cmp.Compare(rkey, key) != 0 {
+ value = nil
+ err = ErrNotFound
+ }
+ return
+}
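+
+// The sketch below is illustrative only and not part of the upstream
+// library: a typical lookup sequence against a Reader. Get requires an
+// exact key match, while Find falls back to the smallest key at or after
+// the sought key. The boolean result reports whether the match was exact.
+func exampleLookup(r *Reader, key []byte) (value []byte, exact bool, err error) {
+ ro := &opt.ReadOptions{} // default read options
+ value, err = r.Get(key, ro)
+ if err == nil {
+ return value, true, nil
+ }
+ if err != ErrNotFound {
+ return nil, false, err
+ }
+ // No exact match; return the value of the nearest greater-or-equal key.
+ _, value, err = r.Find(key, true, ro)
+ if err != nil {
+ return nil, false, err
+ }
+ return value, false, nil
+}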
+
+// OffsetOf returns approximate offset for the given key.
+//
+// It is safe to modify the contents of the argument after OffsetOf returns.
+func (r *Reader) OffsetOf(key []byte) (offset int64, err error) {
+ r.mu.RLock()
+ defer r.mu.RUnlock()
+
+ if r.err != nil {
+ err = r.err
+ return
+ }
+
+ indexBlock, rel, err := r.readBlockCached(r.indexBH, true, true)
+ if err != nil {
+ return
+ }
+ defer rel.Release()
+
+ index := r.newBlockIter(indexBlock, nil, nil, true)
+ defer index.Release()
+ if index.Seek(key) {
+ dataBH, n := decodeBlockHandle(index.Value())
+ if n == 0 {
+ r.err = r.newErrCorruptedBH(r.indexBH, "bad data block handle")
+ return
+ }
+ offset = int64(dataBH.offset)
+ return
+ }
+ err = index.Error()
+ if err == nil {
+ offset = r.dataEnd
+ }
+ return
+}
+
+// Release implements util.Releaser.
+// It also closes the file if it is an io.Closer.
+func (r *Reader) Release() {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ if closer, ok := r.reader.(io.Closer); ok {
+ closer.Close()
+ }
+ if r.indexBlock != nil {
+ r.indexBlock.Release()
+ r.indexBlock = nil
+ }
+ if r.filterBlock != nil {
+ r.filterBlock.Release()
+ r.filterBlock = nil
+ }
+ r.reader = nil
+ r.cache = nil
+ r.bpool = nil
+ r.err = ErrReaderReleased
+}
+
+// NewReader creates a new initialized table reader for the file.
+// The cache and bpool are optional and can be nil.
+//
+// The returned table reader instance is safe for concurrent use.
+func NewReader(f io.ReaderAt, size int64, fd storage.FileDesc, cache *cache.NamespaceGetter, bpool *util.BufferPool, o *opt.Options) (*Reader, error) {
+ if f == nil {
+ return nil, errors.New("leveldb/table: nil file")
+ }
+
+ r := &Reader{
+ fd: fd,
+ reader: f,
+ cache: cache,
+ bpool: bpool,
+ o: o,
+ cmp: o.GetComparer(),
+ verifyChecksum: o.GetStrict(opt.StrictBlockChecksum),
+ }
+
+ if size < footerLen {
+ r.err = r.newErrCorrupted(0, size, "table", "too small")
+ return r, nil
+ }
+
+ footerPos := size - footerLen
+ var footer [footerLen]byte
+ if _, err := r.reader.ReadAt(footer[:], footerPos); err != nil && err != io.EOF {
+ return nil, err
+ }
+ if string(footer[footerLen-len(magic):footerLen]) != magic {
+ r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad magic number")
+ return r, nil
+ }
+
+ var n int
+ // Decode the metaindex block handle.
+ r.metaBH, n = decodeBlockHandle(footer[:])
+ if n == 0 {
+ r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad metaindex block handle")
+ return r, nil
+ }
+
+ // Decode the index block handle.
+ r.indexBH, n = decodeBlockHandle(footer[n:])
+ if n == 0 {
+ r.err = r.newErrCorrupted(footerPos, footerLen, "table-footer", "bad index block handle")
+ return r, nil
+ }
+
+ // Read metaindex block.
+ metaBlock, err := r.readBlock(r.metaBH, true)
+ if err != nil {
+ if errors.IsCorrupted(err) {
+ r.err = err
+ return r, nil
+ }
+ return nil, err
+ }
+
+ // Set data end.
+ r.dataEnd = int64(r.metaBH.offset)
+
+ // Read metaindex.
+ metaIter := r.newBlockIter(metaBlock, nil, nil, true)
+ for metaIter.Next() {
+ key := string(metaIter.Key())
+ if !strings.HasPrefix(key, "filter.") {
+ continue
+ }
+ fn := key[7:]
+ if f0 := o.GetFilter(); f0 != nil && f0.Name() == fn {
+ r.filter = f0
+ } else {
+ for _, f0 := range o.GetAltFilters() {
+ if f0.Name() == fn {
+ r.filter = f0
+ break
+ }
+ }
+ }
+ if r.filter != nil {
+ filterBH, n := decodeBlockHandle(metaIter.Value())
+ if n == 0 {
+ continue
+ }
+ r.filterBH = filterBH
+ // Update data end.
+ r.dataEnd = int64(filterBH.offset)
+ break
+ }
+ }
+ metaIter.Release()
+ metaBlock.Release()
+
+ // Cache the index and filter blocks locally, since we don't have a global cache.
+ if cache == nil {
+ r.indexBlock, err = r.readBlock(r.indexBH, true)
+ if err != nil {
+ if errors.IsCorrupted(err) {
+ r.err = err
+ return r, nil
+ }
+ return nil, err
+ }
+ if r.filter != nil {
+ r.filterBlock, err = r.readFilterBlock(r.filterBH)
+ if err != nil {
+ if !errors.IsCorrupted(err) {
+ return nil, err
+ }
+
+ // Don't use filter then.
+ r.filter = nil
+ }
+ }
+ }
+
+ return r, nil
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go
new file mode 100644
index 000000000..beacdc1f0
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/table.go
@@ -0,0 +1,177 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Package table allows reading and writing of sorted key/value entries.
+package table
+
+import (
+ "encoding/binary"
+)
+
+/*
+Table:
+
+A table consists of one or more data blocks, an optional filter block,
+a metaindex block, an index block and a table footer. The metaindex block
+is a special block used to keep parameters of the table, such as the filter
+block name and its block handle. The index block is a special block used to
+keep a record of the data blocks' offsets and lengths; the index block uses
+one as its restart interval. The key used by the index block is the last key
+of the preceding block, a shortened separator of adjacent blocks, or a
+shortened successor of the last key of the last block. The filter block is an
+optional block that contains a sequence of filter data generated by a filter
+generator.
+
+Table data structure:
+ + optional
+ /
+ +--------------+--------------+--------------+------+-------+-----------------+-------------+--------+
+ | data block 1 | ... | data block n | filter block | metaindex block | index block | footer |
+ +--------------+--------------+--------------+--------------+-----------------+-------------+--------+
+
+ Each block is followed by a 5-byte trailer that contains the compression type and a checksum.
+
+Table block trailer:
+
+ +---------------------------+-------------------+
+ | compression type (1-byte) | checksum (4-byte) |
+ +---------------------------+-------------------+
+
+ The checksum is a CRC-32 computed using Castagnoli's polynomial. The
+ compression type is also included in the checksum.
+
+Table footer:
+
+ +------------------- 40-bytes -------------------+
+ / \
+ +------------------------+--------------------+------+-----------------+
+ | metaindex block handle / index block handle / ---- | magic (8-bytes) |
+ +------------------------+--------------------+------+-----------------+
+
+ The magic is the first 64 bits of the SHA-1 sum of "http://code.google.com/p/leveldb/".
+
+NOTE: All fixed-length integers are little-endian.
+*/
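+
+// The sketch below is illustrative only and not part of the upstream
+// library: given the 48-byte footer described above, it shows how the
+// metaindex and index block handles could be recovered, using only the
+// decodeBlockHandle helper and the footerLen/magic constants defined
+// later in this file.
+func exampleDecodeFooter(footer [footerLen]byte) (metaBH, indexBH blockHandle, ok bool) {
+ // The last 8 bytes must hold the magic number.
+ if string(footer[footerLen-len(magic):]) != magic {
+ return blockHandle{}, blockHandle{}, false
+ }
+ // The two varint-encoded block handles are packed at the front; the
+ // rest of the footer up to the magic is zero padding.
+ var n, m int
+ metaBH, n = decodeBlockHandle(footer[:])
+ if n == 0 {
+ return blockHandle{}, blockHandle{}, false
+ }
+ indexBH, m = decodeBlockHandle(footer[n:])
+ if m == 0 {
+ return blockHandle{}, blockHandle{}, false
+ }
+ return metaBH, indexBH, true
+}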
+
+/*
+Block:
+
+A block consists of one or more key/value entries and a block trailer.
+A block entry shares a key prefix with its preceding key until a restart
+point is reached. A block should contain at least one restart point.
+The first restart point is always zero.
+
+Block data structure:
+
+ + restart point + restart point (depends on restart interval)
+ / /
+ +---------------+---------------+---------------+---------------+---------+
+ | block entry 1 | block entry 2 | ... | block entry n | trailer |
+ +---------------+---------------+---------------+---------------+---------+
+
+Key/value entry:
+
+ +---- key len ----+
+ / \
+ +-------+---------+-----------+---------+--------------------+--------------+----------------+
+ | shared (varint) | not shared (varint) | value len (varint) | key (varlen) | value (varlen) |
+ +-----------------+---------------------+--------------------+--------------+----------------+
+
+ A block entry shares a key prefix with its preceding key:
+ Conditions:
+ restart_interval=2
+ entry one : key=deck,value=v1
+ entry two : key=dock,value=v2
+ entry three: key=duck,value=v3
+ The entries will be encoded as follows:
+
+ + restart point (offset=0) + restart point (offset=16)
+ / /
+ +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+
+ | 0 | 4 | 2 | "deck" | "v1" | 1 | 3 | 2 | "ock" | "v2" | 0 | 4 | 2 | "duck" | "v3" |
+ +-----+-----+-----+----------+--------+-----+-----+-----+---------+--------+-----+-----+-----+----------+--------+
+ \ / \ / \ /
+ +----------- entry one -----------+ +----------- entry two ----------+ +---------- entry three ----------+
+
+ The block trailer will contain two restart points:
+
+ +------------+-----------+--------+
+ | 0 | 16 | 2 |
+ +------------+-----------+---+----+
+ \ / \
+ +-- restart points --+ + restart points length
+
+Block trailer:
+
+ +-- 4-bytes --+
+ / \
+ +-----------------+-----------------+-----------------+------------------------------+
+ | restart point 1 | .... | restart point n | restart points len (4-bytes) |
+ +-----------------+-----------------+-----------------+------------------------------+
+
+
+NOTE: All fixed-length integers are little-endian.
+*/
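+
+// The sketch below is illustrative only and not part of the upstream
+// library: it decodes a single key/value entry at a restart point,
+// following the layout above. At a restart point the shared-prefix length
+// is always zero, so the full key is stored inline. The production
+// decoder is block.entry in reader.go.
+func exampleDecodeRestartEntry(data []byte, offset int) (key, value []byte, next int, ok bool) {
+ if offset < 0 || offset >= len(data) {
+ return nil, nil, 0, false
+ }
+ shared, n0 := binary.Uvarint(data[offset:]) // shared prefix length, zero at a restart point
+ keyLen, n1 := binary.Uvarint(data[offset+n0:]) // non-shared key length
+ valueLen, n2 := binary.Uvarint(data[offset+n0+n1:]) // value length
+ if n0 <= 0 || n1 <= 0 || n2 <= 0 || shared != 0 {
+ return nil, nil, 0, false
+ }
+ m := offset + n0 + n1 + n2
+ next = m + int(keyLen) + int(valueLen)
+ if next > len(data) {
+ return nil, nil, 0, false
+ }
+ key = data[m : m+int(keyLen)]
+ value = data[m+int(keyLen) : next]
+ return key, value, next, true
+}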
+
+/*
+Filter block:
+
+A filter block consists of one or more filter data entries and a filter block trailer.
+The trailer contains the filter data offsets, a trailer offset and a 1-byte base Lg.
+
+Filter block data structure:
+
+ + offset 1 + offset 2 + offset n + trailer offset
+ / / / /
+ +---------------+---------------+---------------+---------+
+ | filter data 1 | ... | filter data n | trailer |
+ +---------------+---------------+---------------+---------+
+
+Filter block trailer:
+
+ +- 4-bytes -+
+ / \
+ +---------------+---------------+---------------+-------------------------------+------------------+
+ | data 1 offset | .... | data n offset | data-offsets offset (4-bytes) | base Lg (1-byte) |
+ +-------------- +---------------+---------------+-------------------------------+------------------+
+
+
+NOTE: All fixed-length integers are little-endian.
+*/
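+
+// The sketch below is illustrative only and not part of the upstream
+// library: it locates the raw filter data covering a data block that
+// starts at blockOffset, following the trailer layout above. One filter
+// is generated per 1<<baseLg bytes of table data; filterBlock.contains in
+// reader.go applies the same rule before consulting the filter itself.
+func exampleFilterData(data []byte, blockOffset uint64) []byte {
+ if len(data) < 5 {
+ return nil
+ }
+ baseLg := uint(data[len(data)-1]) // base Lg (1 byte)
+ oOffset := int(binary.LittleEndian.Uint32(data[len(data)-5:])) // data-offsets offset
+ filtersNum := (len(data) - 5 - oOffset) / 4
+ i := int(blockOffset >> baseLg)
+ if i >= filtersNum {
+ return nil
+ }
+ o := data[oOffset+i*4:]
+ start := int(binary.LittleEndian.Uint32(o))
+ limit := int(binary.LittleEndian.Uint32(o[4:]))
+ if start < limit && limit <= oOffset {
+ return data[start:limit] // raw filter data for this range
+ }
+ return nil
+}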
+
+const (
+ blockTrailerLen = 5
+ footerLen = 48
+
+ magic = "\x57\xfb\x80\x8b\x24\x75\x47\xdb"
+
+ // The block type gives the per-block compression format.
+ // These constants are part of the file format and should not be changed.
+ blockTypeNoCompression = 0
+ blockTypeSnappyCompression = 1
+
+ // Generate new filter every 2KB of data
+ filterBaseLg = 11
+ filterBase = 1 << filterBaseLg
+)
+
+type blockHandle struct {
+ offset, length uint64
+}
+
+func decodeBlockHandle(src []byte) (blockHandle, int) {
+ offset, n := binary.Uvarint(src)
+ length, m := binary.Uvarint(src[n:])
+ if n == 0 || m == 0 {
+ return blockHandle{}, 0
+ }
+ return blockHandle{offset, length}, n + m
+}
+
+func encodeBlockHandle(dst []byte, b blockHandle) int {
+ n := binary.PutUvarint(dst, b.offset)
+ m := binary.PutUvarint(dst[n:], b.length)
+ return n + m
+}
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go b/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go
new file mode 100644
index 000000000..b96b271d8
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/table/writer.go
@@ -0,0 +1,375 @@
+// Copyright (c) 2012, Suryandaru Triandana
+// All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+package table
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/golang/snappy"
+
+ "github.com/syndtr/goleveldb/leveldb/comparer"
+ "github.com/syndtr/goleveldb/leveldb/filter"
+ "github.com/syndtr/goleveldb/leveldb/opt"
+ "github.com/syndtr/goleveldb/leveldb/util"
+)
+
+func sharedPrefixLen(a, b []byte) int {
+ i, n := 0, len(a)
+ if n > len(b) {
+ n = len(b)
+ }
+ for i < n && a[i] == b[i] {
+ i++
+ }
+ return i
+}
+
+type blockWriter struct {
+ restartInterval int
+ buf util.Buffer
+ nEntries int
+ prevKey []byte
+ restarts []uint32
+ scratch []byte
+}
+
+func (w *blockWriter) append(key, value []byte) {
+ nShared := 0
+ if w.nEntries%w.restartInterval == 0 {
+ w.restarts = append(w.restarts, uint32(w.buf.Len()))
+ } else {
+ nShared = sharedPrefixLen(w.prevKey, key)
+ }
+ n := binary.PutUvarint(w.scratch[0:], uint64(nShared))
+ n += binary.PutUvarint(w.scratch[n:], uint64(len(key)-nShared))
+ n += binary.PutUvarint(w.scratch[n:], uint64(len(value)))
+ w.buf.Write(w.scratch[:n])
+ w.buf.Write(key[nShared:])
+ w.buf.Write(value)
+ w.prevKey = append(w.prevKey[:0], key...)
+ w.nEntries++
+}
+
+func (w *blockWriter) finish() {
+ // Write restarts entry.
+ if w.nEntries == 0 {
+ // Must have at least one restart entry.
+ w.restarts = append(w.restarts, 0)
+ }
+ w.restarts = append(w.restarts, uint32(len(w.restarts)))
+ for _, x := range w.restarts {
+ buf4 := w.buf.Alloc(4)
+ binary.LittleEndian.PutUint32(buf4, x)
+ }
+}
+
+func (w *blockWriter) reset() {
+ w.buf.Reset()
+ w.nEntries = 0
+ w.restarts = w.restarts[:0]
+}
+
+func (w *blockWriter) bytesLen() int {
+ restartsLen := len(w.restarts)
+ if restartsLen == 0 {
+ restartsLen = 1
+ }
+ return w.buf.Len() + 4*restartsLen + 4
+}
+
+type filterWriter struct {
+ generator filter.FilterGenerator
+ buf util.Buffer
+ nKeys int
+ offsets []uint32
+}
+
+func (w *filterWriter) add(key []byte) {
+ if w.generator == nil {
+ return
+ }
+ w.generator.Add(key)
+ w.nKeys++
+}
+
+func (w *filterWriter) flush(offset uint64) {
+ if w.generator == nil {
+ return
+ }
+ for x := int(offset / filterBase); x > len(w.offsets); {
+ w.generate()
+ }
+}
+
+func (w *filterWriter) finish() {
+ if w.generator == nil {
+ return
+ }
+ // Generate last keys.
+
+ if w.nKeys > 0 {
+ w.generate()
+ }
+ w.offsets = append(w.offsets, uint32(w.buf.Len()))
+ for _, x := range w.offsets {
+ buf4 := w.buf.Alloc(4)
+ binary.LittleEndian.PutUint32(buf4, x)
+ }
+ w.buf.WriteByte(filterBaseLg)
+}
+
+func (w *filterWriter) generate() {
+ // Record offset.
+ w.offsets = append(w.offsets, uint32(w.buf.Len()))
+ // Generate filters.
+ if w.nKeys > 0 {
+ w.generator.Generate(&w.buf)
+ w.nKeys = 0
+ }
+}
+
+// Writer is a table writer.
+type Writer struct {
+ writer io.Writer
+ err error
+ // Options
+ cmp comparer.Comparer
+ filter filter.Filter
+ compression opt.Compression
+ blockSize int
+
+ dataBlock blockWriter
+ indexBlock blockWriter
+ filterBlock filterWriter
+ pendingBH blockHandle
+ offset uint64
+ nEntries int
+ // Scratch is allocated large enough for 5 uvarints. The block writer should
+ // not use the first 20 bytes, since they will be used to encode the block
+ // handle, which is then passed to the block writer itself.
+ scratch [50]byte
+ comparerScratch []byte
+ compressionScratch []byte
+}
+
+func (w *Writer) writeBlock(buf *util.Buffer, compression opt.Compression) (bh blockHandle, err error) {
+ // Compress the buffer if necessary.
+ var b []byte
+ if compression == opt.SnappyCompression {
+ // Allocate scratch enough for compression and block trailer.
+ if n := snappy.MaxEncodedLen(buf.Len()) + blockTrailerLen; len(w.compressionScratch) < n {
+ w.compressionScratch = make([]byte, n)
+ }
+ compressed := snappy.Encode(w.compressionScratch, buf.Bytes())
+ n := len(compressed)
+ b = compressed[:n+blockTrailerLen]
+ b[n] = blockTypeSnappyCompression
+ } else {
+ tmp := buf.Alloc(blockTrailerLen)
+ tmp[0] = blockTypeNoCompression
+ b = buf.Bytes()
+ }
+
+ // Calculate the checksum.
+ n := len(b) - 4
+ checksum := util.NewCRC(b[:n]).Value()
+ binary.LittleEndian.PutUint32(b[n:], checksum)
+
+ // Write the buffer to the file.
+ _, err = w.writer.Write(b)
+ if err != nil {
+ return
+ }
+ bh = blockHandle{w.offset, uint64(len(b) - blockTrailerLen)}
+ w.offset += uint64(len(b))
+ return
+}
+
+func (w *Writer) flushPendingBH(key []byte) {
+ if w.pendingBH.length == 0 {
+ return
+ }
+ var separator []byte
+ if len(key) == 0 {
+ separator = w.cmp.Successor(w.comparerScratch[:0], w.dataBlock.prevKey)
+ } else {
+ separator = w.cmp.Separator(w.comparerScratch[:0], w.dataBlock.prevKey, key)
+ }
+ if separator == nil {
+ separator = w.dataBlock.prevKey
+ } else {
+ w.comparerScratch = separator
+ }
+ n := encodeBlockHandle(w.scratch[:20], w.pendingBH)
+ // Append the block handle to the index block.
+ w.indexBlock.append(separator, w.scratch[:n])
+ // Reset prev key of the data block.
+ w.dataBlock.prevKey = w.dataBlock.prevKey[:0]
+ // Clear pending block handle.
+ w.pendingBH = blockHandle{}
+}
+
+func (w *Writer) finishBlock() error {
+ w.dataBlock.finish()
+ bh, err := w.writeBlock(&w.dataBlock.buf, w.compression)
+ if err != nil {
+ return err
+ }
+ w.pendingBH = bh
+ // Reset the data block.
+ w.dataBlock.reset()
+ // Flush the filter block.
+ w.filterBlock.flush(w.offset)
+ return nil
+}
+
+// Append appends key/value pair to the table. The keys passed must
+// be in increasing order.
+//
+// It is safe to modify the contents of the arguments after Append returns.
+func (w *Writer) Append(key, value []byte) error {
+ if w.err != nil {
+ return w.err
+ }
+ if w.nEntries > 0 && w.cmp.Compare(w.dataBlock.prevKey, key) >= 0 {
+ w.err = fmt.Errorf("leveldb/table: Writer: keys are not in increasing order: %q, %q", w.dataBlock.prevKey, key)
+ return w.err
+ }
+
+ w.flushPendingBH(key)
+ // Append key/value pair to the data block.
+ w.dataBlock.append(key, value)
+ // Add key to the filter block.
+ w.filterBlock.add(key)
+
+ // Finish the data block if block size target reached.
+ if w.dataBlock.bytesLen() >= w.blockSize {
+ if err := w.finishBlock(); err != nil {
+ w.err = err
+ return w.err
+ }
+ }
+ w.nEntries++
+ return nil
+}
+
+// BlocksLen returns number of blocks written so far.
+func (w *Writer) BlocksLen() int {
+ n := w.indexBlock.nEntries
+ if w.pendingBH.length > 0 {
+ // Includes the pending block.
+ n++
+ }
+ return n
+}
+
+// EntriesLen returns number of entries added so far.
+func (w *Writer) EntriesLen() int {
+ return w.nEntries
+}
+
+// BytesLen returns number of bytes written so far.
+func (w *Writer) BytesLen() int {
+ return int(w.offset)
+}
+
+// Close will finalize the table. Calling Append is not possible
+// after Close, but calling BlocksLen, EntriesLen and BytesLen
+// is still possible.
+func (w *Writer) Close() error {
+ if w.err != nil {
+ return w.err
+ }
+
+ // Write the last data block, or an empty data block if there
+ // aren't any data blocks at all.
+ if w.dataBlock.nEntries > 0 || w.nEntries == 0 {
+ if err := w.finishBlock(); err != nil {
+ w.err = err
+ return w.err
+ }
+ }
+ w.flushPendingBH(nil)
+
+ // Write the filter block.
+ var filterBH blockHandle
+ w.filterBlock.finish()
+ if buf := &w.filterBlock.buf; buf.Len() > 0 {
+ filterBH, w.err = w.writeBlock(buf, opt.NoCompression)
+ if w.err != nil {
+ return w.err
+ }
+ }
+
+ // Write the metaindex block.
+ if filterBH.length > 0 {
+ key := []byte("filter." + w.filter.Name())
+ n := encodeBlockHandle(w.scratch[:20], filterBH)
+ w.dataBlock.append(key, w.scratch[:n])
+ }
+ w.dataBlock.finish()
+ metaindexBH, err := w.writeBlock(&w.dataBlock.buf, w.compression)
+ if err != nil {
+ w.err = err
+ return w.err
+ }
+
+ // Write the index block.
+ w.indexBlock.finish()
+ indexBH, err := w.writeBlock(&w.indexBlock.buf, w.compression)
+ if err != nil {
+ w.err = err
+ return w.err
+ }
+
+ // Write the table footer.
+ footer := w.scratch[:footerLen]
+ for i := range footer {
+ footer[i] = 0
+ }
+ n := encodeBlockHandle(footer, metaindexBH)
+ encodeBlockHandle(footer[n:], indexBH)
+ copy(footer[footerLen-len(magic):], magic)
+ if _, err := w.writer.Write(footer); err != nil {
+ w.err = err
+ return w.err
+ }
+ w.offset += footerLen
+
+ w.err = errors.New("leveldb/table: writer is closed")
+ return nil
+}
+
+// NewWriter creates a new initialized table writer for the file.
+//
+// Table writer is not safe for concurrent use.
+func NewWriter(f io.Writer, o *opt.Options) *Writer {
+ w := &Writer{
+ writer: f,
+ cmp: o.GetComparer(),
+ filter: o.GetFilter(),
+ compression: o.GetCompression(),
+ blockSize: o.GetBlockSize(),
+ comparerScratch: make([]byte, 0),
+ }
+ // data block
+ w.dataBlock.restartInterval = o.GetBlockRestartInterval()
+ // The first 20 bytes are used for encoding the block handle.
+ w.dataBlock.scratch = w.scratch[20:]
+ // index block
+ w.indexBlock.restartInterval = 1
+ w.indexBlock.scratch = w.scratch[20:]
+ // filter block
+ if w.filter != nil {
+ w.filterBlock.generator = w.filter.NewGenerator()
+ w.filterBlock.flush(0)
+ }
+ return w
+}
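+
+// The sketch below is illustrative only and not part of the upstream
+// library: it builds a small table with Writer, assuming len(keys) equals
+// len(values). Keys must be appended in strictly increasing order, and
+// Close writes the filter, metaindex and index blocks plus the footer
+// described in table.go.
+func exampleBuildTable(f io.Writer, o *opt.Options, keys, values [][]byte) (int, error) {
+ w := NewWriter(f, o)
+ for i := range keys {
+ if err := w.Append(keys[i], values[i]); err != nil {
+ return 0, err
+ }
+ }
+ if err := w.Close(); err != nil {
+ return 0, err
+ }
+ // BytesLen reports the total number of bytes written, including the footer.
+ return w.BytesLen(), nil
+}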
diff --git a/vendor/github.com/syndtr/goleveldb/leveldb/util.go b/vendor/github.com/syndtr/goleveldb/leveldb/util.go
new file mode 100644
index 000000000..0e2b519e5
--- /dev/null
+++ b/vendor/github.com/syndtr/goleveldb/leveldb/util.go
@@ -0,0 +1,98 @@
+// Copyright (c) 2012, Suryandaru Triandana